Diffstat (limited to 'net/core')
-rw-r--r--  net/core/Makefile            |    2
-rw-r--r--  net/core/datagram.c          |  207
-rw-r--r--  net/core/dev.c               |  343
-rw-r--r--  net/core/dev_addr_lists.c    |  100
-rw-r--r--  net/core/dev_ioctl.c         |    4
-rw-r--r--  net/core/devlink.c           | 1928
-rw-r--r--  net/core/dst.c               |    6
-rw-r--r--  net/core/ethtool.c           |  274
-rw-r--r--  net/core/filter.c            | 1178
-rw-r--r--  net/core/flow_dissector.c    |   95
-rw-r--r--  net/core/flow_offload.c      |  153
-rw-r--r--  net/core/gen_stats.c         |    2
-rw-r--r--  net/core/gro_cells.c         |    1
-rw-r--r--  net/core/lwt_bpf.c           |  266
-rw-r--r--  net/core/lwtunnel.c          |   16
-rw-r--r--  net/core/neighbour.c         |  485
-rw-r--r--  net/core/net-sysfs.c         |   29
-rw-r--r--  net/core/net-traces.c        |    8
-rw-r--r--  net/core/net_namespace.c     |  200
-rw-r--r--  net/core/netclassid_cgroup.c |    1
-rw-r--r--  net/core/netpoll.c           |    9
-rw-r--r--  net/core/page_pool.c         |   22
-rw-r--r--  net/core/pktgen.c            |    3
-rw-r--r--  net/core/rtnetlink.c         |  667
-rw-r--r--  net/core/scm.c               |   27
-rw-r--r--  net/core/skbuff.c            |  329
-rw-r--r--  net/core/skmsg.c             |   50
-rw-r--r--  net/core/sock.c              |  281
-rw-r--r--  net/core/sock_reuseport.c    |    1
-rw-r--r--  net/core/stream.c            |    2
-rw-r--r--  net/core/sysctl_net_core.c   |   42
31 files changed, 5558 insertions, 1173 deletions
diff --git a/net/core/Makefile b/net/core/Makefile
index fccd31e0e7f7..f97d6254e564 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
- fib_notifier.o xdp.o
+ fib_notifier.o xdp.o flow_offload.o
obj-y += net-sysfs.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 6a034eb538a1..b2651bb6d2a3 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -408,27 +408,20 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
}
EXPORT_SYMBOL(skb_kill_datagram);
-/**
- * skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
- * @skb: buffer to copy
- * @offset: offset in the buffer to start copying from
- * @to: iovec iterator to copy to
- * @len: amount of data to copy from buffer to iovec
- */
-int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
- struct iov_iter *to, int len)
+int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len, bool fault_short,
+ size_t (*cb)(const void *, size_t, void *, struct iov_iter *),
+ void *data)
{
int start = skb_headlen(skb);
int i, copy = start - offset, start_off = offset, n;
struct sk_buff *frag_iter;
- trace_skb_copy_datagram_iovec(skb, len);
-
/* Copy header. */
if (copy > 0) {
if (copy > len)
copy = len;
- n = copy_to_iter(skb->data + offset, copy, to);
+ n = cb(skb->data + offset, copy, data, to);
offset += n;
if (n != copy)
goto short_copy;
@@ -445,11 +438,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
+ struct page *page = skb_frag_page(frag);
+ u8 *vaddr = kmap(page);
+
if (copy > len)
copy = len;
- n = copy_page_to_iter(skb_frag_page(frag),
- frag->page_offset + offset -
- start, copy, to);
+ n = cb(vaddr + frag->page_offset +
+ offset - start, copy, data, to);
+ kunmap(page);
offset += n;
if (n != copy)
goto short_copy;
@@ -468,8 +464,8 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (skb_copy_datagram_iter(frag_iter, offset - start,
- to, copy))
+ if (__skb_datagram_iter(frag_iter, offset - start,
+ to, copy, fault_short, cb, data))
goto fault;
if ((len -= copy) == 0)
return 0;
@@ -490,11 +486,50 @@ fault:
return -EFAULT;
short_copy:
- if (iov_iter_count(to))
+ if (fault_short || iov_iter_count(to))
goto fault;
return 0;
}
+
+/**
+ * skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
+ * and update a hash.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying from
+ * @to: iovec iterator to copy to
+ * @len: amount of data to copy from buffer to iovec
+ * @hash: hash request to update
+ */
+int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len,
+ struct ahash_request *hash)
+{
+ return __skb_datagram_iter(skb, offset, to, len, true,
+ hash_and_copy_to_iter, hash);
+}
+EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter);
+
+static size_t simple_copy_to_iter(const void *addr, size_t bytes,
+ void *data __always_unused, struct iov_iter *i)
+{
+ return copy_to_iter(addr, bytes, i);
+}
+
+/**
+ * skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying from
+ * @to: iovec iterator to copy to
+ * @len: amount of data to copy from buffer to iovec
+ */
+int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len)
+{
+ trace_skb_copy_datagram_iovec(skb, len);
+ return __skb_datagram_iter(skb, offset, to, len, false,
+ simple_copy_to_iter, NULL);
+}
EXPORT_SYMBOL(skb_copy_datagram_iter);
/**
@@ -645,132 +680,23 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
+/**
+ * skb_copy_and_csum_datagram_iter - Copy datagram to an iovec iterator
+ * and update a checksum.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying from
+ * @to: iovec iterator to copy to
+ * @len: amount of data to copy from buffer to iovec
+ * @csump: checksum pointer
+ */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len,
__wsum *csump)
{
- int start = skb_headlen(skb);
- int i, copy = start - offset, start_off = offset;
- struct sk_buff *frag_iter;
- int pos = 0;
- int n;
-
- /* Copy header. */
- if (copy > 0) {
- if (copy > len)
- copy = len;
- n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
- offset += n;
- if (n != copy)
- goto fault;
- if ((len -= copy) == 0)
- return 0;
- pos = copy;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- WARN_ON(start > offset + len);
-
- end = start + skb_frag_size(frag);
- if ((copy = end - offset) > 0) {
- __wsum csum2 = 0;
- struct page *page = skb_frag_page(frag);
- u8 *vaddr = kmap(page);
-
- if (copy > len)
- copy = len;
- n = csum_and_copy_to_iter(vaddr + frag->page_offset +
- offset - start, copy,
- &csum2, to);
- kunmap(page);
- offset += n;
- if (n != copy)
- goto fault;
- *csump = csum_block_add(*csump, csum2, pos);
- if (!(len -= copy))
- return 0;
- pos += copy;
- }
- start = end;
- }
-
- skb_walk_frags(skb, frag_iter) {
- int end;
-
- WARN_ON(start > offset + len);
-
- end = start + frag_iter->len;
- if ((copy = end - offset) > 0) {
- __wsum csum2 = 0;
- if (copy > len)
- copy = len;
- if (skb_copy_and_csum_datagram(frag_iter,
- offset - start,
- to, copy,
- &csum2))
- goto fault;
- *csump = csum_block_add(*csump, csum2, pos);
- if ((len -= copy) == 0)
- return 0;
- offset += copy;
- pos += copy;
- }
- start = end;
- }
- if (!len)
- return 0;
-
-fault:
- iov_iter_revert(to, offset - start_off);
- return -EFAULT;
+ return __skb_datagram_iter(skb, offset, to, len, true,
+ csum_and_copy_to_iter, csump);
}
-__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
-{
- __sum16 sum;
-
- sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
- if (likely(!sum)) {
- if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
- !skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev);
- }
- if (!skb_shared(skb))
- skb->csum_valid = !sum;
- return sum;
-}
-EXPORT_SYMBOL(__skb_checksum_complete_head);
-
-__sum16 __skb_checksum_complete(struct sk_buff *skb)
-{
- __wsum csum;
- __sum16 sum;
-
- csum = skb_checksum(skb, 0, skb->len, 0);
-
- /* skb->csum holds pseudo checksum */
- sum = csum_fold(csum_add(skb->csum, csum));
- if (likely(!sum)) {
- if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
- !skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev);
- }
-
- if (!skb_shared(skb)) {
- /* Save full packet checksum */
- skb->csum = csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum_complete_sw = 1;
- skb->csum_valid = !sum;
- }
-
- return sum;
-}
-EXPORT_SYMBOL(__skb_checksum_complete);
-
/**
* skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
* @skb: skbuff
@@ -808,8 +734,9 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
return -EINVAL;
}
- if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
- netdev_rx_csum_fault(skb->dev);
+ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !skb->csum_complete_sw)
+ netdev_rx_csum_fault(NULL, skb);
}
return 0;
fault:
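
The datagram.c changes above fold the plain, checksumming and hashing copy paths into a single walker, __skb_datagram_iter(), parameterized by a per-chunk callback and a fault_short flag. A minimal caller sketch for the new hash variant — only skb_copy_and_hash_datagram_iter() comes from the patch; the surrounding names are hypothetical:

/* Hypothetical caller: copy `len` bytes of skb payload into the message
 * iov while feeding the same bytes through a caller-initialized ahash
 * request. fault_short is true internally, so a short copy surfaces as
 * -EFAULT instead of a silently partial hash.
 */
static int example_copy_and_hash(struct sk_buff *skb, struct msghdr *msg,
                                 int len, struct ahash_request *req)
{
        return skb_copy_and_hash_datagram_iter(skb, 0, &msg->msg_iter,
                                               len, req);
}
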
diff --git a/net/core/dev.c b/net/core/dev.c
index 022ad73d6253..2b67f2aa59dd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -145,6 +145,7 @@
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
+#include <linux/indirect_call_wrapper.h>
#include "net-sysfs.h"
@@ -162,6 +163,9 @@ static struct list_head offload_base __read_mostly;
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
struct netdev_notifier_info *info);
+static int call_netdevice_notifiers_extack(unsigned long val,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
@@ -1361,7 +1365,7 @@ void netdev_notify_peers(struct net_device *dev)
}
EXPORT_SYMBOL(netdev_notify_peers);
-static int __dev_open(struct net_device *dev)
+static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
int ret;
@@ -1377,7 +1381,7 @@ static int __dev_open(struct net_device *dev)
*/
netpoll_poll_disable(dev);
- ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
+ ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
ret = notifier_to_errno(ret);
if (ret)
return ret;
@@ -1406,7 +1410,8 @@ static int __dev_open(struct net_device *dev)
/**
* dev_open - prepare an interface for use.
- * @dev: device to open
+ * @dev: device to open
+ * @extack: netlink extended ack
*
* Takes a device from down to up state. The device's private open
* function is invoked and then the multicast lists are loaded. Finally
@@ -1416,14 +1421,14 @@ static int __dev_open(struct net_device *dev)
* Calling this function on an active interface is a nop. On a failure
* a negative errno code is returned.
*/
-int dev_open(struct net_device *dev)
+int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
int ret;
if (dev->flags & IFF_UP)
return 0;
- ret = __dev_open(dev);
+ ret = __dev_open(dev, extack);
if (ret < 0)
return ret;
@@ -1585,6 +1590,7 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd)
N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
+ N(PRE_CHANGEADDR)
}
#undef N
return "UNKNOWN_NETDEV_EVENT";
@@ -1733,6 +1739,18 @@ static int call_netdevice_notifiers_info(unsigned long val,
return raw_notifier_call_chain(&netdev_chain, val, info);
}
+static int call_netdevice_notifiers_extack(unsigned long val,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ struct netdev_notifier_info info = {
+ .dev = dev,
+ .extack = extack,
+ };
+
+ return call_netdevice_notifiers_info(val, &info);
+}
+
/**
* call_netdevice_notifiers - call all network notifier blocks
* @val: value passed unmodified to notifier function
@@ -1744,11 +1762,7 @@ static int call_netdevice_notifiers_info(unsigned long val,
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
- struct netdev_notifier_info info = {
- .dev = dev,
- };
-
- return call_netdevice_notifiers_info(val, &info);
+ return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
@@ -1807,7 +1821,7 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
@@ -1826,7 +1840,7 @@ static DECLARE_WORK(netstamp_work, netstamp_clear);
void net_enable_timestamp(void)
{
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
int wanted;
while (1) {
@@ -1846,7 +1860,7 @@ EXPORT_SYMBOL(net_enable_timestamp);
void net_disable_timestamp(void)
{
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
int wanted;
while (1) {
@@ -2175,6 +2189,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
return active;
}
+static void reset_xps_maps(struct net_device *dev,
+ struct xps_dev_maps *dev_maps,
+ bool is_rxqs_map)
+{
+ if (is_rxqs_map) {
+ static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+ RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+ } else {
+ RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+ }
+ static_key_slow_dec_cpuslocked(&xps_needed);
+ kfree_rcu(dev_maps, rcu);
+}
+
static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
struct xps_dev_maps *dev_maps, unsigned int nr_ids,
u16 offset, u16 count, bool is_rxqs_map)
@@ -2186,18 +2214,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
j < nr_ids;)
active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
count);
- if (!active) {
- if (is_rxqs_map) {
- RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
- } else {
- RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+ if (!active)
+ reset_xps_maps(dev, dev_maps, is_rxqs_map);
- for (i = offset + (count - 1); count--; i--)
- netdev_queue_numa_node_write(
- netdev_get_tx_queue(dev, i),
- NUMA_NO_NODE);
+ if (!is_rxqs_map) {
+ for (i = offset + (count - 1); count--; i--) {
+ netdev_queue_numa_node_write(
+ netdev_get_tx_queue(dev, i),
+ NUMA_NO_NODE);
}
- kfree_rcu(dev_maps, rcu);
}
}
@@ -2234,10 +2259,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
false);
out_no_maps:
- if (static_key_enabled(&xps_rxqs_needed))
- static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
-
- static_key_slow_dec_cpuslocked(&xps_needed);
mutex_unlock(&xps_map_mutex);
cpus_read_unlock();
}
@@ -2355,9 +2376,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
if (!new_dev_maps)
goto out_no_new_maps;
- static_key_slow_inc_cpuslocked(&xps_needed);
- if (is_rxqs_map)
- static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+ if (!dev_maps) {
+ /* Increment static keys at most once per type */
+ static_key_slow_inc_cpuslocked(&xps_needed);
+ if (is_rxqs_map)
+ static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+ }
for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
j < nr_ids;) {
@@ -2455,13 +2479,8 @@ out_no_new_maps:
}
/* free map if not active */
- if (!active) {
- if (is_rxqs_map)
- RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
- else
- RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
- kfree_rcu(dev_maps, rcu);
- }
+ if (!active)
+ reset_xps_maps(dev, dev_maps, is_rxqs_map);
out_no_maps:
mutex_unlock(&xps_map_mutex);
@@ -3091,10 +3110,17 @@ EXPORT_SYMBOL(__skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev)
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
if (net_ratelimit()) {
pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+ if (dev)
+ pr_err("dev features: %pNF\n", &dev->features);
+ pr_err("skb len=%u data_len=%u pkt_type=%u gso_size=%u gso_type=%u nr_frags=%u ip_summed=%u csum=%x csum_complete_sw=%d csum_valid=%d csum_level=%u\n",
+ skb->len, skb->data_len, skb->pkt_type,
+ skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type,
+ skb_shinfo(skb)->nr_frags, skb->ip_summed, skb->csum,
+ skb->csum_complete_sw, skb->csum_valid, skb->csum_level);
dump_stack();
}
}
@@ -3272,7 +3298,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
}
skb = next;
- if (netif_xmit_stopped(txq) && skb) {
+ if (netif_tx_queue_stopped(txq) && skb) {
rc = NETDEV_TX_BUSY;
break;
}
@@ -3395,7 +3421,7 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
/* To get more precise estimation of bytes sent on wire,
* we add to pkt_len the headers size of all segments
*/
- if (shinfo->gso_size) {
+ if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
unsigned int hdr_len;
u16 gso_segs = shinfo->gso_segs;
@@ -4520,9 +4546,14 @@ static int netif_rx_internal(struct sk_buff *skb)
int netif_rx(struct sk_buff *skb)
{
+ int ret;
+
trace_netif_rx_entry(skb);
- return netif_rx_internal(skb);
+ ret = netif_rx_internal(skb);
+ trace_netif_rx_exit(ret);
+
+ return ret;
}
EXPORT_SYMBOL(netif_rx);
@@ -4537,6 +4568,7 @@ int netif_rx_ni(struct sk_buff *skb)
if (local_softirq_pending())
do_softirq();
preempt_enable();
+ trace_netif_rx_ni_exit(err);
return err;
}
@@ -4889,7 +4921,7 @@ skip_classify:
* and set skb->priority like in vlan_do_receive()
* For the time being, just ignore Priority Code Point
*/
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
}
type = skb->protocol;
@@ -5009,7 +5041,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
struct net_device *orig_dev = skb->dev;
struct packet_type *pt_prev = NULL;
- list_del(&skb->list);
+ skb_list_del_init(skb);
__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
if (!pt_prev)
continue;
@@ -5165,7 +5197,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
net_timestamp_check(netdev_tstamp_prequeue, skb);
- list_del(&skb->list);
+ skb_list_del_init(skb);
if (!skb_defer_rx_timestamp(skb))
list_add_tail(&skb->list, &sublist);
}
@@ -5176,7 +5208,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
rcu_read_lock();
list_for_each_entry_safe(skb, next, head, list) {
xdp_prog = rcu_dereference(skb->dev->xdp_prog);
- list_del(&skb->list);
+ skb_list_del_init(skb);
if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
list_add_tail(&skb->list, &sublist);
}
@@ -5195,7 +5227,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
if (cpu >= 0) {
/* Will be handled, remove from list */
- list_del(&skb->list);
+ skb_list_del_init(skb);
enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
}
}
@@ -5222,9 +5254,14 @@ static void netif_receive_skb_list_internal(struct list_head *head)
*/
int netif_receive_skb(struct sk_buff *skb)
{
+ int ret;
+
trace_netif_receive_skb_entry(skb);
- return netif_receive_skb_internal(skb);
+ ret = netif_receive_skb_internal(skb);
+ trace_netif_receive_skb_exit(ret);
+
+ return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
@@ -5244,9 +5281,12 @@ void netif_receive_skb_list(struct list_head *head)
if (list_empty(head))
return;
- list_for_each_entry(skb, head, list)
- trace_netif_receive_skb_list_entry(skb);
+ if (trace_netif_receive_skb_list_entry_enabled()) {
+ list_for_each_entry(skb, head, list)
+ trace_netif_receive_skb_list_entry(skb);
+ }
netif_receive_skb_list_internal(head);
+ trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
@@ -5299,6 +5339,8 @@ static void flush_all_backlogs(void)
put_online_cpus();
}
+INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
static int napi_gro_complete(struct sk_buff *skb)
{
struct packet_offload *ptype;
@@ -5318,7 +5360,9 @@ static int napi_gro_complete(struct sk_buff *skb)
if (ptype->type != type || !ptype->callbacks.gro_complete)
continue;
- err = ptype->callbacks.gro_complete(skb, 0);
+ err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
+ ipv6_gro_complete, inet_gro_complete,
+ skb, 0);
break;
}
rcu_read_unlock();
@@ -5357,11 +5401,13 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
*/
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
- u32 i;
+ unsigned long bitmask = napi->gro_bitmask;
+ unsigned int i, base = ~0U;
- for (i = 0; i < GRO_HASH_BUCKETS; i++) {
- if (test_bit(i, &napi->gro_bitmask))
- __napi_gro_flush_chain(napi, i, flush_old);
+ while ((i = ffs(bitmask)) != 0) {
+ bitmask >>= i;
+ base += i;
+ __napi_gro_flush_chain(napi, base, flush_old);
}
}
EXPORT_SYMBOL(napi_gro_flush);
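
The rewritten napi_gro_flush() walks only the set bits of gro_bitmask with ffs() instead of testing every GRO_HASH_BUCKETS index. A standalone illustration of the same idiom (not from the patch); starting base at ~0U makes the first base += i land on the zero-based bit index:

/* Illustration: visit set bits 0, 3 and 5 of `mask` in increasing order. */
static void example_ffs_walk(void)
{
        unsigned long mask = 0x29;      /* binary 101001: bits 0, 3, 5 */
        unsigned int i, base = ~0U;

        while ((i = ffs(mask)) != 0) {  /* ffs() returns a 1-based position */
                mask >>= i;
                base += i;              /* base becomes 0, then 3, then 5 */
                /* process bucket `base` here */
        }
}
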
@@ -5386,7 +5432,9 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
}
diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
- diffs |= p->vlan_tci ^ skb->vlan_tci;
+ diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
+ if (skb_vlan_tag_present(p))
+ diffs |= p->vlan_tci ^ skb->vlan_tci;
diffs |= skb_metadata_dst_cmp(p, skb);
diffs |= skb_metadata_differs(p, skb);
if (maclen == ETH_HLEN)
@@ -5457,10 +5505,14 @@ static void gro_flush_oldest(struct list_head *head)
/* Do not adjust napi->gro_hash[].count, caller is adding a new
* SKB to the chain.
*/
- list_del(&oldest->list);
+ skb_list_del_init(oldest);
napi_gro_complete(oldest);
}
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
+ struct sk_buff *));
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
@@ -5510,7 +5562,9 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->csum_valid = 0;
}
- pp = ptype->callbacks.gro_receive(gro_head, skb);
+ pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
+ ipv6_gro_receive, inet_gro_receive,
+ gro_head, skb);
break;
}
rcu_read_unlock();
@@ -5634,12 +5688,17 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
+ gro_result_t ret;
+
skb_mark_napi_id(skb, napi);
trace_napi_gro_receive_entry(skb);
skb_gro_reset_offset(skb);
- return napi_skb_finish(dev_gro_receive(napi, skb), skb);
+ ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
+ trace_napi_gro_receive_exit(ret);
+
+ return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
@@ -5652,9 +5711,13 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
__skb_pull(skb, skb_headlen(skb));
/* restore the reserve we had after netdev_alloc_skb_ip_align() */
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb->dev = napi->dev;
skb->skb_iif = 0;
+
+ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
+ skb->pkt_type = PACKET_HOST;
+
skb->encapsulation = 0;
skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
@@ -5753,6 +5816,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
+ gro_result_t ret;
struct sk_buff *skb = napi_frags_skb(napi);
if (!skb)
@@ -5760,7 +5824,10 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
trace_napi_gro_frags_entry(skb);
- return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+ ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+ trace_napi_gro_frags_exit(ret);
+
+ return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
@@ -5776,10 +5843,11 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
+ /* See comments in __skb_checksum_complete(). */
if (likely(!sum)) {
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
!skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev);
+ netdev_rx_csum_fault(skb->dev, skb);
}
NAPI_GRO_CB(skb)->csum = wsum;
@@ -5966,11 +6034,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
if (work_done)
timeout = n->dev->gro_flush_timeout;
+ /* When the NAPI instance uses a timeout and keeps postponing
+ * it, we need to bound somehow the time packets are kept in
+ * the GRO layer
+ */
+ napi_gro_flush(n, !!timeout);
if (timeout)
hrtimer_start(&n->timer, ns_to_ktime(timeout),
HRTIMER_MODE_REL_PINNED);
- else
- napi_gro_flush(n, false);
}
if (unlikely(!list_empty(&n->poll_list))) {
/* If n->poll_list is not empty, we need to mask irqs */
@@ -6197,8 +6268,8 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
napi->skb = NULL;
napi->poll = poll;
if (weight > NAPI_POLL_WEIGHT)
- pr_err_once("netif_napi_add() called with weight %d on device %s\n",
- weight, dev->name);
+ netdev_err_once(dev, "%s() called with weight %d\n", __func__,
+ weight);
napi->weight = weight;
list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev;
@@ -7455,7 +7526,8 @@ unsigned int dev_get_flags(const struct net_device *dev)
}
EXPORT_SYMBOL(dev_get_flags);
-int __dev_change_flags(struct net_device *dev, unsigned int flags)
+int __dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack)
{
unsigned int old_flags = dev->flags;
int ret;
@@ -7492,7 +7564,7 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
if (old_flags & IFF_UP)
__dev_close(dev);
else
- ret = __dev_open(dev);
+ ret = __dev_open(dev, extack);
}
if ((flags ^ dev->gflags) & IFF_PROMISC) {
@@ -7552,16 +7624,18 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
* dev_change_flags - change device settings
* @dev: device
* @flags: device state flags
+ * @extack: netlink extended ack
*
* Change settings on device based state flags. The flags are
* in the userspace exported format.
*/
-int dev_change_flags(struct net_device *dev, unsigned int flags)
+int dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack)
{
int ret;
unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
- ret = __dev_change_flags(dev, flags);
+ ret = __dev_change_flags(dev, flags, extack);
if (ret < 0)
return ret;
@@ -7694,13 +7768,36 @@ void dev_set_group(struct net_device *dev, int new_group)
EXPORT_SYMBOL(dev_set_group);
/**
+ * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
+ * @dev: device
+ * @addr: new address
+ * @extack: netlink extended ack
+ */
+int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack)
+{
+ struct netdev_notifier_pre_changeaddr_info info = {
+ .info.dev = dev,
+ .info.extack = extack,
+ .dev_addr = addr,
+ };
+ int rc;
+
+ rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
+ return notifier_to_errno(rc);
+}
+EXPORT_SYMBOL(dev_pre_changeaddr_notify);
+
+/**
* dev_set_mac_address - Change Media Access Control Address
* @dev: device
* @sa: new address
+ * @extack: netlink extended ack
*
* Change the hardware (MAC) address of the device
*/
-int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
int err;
@@ -7711,6 +7808,9 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
+ err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
+ if (err)
+ return err;
err = ops->ndo_set_mac_address(dev, sa);
if (err)
return err;
@@ -7778,6 +7878,63 @@ int dev_get_phys_port_name(struct net_device *dev,
EXPORT_SYMBOL(dev_get_phys_port_name);
/**
+ * dev_get_port_parent_id - Get the device's port parent identifier
+ * @dev: network device
+ * @ppid: pointer to storage for the port's parent identifier
+ * @recurse: allow/disallow recursion to lower devices
+ *
+ * Get the device's port parent identifier
+ */
+int dev_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid,
+ bool recurse)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ struct netdev_phys_item_id first = { };
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int err = -EOPNOTSUPP;
+
+ if (ops->ndo_get_port_parent_id)
+ return ops->ndo_get_port_parent_id(dev, ppid);
+
+ if (!recurse)
+ return err;
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = dev_get_port_parent_id(lower_dev, ppid, recurse);
+ if (err)
+ break;
+ if (!first.id_len)
+ first = *ppid;
+ else if (memcmp(&first, ppid, sizeof(*ppid)))
+ return -ENODATA;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(dev_get_port_parent_id);
+
+/**
+ * netdev_port_same_parent_id - Indicate if two network devices have
+ * the same port parent identifier
+ * @a: first network device
+ * @b: second network device
+ */
+bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
+{
+ struct netdev_phys_item_id a_id = { };
+ struct netdev_phys_item_id b_id = { };
+
+ if (dev_get_port_parent_id(a, &a_id, true) ||
+ dev_get_port_parent_id(b, &b_id, true))
+ return false;
+
+ return netdev_phys_item_id_same(&a_id, &b_id);
+}
+EXPORT_SYMBOL(netdev_port_same_parent_id);
+
+/**
* dev_change_proto_down - update protocol port state information
* @dev: device
* @proto_down: new value
@@ -7797,6 +7954,25 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
}
EXPORT_SYMBOL(dev_change_proto_down);
+/**
+ * dev_change_proto_down_generic - generic implementation for
+ * ndo_change_proto_down that sets carrier according to
+ * proto_down.
+ *
+ * @dev: device
+ * @proto_down: new value
+ */
+int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
+{
+ if (proto_down)
+ netif_carrier_off(dev);
+ else
+ netif_carrier_on(dev);
+ dev->proto_down = proto_down;
+ return 0;
+}
+EXPORT_SYMBOL(dev_change_proto_down_generic);
+
u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
enum bpf_netdev_command cmd)
{
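
Two hedged driver-side sketches for the helpers added just above (the example_* names and ops struct are illustrative, not from the patch): netdev_port_same_parent_id() lets a driver test whether two netdevs hang off the same switch ASIC, and dev_change_proto_down_generic() can be wired directly as an ndo for drivers whose only proto_down action is toggling carrier:

/* Same-parent check, e.g. before offloading a rule between two ports. */
static bool example_can_offload(struct net_device *in,
                                struct net_device *out)
{
        return netdev_port_same_parent_id(in, out);
}

/* Let the core implement proto_down as a carrier toggle. */
static const struct net_device_ops example_netdev_ops = {
        .ndo_change_proto_down  = dev_change_proto_down_generic,
};
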
@@ -7876,35 +8052,41 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
enum bpf_netdev_command query;
struct bpf_prog *prog = NULL;
bpf_op_t bpf_op, bpf_chk;
+ bool offload;
int err;
ASSERT_RTNL();
- query = flags & XDP_FLAGS_HW_MODE ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
+ offload = flags & XDP_FLAGS_HW_MODE;
+ query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
bpf_op = bpf_chk = ops->ndo_bpf;
- if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
+ if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) {
+ NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode");
return -EOPNOTSUPP;
+ }
if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
bpf_op = generic_xdp_install;
if (bpf_op == bpf_chk)
bpf_chk = generic_xdp_install;
if (fd >= 0) {
- if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) ||
- __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW))
+ if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
+ NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
return -EEXIST;
+ }
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
- __dev_xdp_query(dev, bpf_op, query))
+ __dev_xdp_query(dev, bpf_op, query)) {
+ NL_SET_ERR_MSG(extack, "XDP program already attached");
return -EBUSY;
+ }
prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
bpf_op == ops->ndo_bpf);
if (IS_ERR(prog))
return PTR_ERR(prog);
- if (!(flags & XDP_FLAGS_HW_MODE) &&
- bpf_prog_is_dev_bound(prog->aux)) {
+ if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
bpf_prog_put(prog);
return -EINVAL;
@@ -8052,7 +8234,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
netdev_features_t feature;
int feature_bit;
- for_each_netdev_feature(&upper_disables, feature_bit) {
+ for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(upper->wanted_features & feature)
&& (features & feature)) {
@@ -8072,7 +8254,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
netdev_features_t feature;
int feature_bit;
- for_each_netdev_feature(&upper_disables, feature_bit) {
+ for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(features & feature) && (lower->features & feature)) {
netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
@@ -8612,6 +8794,9 @@ int init_dummy_netdev(struct net_device *dev)
set_bit(__LINK_STATE_PRESENT, &dev->state);
set_bit(__LINK_STATE_START, &dev->state);
+ /* napi_busy_loop stats accounting wants this */
+ dev_net_set(dev, &init_net);
+
/* Note : We dont allocate pcpu_refcnt for dummy devices,
* because users of this 'device' dont need to change
* its refcount.
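
dev.c also threads struct netlink_ext_ack through __dev_open(), dev_change_flags() and dev_set_mac_address() down to the new extack-aware notifier call, so a veto can carry a message back to userspace. A hedged sketch of a NETDEV_PRE_UP consumer (the notifier and its example_blocks_up() condition are invented for illustration; NL_SET_ERR_MSG tolerates a NULL extack):

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct netdev_notifier_info *info = ptr;

        if (event == NETDEV_PRE_UP && example_blocks_up(info->dev)) {
                NL_SET_ERR_MSG(info->extack,
                               "example: device may not be brought up");
                return notifier_from_errno(-EBUSY);
        }
        return NOTIFY_DONE;
}
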
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index d884d8f5f0e5..a6723b306717 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -278,6 +278,103 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
EXPORT_SYMBOL(__hw_addr_sync_dev);
/**
+ * __hw_addr_ref_sync_dev - Synchronize device's multicast address list taking
+ * into account references
+ * @list: address list to synchronize
+ * @dev: device to sync
+ * @sync: function to call if an address or a reference on it should be added
+ * @unsync: function to call if an address or a reference on it should be removed
+ *
+ * This function is intended to be called from the ndo_set_rx_mode
+ * function of devices that require explicit add/remove notifications for
+ * addresses or references on them. The unsync function may be NULL, in
+ * which case addresses or references requiring removal are simply dropped
+ * without notifying the device. It is then the driver's responsibility to
+ * identify and distribute addresses or references between its internal
+ * address tables.
+ **/
+int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*sync)(struct net_device *,
+ const unsigned char *, int),
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int))
+{
+ struct netdev_hw_addr *ha, *tmp;
+ int err, ref_cnt;
+
+ /* first go through and flush out any unsynced/stale entries */
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ /* sync if address is not used */
+ if ((ha->sync_cnt << 1) <= ha->refcount)
+ continue;
+
+ /* if fails defer unsyncing address */
+ ref_cnt = ha->refcount - ha->sync_cnt;
+ if (unsync && unsync(dev, ha->addr, ref_cnt))
+ continue;
+
+ ha->refcount = (ref_cnt << 1) + 1;
+ ha->sync_cnt = ref_cnt;
+ __hw_addr_del_entry(list, ha, false, false);
+ }
+
+ /* go through and sync updated/new entries to the list */
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ /* sync if address added or reused */
+ if ((ha->sync_cnt << 1) >= ha->refcount)
+ continue;
+
+ ref_cnt = ha->refcount - ha->sync_cnt;
+ err = sync(dev, ha->addr, ref_cnt);
+ if (err)
+ return err;
+
+ ha->refcount = ref_cnt << 1;
+ ha->sync_cnt = ref_cnt;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(__hw_addr_ref_sync_dev);
+
+/**
+ * __hw_addr_ref_unsync_dev - Remove synchronized addresses and references on
+ * it from device
+ * @list: address list to remove synchronized addresses (references on it) from
+ * @dev: device to sync
+ * @unsync: function to call if address and references on it should be removed
+ *
+ * Remove all addresses that were added to the device by
+ * __hw_addr_ref_sync_dev(). This function is intended to be called from the
+ * ndo_stop or ndo_open functions of devices that require explicit add/remove
+ * notifications for addresses or references on them. If the unsync function
+ * pointer is NULL, this function can be used simply to reset the sync_cnt
+ * for the addresses in the list.
+ **/
+void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int))
+{
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ if (!ha->sync_cnt)
+ continue;
+
+ /* if fails defer unsyncing address */
+ if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
+ continue;
+
+ ha->refcount -= ha->sync_cnt - 1;
+ ha->sync_cnt = 0;
+ __hw_addr_del_entry(list, ha, false, false);
+ }
+}
+EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);
+
+/**
* __hw_addr_unsync_dev - Remove synchronized addresses from device
* @list: address list to remove synchronized addresses from
* @dev: device to sync
@@ -401,6 +498,9 @@ int dev_addr_add(struct net_device *dev, const unsigned char *addr,
ASSERT_RTNL();
+ err = dev_pre_changeaddr_notify(dev, addr, NULL);
+ if (err)
+ return err;
err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
if (!err)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
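
A hedged sketch of a driver consuming the new reference-counted sync helpers from its ndo_set_rx_mode (all example_* functions are hypothetical; returning nonzero from sync/unsync defers the entry, as the kerneldoc above describes):

static int example_mc_sync(struct net_device *dev,
                           const unsigned char *addr, int ref_cnt)
{
        /* program `addr` with ref_cnt references into the HW filter */
        return 0;
}

static int example_mc_unsync(struct net_device *dev,
                             const unsigned char *addr, int ref_cnt)
{
        /* drop ref_cnt references on `addr` from the HW filter */
        return 0;
}

static void example_set_rx_mode(struct net_device *dev)
{
        __hw_addr_ref_sync_dev(&dev->mc, dev, example_mc_sync,
                               example_mc_unsync);
}
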
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 90e8aa36881e..31380fd5a4e2 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -234,7 +234,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
switch (cmd) {
case SIOCSIFFLAGS: /* Set interface flags */
- return dev_change_flags(dev, ifr->ifr_flags);
+ return dev_change_flags(dev, ifr->ifr_flags, NULL);
case SIOCSIFMETRIC: /* Set the metric on the interface
(currently unused) */
@@ -246,7 +246,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
case SIOCSIFHWADDR:
if (dev->addr_len > sizeof(struct sockaddr))
return -EINVAL;
- return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
+ return dev_set_mac_address(dev, &ifr->ifr_hwaddr, NULL);
case SIOCSIFHWBROADCAST:
if (ifr->ifr_hwaddr.sa_family != dev->type)
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 3a4b29a13d31..78e22cea4cc7 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -81,6 +81,7 @@ struct devlink_dpipe_header devlink_dpipe_header_ipv6 = {
EXPORT_SYMBOL(devlink_dpipe_header_ipv6);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
static LIST_HEAD(devlink_list);
@@ -115,6 +116,8 @@ static struct devlink *devlink_get_from_attrs(struct net *net,
busname = nla_data(attrs[DEVLINK_ATTR_BUS_NAME]);
devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
+ lockdep_assert_held(&devlink_mutex);
+
list_for_each_entry(devlink, &devlink_list, list) {
if (strcmp(devlink->dev->bus->name, busname) == 0 &&
strcmp(dev_name(devlink->dev), devname) == 0 &&
@@ -720,7 +723,7 @@ static int devlink_port_type_set(struct devlink *devlink,
{
int err;
- if (devlink->ops && devlink->ops->port_type_set) {
+ if (devlink->ops->port_type_set) {
if (port_type == DEVLINK_PORT_TYPE_NOTSET)
return -EINVAL;
if (port_type == devlink_port->type)
@@ -757,7 +760,7 @@ static int devlink_port_split(struct devlink *devlink, u32 port_index,
u32 count, struct netlink_ext_ack *extack)
{
- if (devlink->ops && devlink->ops->port_split)
+ if (devlink->ops->port_split)
return devlink->ops->port_split(devlink, port_index, count,
extack);
return -EOPNOTSUPP;
@@ -783,7 +786,7 @@ static int devlink_port_unsplit(struct devlink *devlink, u32 port_index,
struct netlink_ext_ack *extack)
{
- if (devlink->ops && devlink->ops->port_unsplit)
+ if (devlink->ops->port_unsplit)
return devlink->ops->port_unsplit(devlink, port_index, extack);
return -EOPNOTSUPP;
}
@@ -932,6 +935,9 @@ static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
pool_info.threshold_type))
goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_CELL_SIZE,
+ pool_info.cell_size))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
@@ -955,7 +961,7 @@ static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
if (err)
return err;
- if (!devlink->ops || !devlink->ops->sb_pool_get)
+ if (!devlink->ops->sb_pool_get)
return -EOPNOTSUPP;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -1011,7 +1017,7 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
- !devlink->ops || !devlink->ops->sb_pool_get)
+ !devlink->ops->sb_pool_get)
continue;
mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
@@ -1040,7 +1046,7 @@ static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
{
const struct devlink_ops *ops = devlink->ops;
- if (ops && ops->sb_pool_set)
+ if (ops->sb_pool_set)
return ops->sb_pool_set(devlink, sb_index, pool_index,
size, threshold_type);
return -EOPNOTSUPP;
@@ -1145,7 +1151,7 @@ static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
if (err)
return err;
- if (!devlink->ops || !devlink->ops->sb_port_pool_get)
+ if (!devlink->ops->sb_port_pool_get)
return -EOPNOTSUPP;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -1207,7 +1213,7 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
- !devlink->ops || !devlink->ops->sb_port_pool_get)
+ !devlink->ops->sb_port_pool_get)
continue;
mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
@@ -1236,7 +1242,7 @@ static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
{
const struct devlink_ops *ops = devlink_port->devlink->ops;
- if (ops && ops->sb_port_pool_set)
+ if (ops->sb_port_pool_set)
return ops->sb_port_pool_set(devlink_port, sb_index,
pool_index, threshold);
return -EOPNOTSUPP;
@@ -1349,7 +1355,7 @@ static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
if (err)
return err;
- if (!devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
+ if (!devlink->ops->sb_tc_pool_bind_get)
return -EOPNOTSUPP;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -1433,7 +1439,7 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
- !devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
+ !devlink->ops->sb_tc_pool_bind_get)
continue;
mutex_lock(&devlink->lock);
@@ -1465,7 +1471,7 @@ static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
{
const struct devlink_ops *ops = devlink_port->devlink->ops;
- if (ops && ops->sb_tc_pool_bind_set)
+ if (ops->sb_tc_pool_bind_set)
return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
tc_index, pool_type,
pool_index, threshold);
@@ -1513,7 +1519,7 @@ static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
struct devlink_sb *devlink_sb = info->user_ptr[1];
const struct devlink_ops *ops = devlink->ops;
- if (ops && ops->sb_occ_snapshot)
+ if (ops->sb_occ_snapshot)
return ops->sb_occ_snapshot(devlink, devlink_sb->index);
return -EOPNOTSUPP;
}
@@ -1525,7 +1531,7 @@ static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
struct devlink_sb *devlink_sb = info->user_ptr[1];
const struct devlink_ops *ops = devlink->ops;
- if (ops && ops->sb_occ_max_clear)
+ if (ops->sb_occ_max_clear)
return ops->sb_occ_max_clear(devlink, devlink_sb->index);
return -EOPNOTSUPP;
}
@@ -1588,13 +1594,9 @@ static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
- const struct devlink_ops *ops = devlink->ops;
struct sk_buff *msg;
int err;
- if (!ops)
- return -EOPNOTSUPP;
-
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -1619,9 +1621,6 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
int err = 0;
u16 mode;
- if (!ops)
- return -EOPNOTSUPP;
-
if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) {
if (!ops->eswitch_mode_set)
return -EOPNOTSUPP;
@@ -2656,6 +2655,27 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
return devlink->ops->reload(devlink, info->extack);
}
+static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ const char *file_name, *component;
+ struct nlattr *nla_component;
+
+ if (!devlink->ops->flash_update)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME])
+ return -EINVAL;
+ file_name = nla_data(info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME]);
+
+ nla_component = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT];
+ component = nla_component ? nla_data(nla_component) : NULL;
+
+ return devlink->ops->flash_update(devlink, file_name, component,
+ info->extack);
+}
+
static const struct devlink_param devlink_param_generic[] = {
{
.id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
@@ -2692,6 +2712,11 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME,
.type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE,
},
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ .name = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME,
+ .type = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE,
+ },
};
static int devlink_param_generic_verify(const struct devlink_param *param)
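
Two hedged driver-side sketches for the devlink additions above (the ops wiring and example_* names are illustrative; request_firmware()/release_firmware() are the standard firmware-loader API, not part of this patch). First, implementing the new flash_update op; second, registering the new generic fw_load_policy parameter via DEVLINK_PARAM_GENERIC() — the driverinit-only cmode choice here is an assumption:

static int example_flash_update(struct devlink *devlink,
                                const char *file_name, const char *component,
                                struct netlink_ext_ack *extack)
{
        const struct firmware *fw;
        int err;

        if (component) {
                NL_SET_ERR_MSG(extack, "component update not supported");
                return -EOPNOTSUPP;
        }

        err = request_firmware(&fw, file_name, devlink->dev);
        if (err)
                return err;
        /* ... stream fw->data / fw->size into the device ... */
        release_firmware(fw);
        return 0;
}

static const struct devlink_ops example_devlink_ops = {
        .flash_update   = example_flash_update,
};

static const struct devlink_param example_params[] = {
        DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
                              BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
                              NULL, NULL, NULL),
};

/* during probe, after devlink_register():
 *      devlink_params_register(devlink, example_params,
 *                              ARRAY_SIZE(example_params));
 */
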
@@ -2838,11 +2863,13 @@ nla_put_failure:
}
static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
+ unsigned int port_index,
struct devlink_param_item *param_item,
enum devlink_command cmd,
u32 portid, u32 seq, int flags)
{
union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1];
+ bool param_value_set[DEVLINK_PARAM_CMODE_MAX + 1] = {};
const struct devlink_param *param = param_item->param;
struct devlink_param_gset_ctx ctx;
struct nlattr *param_values_list;
@@ -2861,12 +2888,15 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
return -EOPNOTSUPP;
param_value[i] = param_item->driverinit_value;
} else {
+ if (!param_item->published)
+ continue;
ctx.cmode = i;
err = devlink_param_get(devlink, param, &ctx);
if (err)
return err;
param_value[i] = ctx.val;
}
+ param_value_set[i] = true;
}
hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
@@ -2875,6 +2905,13 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
if (devlink_nl_put_handle(msg, devlink))
goto genlmsg_cancel;
+
+ if (cmd == DEVLINK_CMD_PORT_PARAM_GET ||
+ cmd == DEVLINK_CMD_PORT_PARAM_NEW ||
+ cmd == DEVLINK_CMD_PORT_PARAM_DEL)
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, port_index))
+ goto genlmsg_cancel;
+
param_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM);
if (!param_attr)
goto genlmsg_cancel;
@@ -2894,7 +2931,7 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
goto param_nest_cancel;
for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
- if (!devlink_param_cmode_is_supported(param, i))
+ if (!param_value_set[i])
continue;
err = devlink_nl_param_value_fill_one(msg, param->type,
i, param_value[i]);
@@ -2917,18 +2954,22 @@ genlmsg_cancel:
}
static void devlink_param_notify(struct devlink *devlink,
+ unsigned int port_index,
struct devlink_param_item *param_item,
enum devlink_command cmd)
{
struct sk_buff *msg;
int err;
- WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL);
+ WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL &&
+ cmd != DEVLINK_CMD_PORT_PARAM_NEW &&
+ cmd != DEVLINK_CMD_PORT_PARAM_DEL);
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
- err = devlink_nl_param_fill(msg, devlink, param_item, cmd, 0, 0, 0);
+ err = devlink_nl_param_fill(msg, devlink, port_index, param_item, cmd,
+ 0, 0, 0);
if (err) {
nlmsg_free(msg);
return;
@@ -2957,7 +2998,7 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
idx++;
continue;
}
- err = devlink_nl_param_fill(msg, devlink, param_item,
+ err = devlink_nl_param_fill(msg, devlink, 0, param_item,
DEVLINK_CMD_PARAM_GET,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
@@ -3046,7 +3087,7 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
}
static struct devlink_param_item *
-devlink_param_get_from_info(struct devlink *devlink,
+devlink_param_get_from_info(struct list_head *param_list,
struct genl_info *info)
{
char *param_name;
@@ -3055,7 +3096,7 @@ devlink_param_get_from_info(struct devlink *devlink,
return NULL;
param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]);
- return devlink_param_find_by_name(&devlink->param_list, param_name);
+ return devlink_param_find_by_name(param_list, param_name);
}
static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
@@ -3066,7 +3107,7 @@ static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
struct sk_buff *msg;
int err;
- param_item = devlink_param_get_from_info(devlink, info);
+ param_item = devlink_param_get_from_info(&devlink->param_list, info);
if (!param_item)
return -EINVAL;
@@ -3074,7 +3115,7 @@ static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
if (!msg)
return -ENOMEM;
- err = devlink_nl_param_fill(msg, devlink, param_item,
+ err = devlink_nl_param_fill(msg, devlink, 0, param_item,
DEVLINK_CMD_PARAM_GET,
info->snd_portid, info->snd_seq, 0);
if (err) {
@@ -3085,10 +3126,12 @@ static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
return genlmsg_reply(msg, info);
}
-static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
- struct genl_info *info)
+static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
+ unsigned int port_index,
+ struct list_head *param_list,
+ struct genl_info *info,
+ enum devlink_command cmd)
{
- struct devlink *devlink = info->user_ptr[0];
enum devlink_param_type param_type;
struct devlink_param_gset_ctx ctx;
enum devlink_param_cmode cmode;
@@ -3097,7 +3140,7 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
union devlink_param_value value;
int err = 0;
- param_item = devlink_param_get_from_info(devlink, info);
+ param_item = devlink_param_get_from_info(param_list, info);
if (!param_item)
return -EINVAL;
param = param_item->param;
@@ -3137,17 +3180,28 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
return err;
}
- devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+ devlink_param_notify(devlink, port_index, param_item, cmd);
return 0;
}
+static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+
+ return __devlink_nl_cmd_param_set_doit(devlink, 0, &devlink->param_list,
+ info, DEVLINK_CMD_PARAM_NEW);
+}
+
static int devlink_param_register_one(struct devlink *devlink,
- const struct devlink_param *param)
+ unsigned int port_index,
+ struct list_head *param_list,
+ const struct devlink_param *param,
+ enum devlink_command cmd)
{
struct devlink_param_item *param_item;
- if (devlink_param_find_by_name(&devlink->param_list,
- param->name))
+ if (devlink_param_find_by_name(param_list, param->name))
return -EEXIST;
if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
@@ -3160,24 +3214,111 @@ static int devlink_param_register_one(struct devlink *devlink,
return -ENOMEM;
param_item->param = param;
- list_add_tail(&param_item->list, &devlink->param_list);
- devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+ list_add_tail(&param_item->list, param_list);
+ devlink_param_notify(devlink, port_index, param_item, cmd);
return 0;
}
static void devlink_param_unregister_one(struct devlink *devlink,
- const struct devlink_param *param)
+ unsigned int port_index,
+ struct list_head *param_list,
+ const struct devlink_param *param,
+ enum devlink_command cmd)
{
struct devlink_param_item *param_item;
- param_item = devlink_param_find_by_name(&devlink->param_list,
- param->name);
+ param_item = devlink_param_find_by_name(param_list, param->name);
WARN_ON(!param_item);
- devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_DEL);
+ devlink_param_notify(devlink, port_index, param_item, cmd);
list_del(&param_item->list);
kfree(param_item);
}
+static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink_param_item *param_item;
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(devlink_port, &devlink->port_list, list) {
+ list_for_each_entry(param_item,
+ &devlink_port->param_list, list) {
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_param_fill(msg,
+ devlink_port->devlink,
+ devlink_port->index, param_item,
+ DEVLINK_CMD_PORT_PARAM_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ mutex_unlock(&devlink->lock);
+ goto out;
+ }
+ idx++;
+ }
+ }
+ mutex_unlock(&devlink->lock);
+ }
+out:
+ mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[0];
+ struct devlink_param_item *param_item;
+ struct sk_buff *msg;
+ int err;
+
+ param_item = devlink_param_get_from_info(&devlink_port->param_list,
+ info);
+ if (!param_item)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_param_fill(msg, devlink_port->devlink,
+ devlink_port->index, param_item,
+ DEVLINK_CMD_PORT_PARAM_GET,
+ info->snd_portid, info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[0];
+
+ return __devlink_nl_cmd_param_set_doit(devlink_port->devlink,
+ devlink_port->index,
+ &devlink_port->param_list, info,
+ DEVLINK_CMD_PORT_PARAM_NEW);
+}
+
static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
struct devlink *devlink,
struct devlink_snapshot *snapshot)
@@ -3499,44 +3640,56 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
u64 ret_offset, start_offset, end_offset = 0;
- struct nlattr *attrs[DEVLINK_ATTR_MAX + 1];
const struct genl_ops *ops = cb->data;
struct devlink_region *region;
struct nlattr *chunks_attr;
const char *region_name;
struct devlink *devlink;
+ struct nlattr **attrs;
bool dump = true;
void *hdr;
int err;
start_offset = *((u64 *)&cb->args[0]);
+ attrs = kmalloc_array(DEVLINK_ATTR_MAX + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize,
attrs, DEVLINK_ATTR_MAX, ops->policy, cb->extack);
if (err)
- goto out;
+ goto out_free;
+ mutex_lock(&devlink_mutex);
devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
- if (IS_ERR(devlink))
- goto out;
+ if (IS_ERR(devlink)) {
+ err = PTR_ERR(devlink);
+ goto out_dev;
+ }
- mutex_lock(&devlink_mutex);
mutex_lock(&devlink->lock);
if (!attrs[DEVLINK_ATTR_REGION_NAME] ||
- !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID])
+ !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) {
+ err = -EINVAL;
goto out_unlock;
+ }
region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
region = devlink_region_get_by_name(devlink, region_name);
- if (!region)
+ if (!region) {
+ err = -EINVAL;
goto out_unlock;
+ }
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
DEVLINK_CMD_REGION_READ);
- if (!hdr)
+ if (!hdr) {
+ err = -EMSGSIZE;
goto out_unlock;
+ }
err = devlink_nl_put_handle(skb, devlink);
if (err)
@@ -3547,8 +3700,10 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
goto nla_put_failure;
chunks_attr = nla_nest_start(skb, DEVLINK_ATTR_REGION_CHUNKS);
- if (!chunks_attr)
+ if (!chunks_attr) {
+ err = -EMSGSIZE;
goto nla_put_failure;
+ }
if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] &&
attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) {
@@ -3571,8 +3726,10 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
goto nla_put_failure;
/* Check if there was any progress done to prevent infinite loop */
- if (ret_offset == start_offset)
+ if (ret_offset == start_offset) {
+ err = -EINVAL;
goto nla_put_failure;
+ }
*((u64 *)&cb->args[0]) = ret_offset;
@@ -3580,6 +3737,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
genlmsg_end(skb, hdr);
mutex_unlock(&devlink->lock);
mutex_unlock(&devlink_mutex);
+ kfree(attrs);
return skb->len;
@@ -3587,8 +3745,1144 @@ nla_put_failure:
genlmsg_cancel(skb, hdr);
out_unlock:
mutex_unlock(&devlink->lock);
+out_dev:
+ mutex_unlock(&devlink_mutex);
+out_free:
+ kfree(attrs);
+ return err;
+}
+
+struct devlink_info_req {
+ struct sk_buff *msg;
+};
+
+int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name)
+{
+ return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name);
+}
+EXPORT_SYMBOL_GPL(devlink_info_driver_name_put);
+
+int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
+{
+ return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn);
+}
+EXPORT_SYMBOL_GPL(devlink_info_serial_number_put);
+
+static int devlink_info_version_put(struct devlink_info_req *req, int attr,
+ const char *version_name,
+ const char *version_value)
+{
+ struct nlattr *nest;
+ int err;
+
+ nest = nla_nest_start(req->msg, attr);
+ if (!nest)
+ return -EMSGSIZE;
+
+ err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME,
+ version_name);
+ if (err)
+ goto nla_put_failure;
+
+ err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE,
+ version_value);
+ if (err)
+ goto nla_put_failure;
+
+ nla_nest_end(req->msg, nest);
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(req->msg, nest);
+ return err;
+}
+
+int devlink_info_version_fixed_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED,
+ version_name, version_value);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put);
+
+int devlink_info_version_stored_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
+ version_name, version_value);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_stored_put);
+
+int devlink_info_version_running_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
+ version_name, version_value);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_running_put);
+
+static int
+devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags, struct netlink_ext_ack *extack)
+{
+ struct devlink_info_req req;
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ err = -EMSGSIZE;
+ if (devlink_nl_put_handle(msg, devlink))
+ goto err_cancel_msg;
+
+ req.msg = msg;
+ err = devlink->ops->info_get(devlink, &req, extack);
+ if (err)
+ goto err_cancel_msg;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+static int devlink_nl_cmd_info_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct sk_buff *msg;
+ int err;
+
+ if (!devlink->ops->info_get)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
+ info->snd_portid, info->snd_seq, 0,
+ info->extack);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+
+ mutex_lock(&devlink->lock);
+ err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ cb->extack);
+ mutex_unlock(&devlink->lock);
+ if (err)
+ break;
+ idx++;
+ }
+ mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
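For illustration (not part of this patch), a driver's info_get() callback built on the helpers above might look as follows; the "foo" driver name and the version strings are hypothetical:

	static int foo_devlink_info_get(struct devlink *devlink,
					struct devlink_info_req *req,
					struct netlink_ext_ack *extack)
	{
		int err;

		/* emitted as DEVLINK_ATTR_INFO_DRIVER_NAME */
		err = devlink_info_driver_name_put(req, "foo");
		if (err)
			return err;
		err = devlink_info_serial_number_put(req, "A1B2C3D4");
		if (err)
			return err;
		/* "fw.app" is an illustrative version name */
		return devlink_info_version_running_put(req, "fw.app", "1.2.3");
	}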
+struct devlink_fmsg_item {
+ struct list_head list;
+ int attrtype;
+ u8 nla_type;
+ u16 len;
+ int value[0];
+};
+
+struct devlink_fmsg {
+ struct list_head item_list;
+};
+
+static struct devlink_fmsg *devlink_fmsg_alloc(void)
+{
+ struct devlink_fmsg *fmsg;
+
+ fmsg = kzalloc(sizeof(*fmsg), GFP_KERNEL);
+ if (!fmsg)
+ return NULL;
+
+ INIT_LIST_HEAD(&fmsg->item_list);
+
+ return fmsg;
+}
+
+static void devlink_fmsg_free(struct devlink_fmsg *fmsg)
+{
+ struct devlink_fmsg_item *item, *tmp;
+
+ list_for_each_entry_safe(item, tmp, &fmsg->item_list, list) {
+ list_del(&item->list);
+ kfree(item);
+ }
+ kfree(fmsg);
+}
+
+static int devlink_fmsg_nest_common(struct devlink_fmsg *fmsg,
+ int attrtype)
+{
+ struct devlink_fmsg_item *item;
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ item->attrtype = attrtype;
+ list_add_tail(&item->list, &fmsg->item_list);
+
+ return 0;
+}
+
+int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg)
+{
+ return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start);
+
+static int devlink_fmsg_nest_end(struct devlink_fmsg *fmsg)
+{
+ return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END);
+}
+
+int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg)
+{
+ return devlink_fmsg_nest_end(fmsg);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end);
+
+#define DEVLINK_FMSG_MAX_SIZE (GENLMSG_DEFAULT_SIZE - GENL_HDRLEN - NLA_HDRLEN)
+
+static int devlink_fmsg_put_name(struct devlink_fmsg *fmsg, const char *name)
+{
+ struct devlink_fmsg_item *item;
+
+ if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE)
+ return -EMSGSIZE;
+
+ item = kzalloc(sizeof(*item) + strlen(name) + 1, GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ item->nla_type = NLA_NUL_STRING;
+ item->len = strlen(name) + 1;
+ item->attrtype = DEVLINK_ATTR_FMSG_OBJ_NAME;
+ memcpy(&item->value, name, item->len);
+ list_add_tail(&item->list, &fmsg->item_list);
+
+ return 0;
+}
+
+int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_put_name(fmsg, name);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_start);
+
+int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg)
+{
+ return devlink_fmsg_nest_end(fmsg);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end);
+
+int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_ARR_NEST_START);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_start);
+
+int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end);
+
+static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg,
+ const void *value, u16 value_len,
+ u8 value_nla_type)
+{
+ struct devlink_fmsg_item *item;
+
+ if (value_len > DEVLINK_FMSG_MAX_SIZE)
+ return -EMSGSIZE;
+
+ item = kzalloc(sizeof(*item) + value_len, GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ item->nla_type = value_nla_type;
+ item->len = value_len;
+ item->attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
+ memcpy(&item->value, value, item->len);
+ list_add_tail(&item->list, &fmsg->item_list);
+
+ return 0;
+}
+
+int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value)
+{
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_bool_put);
+
+int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value)
+{
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u8_put);
+
+int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value)
+{
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U32);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put);
+
+int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value)
+{
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u64_put);
+
+int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
+{
+ return devlink_fmsg_put_value(fmsg, value, strlen(value) + 1,
+ NLA_NUL_STRING);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
+
+int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len)
+{
+ return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
+
+int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ bool value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_bool_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_bool_pair_put);
+
+int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u8 value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u8_pair_put);
+
+int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u32 value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u32_pair_put);
+
+int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u64 value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u64_pair_put);
+
+int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const char *value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_string_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_string_pair_put);
+
+int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const void *value, u16 value_len)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_binary_put(fmsg, value, value_len);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put);
+
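For illustration (not part of this patch), a health reporter's .dump() callback would compose its report from these helpers; the core wraps the callback in an object nest (see devlink_health_do_dump() below), so it only emits its own items. The "foo" names and fields are hypothetical:

	static int foo_reporter_dump(struct devlink_health_reporter *reporter,
				     struct devlink_fmsg *fmsg, void *priv_ctx)
	{
		int err;

		err = devlink_fmsg_u32_pair_put(fmsg, "irqn", 42);
		if (err)
			return err;
		/* an array pair: "ring" -> [head, tail] */
		err = devlink_fmsg_arr_pair_nest_start(fmsg, "ring");
		if (err)
			return err;
		err = devlink_fmsg_u64_put(fmsg, 0);	/* head */
		if (err)
			return err;
		err = devlink_fmsg_u64_put(fmsg, 128);	/* tail */
		if (err)
			return err;
		return devlink_fmsg_arr_pair_nest_end(fmsg);
	}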
+static int
+devlink_fmsg_item_fill_type(struct devlink_fmsg_item *msg, struct sk_buff *skb)
+{
+ switch (msg->nla_type) {
+ case NLA_FLAG:
+ case NLA_U8:
+ case NLA_U32:
+ case NLA_U64:
+ case NLA_NUL_STRING:
+ case NLA_BINARY:
+ return nla_put_u8(skb, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE,
+ msg->nla_type);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+devlink_fmsg_item_fill_data(struct devlink_fmsg_item *msg, struct sk_buff *skb)
+{
+ int attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
+ u8 tmp;
+
+ switch (msg->nla_type) {
+ case NLA_FLAG:
+ /* Always provide flag data, regardless of its value */
+ tmp = *(bool *) msg->value;
+
+ return nla_put_u8(skb, attrtype, tmp);
+ case NLA_U8:
+ return nla_put_u8(skb, attrtype, *(u8 *) msg->value);
+ case NLA_U32:
+ return nla_put_u32(skb, attrtype, *(u32 *) msg->value);
+ case NLA_U64:
+ return nla_put_u64_64bit(skb, attrtype, *(u64 *) msg->value,
+ DEVLINK_ATTR_PAD);
+ case NLA_NUL_STRING:
+ return nla_put_string(skb, attrtype, (char *) &msg->value);
+ case NLA_BINARY:
+ return nla_put(skb, attrtype, msg->len, (void *) &msg->value);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+devlink_fmsg_prepare_skb(struct devlink_fmsg *fmsg, struct sk_buff *skb,
+ int *start)
+{
+ struct devlink_fmsg_item *item;
+ struct nlattr *fmsg_nlattr;
+ int i = 0;
+ int err;
+
+ fmsg_nlattr = nla_nest_start(skb, DEVLINK_ATTR_FMSG);
+ if (!fmsg_nlattr)
+ return -EMSGSIZE;
+
+ list_for_each_entry(item, &fmsg->item_list, list) {
+ if (i < *start) {
+ i++;
+ continue;
+ }
+
+ switch (item->attrtype) {
+ case DEVLINK_ATTR_FMSG_OBJ_NEST_START:
+ case DEVLINK_ATTR_FMSG_PAIR_NEST_START:
+ case DEVLINK_ATTR_FMSG_ARR_NEST_START:
+ case DEVLINK_ATTR_FMSG_NEST_END:
+ err = nla_put_flag(skb, item->attrtype);
+ break;
+ case DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA:
+ err = devlink_fmsg_item_fill_type(item, skb);
+ if (err)
+ break;
+ err = devlink_fmsg_item_fill_data(item, skb);
+ break;
+ case DEVLINK_ATTR_FMSG_OBJ_NAME:
+ err = nla_put_string(skb, item->attrtype,
+ (char *) &item->value);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ if (!err)
+ *start = ++i;
+ else
+ break;
+ }
+
+ nla_nest_end(skb, fmsg_nlattr);
+ return err;
+}
+
+static int devlink_fmsg_snd(struct devlink_fmsg *fmsg,
+ struct genl_info *info,
+ enum devlink_command cmd, int flags)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ bool last = false;
+ int index = 0;
+ void *hdr;
+ int err;
+
+ while (!last) {
+ int tmp_index = index;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+ &devlink_nl_family, flags | NLM_F_MULTI, cmd);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto nla_put_failure;
+ }
+
+ err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
+ if (!err)
+ last = true;
+ else if (err != -EMSGSIZE || tmp_index == index)
+ goto nla_put_failure;
+
+ genlmsg_end(skb, hdr);
+ err = genlmsg_reply(skb, info);
+ if (err)
+ return err;
+ }
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+ NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = -EMSGSIZE;
+ goto nla_put_failure;
+ }
+
+ return genlmsg_reply(skb, info);
+
+nla_put_failure:
+ nlmsg_free(skb);
+ return err;
+}
+
+struct devlink_health_reporter {
+ struct list_head list;
+ void *priv;
+ const struct devlink_health_reporter_ops *ops;
+ struct devlink *devlink;
+ struct devlink_fmsg *dump_fmsg;
+ struct mutex dump_lock; /* lock parallel read/write from dump buffers */
+ u64 graceful_period;
+ bool auto_recover;
+ u8 health_state;
+ u64 dump_ts;
+ u64 error_count;
+ u64 recovery_count;
+ u64 last_recovery_ts;
+};
+
+void *
+devlink_health_reporter_priv(struct devlink_health_reporter *reporter)
+{
+ return reporter->priv;
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_priv);
+
+static struct devlink_health_reporter *
+devlink_health_reporter_find_by_name(struct devlink *devlink,
+ const char *reporter_name)
+{
+ struct devlink_health_reporter *reporter;
+
+ list_for_each_entry(reporter, &devlink->reporter_list, list)
+ if (!strcmp(reporter->ops->name, reporter_name))
+ return reporter;
+ return NULL;
+}
+
+/**
+ * devlink_health_reporter_create - create devlink health reporter
+ *
+ * @devlink: devlink
+ * @ops: devlink health reporter ops
+ * @graceful_period: minimum time between recovery attempts, in msecs,
+ *                   to avoid recovery loops
+ * @auto_recover: recover automatically when an error is reported
+ * @priv: driver private data for the reporter
+ */
+struct devlink_health_reporter *
+devlink_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, bool auto_recover,
+ void *priv)
+{
+ struct devlink_health_reporter *reporter;
+
+ mutex_lock(&devlink->lock);
+ if (devlink_health_reporter_find_by_name(devlink, ops->name)) {
+ reporter = ERR_PTR(-EEXIST);
+ goto unlock;
+ }
+
+ if (WARN_ON(auto_recover && !ops->recover) ||
+ WARN_ON(graceful_period && !ops->recover)) {
+ reporter = ERR_PTR(-EINVAL);
+ goto unlock;
+ }
+
+ reporter = kzalloc(sizeof(*reporter), GFP_KERNEL);
+ if (!reporter) {
+ reporter = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+
+ reporter->priv = priv;
+ reporter->ops = ops;
+ reporter->devlink = devlink;
+ reporter->graceful_period = graceful_period;
+ reporter->auto_recover = auto_recover;
+ mutex_init(&reporter->dump_lock);
+ list_add_tail(&reporter->list, &devlink->reporter_list);
+unlock:
+ mutex_unlock(&devlink->lock);
+ return reporter;
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
+
+/**
+ * devlink_health_reporter_destroy - destroy devlink health reporter
+ *
+ * @reporter: devlink health reporter to destroy
+ */
+void
+devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
+{
+ mutex_lock(&reporter->devlink->lock);
+ list_del(&reporter->list);
+ mutex_unlock(&reporter->devlink->lock);
+ if (reporter->dump_fmsg)
+ devlink_fmsg_free(reporter->dump_fmsg);
+ kfree(reporter);
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy);
+
+void
+devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
+ enum devlink_health_reporter_state state)
+{
+ if (WARN_ON(state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY &&
+ state != DEVLINK_HEALTH_REPORTER_STATE_ERROR))
+ return;
+
+ if (reporter->health_state == state)
+ return;
+
+ reporter->health_state = state;
+ trace_devlink_health_reporter_state_update(reporter->devlink,
+ reporter->ops->name, state);
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
+
+static int
+devlink_health_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ int err;
+
+ if (!reporter->ops->recover)
+ return -EOPNOTSUPP;
+
+ err = reporter->ops->recover(reporter, priv_ctx);
+ if (err)
+ return err;
+
+ reporter->recovery_count++;
+ reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
+ reporter->last_recovery_ts = jiffies;
+
+ return 0;
+}
+
+static void
+devlink_health_dump_clear(struct devlink_health_reporter *reporter)
+{
+ if (!reporter->dump_fmsg)
+ return;
+ devlink_fmsg_free(reporter->dump_fmsg);
+ reporter->dump_fmsg = NULL;
+}
+
+static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ int err;
+
+ if (!reporter->ops->dump)
+ return 0;
+
+ if (reporter->dump_fmsg)
+ return 0;
+
+ reporter->dump_fmsg = devlink_fmsg_alloc();
+ if (!reporter->dump_fmsg) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ err = devlink_fmsg_obj_nest_start(reporter->dump_fmsg);
+ if (err)
+ goto dump_err;
+
+ err = reporter->ops->dump(reporter, reporter->dump_fmsg,
+ priv_ctx);
+ if (err)
+ goto dump_err;
+
+ err = devlink_fmsg_obj_nest_end(reporter->dump_fmsg);
+ if (err)
+ goto dump_err;
+
+ reporter->dump_ts = jiffies;
+
+ return 0;
+
+dump_err:
+ devlink_health_dump_clear(reporter);
+ return err;
+}
+
+int devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ enum devlink_health_reporter_state prev_health_state;
+ struct devlink *devlink = reporter->devlink;
+
+ /* write a log message of the current error */
+ WARN_ON(!msg);
+ trace_devlink_health_report(devlink, reporter->ops->name, msg);
+ reporter->error_count++;
+ prev_health_state = reporter->health_state;
+ reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+
+ /* abort if the previous error wasn't recovered */
+ if (reporter->auto_recover &&
+ (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
+ jiffies - reporter->last_recovery_ts <
+ msecs_to_jiffies(reporter->graceful_period))) {
+ trace_devlink_health_recover_aborted(devlink,
+ reporter->ops->name,
+ reporter->health_state,
+ jiffies -
+ reporter->last_recovery_ts);
+ return -ECANCELED;
+ }
+
+ mutex_lock(&reporter->dump_lock);
+ /* store a dump of the current error, for later analysis */
+ devlink_health_do_dump(reporter, priv_ctx);
+ mutex_unlock(&reporter->dump_lock);
+
+ if (reporter->auto_recover)
+ return devlink_health_reporter_recover(reporter, priv_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_health_report);
+
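For illustration (not part of this patch), the intended driver-side flow for the reporter API, reusing the .dump() sketch above; the "foo" names, the 500 msec period and the error context are hypothetical:

	static int foo_tx_recover(struct devlink_health_reporter *reporter,
				  void *priv_ctx)
	{
		/* reset the TX queue here; returning 0 lets the core mark
		 * the reporter healthy and bump recovery_count
		 */
		return 0;
	}

	static const struct devlink_health_reporter_ops foo_tx_reporter_ops = {
		.name = "tx",
		.recover = foo_tx_recover,
		.dump = foo_reporter_dump,
	};

	/* probe: auto-recover, but at most once per 500 msecs */
	reporter = devlink_health_reporter_create(devlink, &foo_tx_reporter_ops,
						  500, true, priv);

	/* error path: logs a trace event, stores a dump, auto-recovers */
	devlink_health_report(reporter, "TX timeout", &err_ctx);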
+static struct devlink_health_reporter *
+devlink_health_reporter_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ char *reporter_name;
+
+ if (!info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME])
+ return NULL;
+
+ reporter_name =
+ nla_data(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]);
+ return devlink_health_reporter_find_by_name(devlink, reporter_name);
+}
+
+static int
+devlink_nl_health_reporter_fill(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct devlink_health_reporter *reporter,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags)
+{
+ struct nlattr *reporter_attr;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto genlmsg_cancel;
+
+ reporter_attr = nla_nest_start(msg, DEVLINK_ATTR_HEALTH_REPORTER);
+ if (!reporter_attr)
+ goto genlmsg_cancel;
+ if (nla_put_string(msg, DEVLINK_ATTR_HEALTH_REPORTER_NAME,
+ reporter->ops->name))
+ goto reporter_nest_cancel;
+ if (nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_STATE,
+ reporter->health_state))
+ goto reporter_nest_cancel;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT,
+ reporter->error_count, DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT,
+ reporter->recovery_count, DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (reporter->ops->recover &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD,
+ reporter->graceful_period,
+ DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (reporter->ops->recover &&
+ nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER,
+ reporter->auto_recover))
+ goto reporter_nest_cancel;
+ if (reporter->dump_fmsg &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS,
+ jiffies_to_msecs(reporter->dump_ts),
+ DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+
+ nla_nest_end(msg, reporter_attr);
+ genlmsg_end(msg, hdr);
+ return 0;
+
+reporter_nest_cancel:
+ nla_nest_cancel(msg, reporter_attr);
+genlmsg_cancel:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+ struct sk_buff *msg;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_health_reporter_fill(msg, devlink, reporter,
+ DEVLINK_CMD_HEALTH_REPORTER_GET,
+ info->snd_portid, info->snd_seq,
+ 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int
+devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink_health_reporter *reporter;
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(reporter, &devlink->reporter_list,
+ list) {
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_health_reporter_fill(msg, devlink,
+ reporter,
+ DEVLINK_CMD_HEALTH_REPORTER_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ mutex_unlock(&devlink->lock);
+ goto out;
+ }
+ idx++;
+ }
+ mutex_unlock(&devlink->lock);
+ }
+out:
mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int
+devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->recover &&
+ (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] ||
+ info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]))
+ return -EOPNOTSUPP;
+
+ if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD])
+ reporter->graceful_period =
+ nla_get_u64(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]);
+
+ if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])
+ reporter->auto_recover =
+ nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]);
+
+ return 0;
+}
+
+static int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ return devlink_health_reporter_recover(reporter, NULL);
+}
+
+static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+ struct devlink_fmsg *fmsg;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->diagnose)
+ return -EOPNOTSUPP;
+
+ fmsg = devlink_fmsg_alloc();
+ if (!fmsg)
+ return -ENOMEM;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ goto out;
+
+ err = reporter->ops->diagnose(reporter, fmsg);
+ if (err)
+ goto out;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ goto out;
+
+ err = devlink_fmsg_snd(fmsg, info,
+ DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE, 0);
+
+out:
+ devlink_fmsg_free(fmsg);
+ return err;
+}
+
+static int devlink_nl_cmd_health_reporter_dump_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->dump)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&reporter->dump_lock);
+ err = devlink_health_do_dump(reporter, NULL);
+ if (err)
+ goto out;
+
+ err = devlink_fmsg_snd(reporter->dump_fmsg, info,
+ DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET, 0);
+
out:
+ mutex_unlock(&reporter->dump_lock);
+ return err;
+}
+
+static int
+devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->dump)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&reporter->dump_lock);
+ devlink_health_dump_clear(reporter);
+ mutex_unlock(&reporter->dump_lock);
return 0;
}
@@ -3617,6 +4911,11 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING },
};
static const struct genl_ops devlink_nl_ops[] = {
@@ -3816,6 +5115,21 @@ static const struct genl_ops devlink_nl_ops[] = {
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
+ .cmd = DEVLINK_CMD_PORT_PARAM_GET,
+ .doit = devlink_nl_cmd_port_param_get_doit,
+ .dumpit = devlink_nl_cmd_port_param_get_dumpit,
+ .policy = devlink_nl_policy,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_PARAM_SET,
+ .doit = devlink_nl_cmd_port_param_set_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ },
+ {
.cmd = DEVLINK_CMD_REGION_GET,
.doit = devlink_nl_cmd_region_get_doit,
.dumpit = devlink_nl_cmd_region_get_dumpit,
@@ -3837,6 +5151,66 @@ static const struct genl_ops devlink_nl_ops[] = {
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
+ {
+ .cmd = DEVLINK_CMD_INFO_GET,
+ .doit = devlink_nl_cmd_info_get_doit,
+ .dumpit = devlink_nl_cmd_info_get_dumpit,
+ .policy = devlink_nl_policy,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
+ .doit = devlink_nl_cmd_health_reporter_get_doit,
+ .dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
+ .policy = devlink_nl_policy,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
+ .doit = devlink_nl_cmd_health_reporter_set_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
+ .doit = devlink_nl_cmd_health_reporter_recover_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
+ .doit = devlink_nl_cmd_health_reporter_diagnose_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
+ .doit = devlink_nl_cmd_health_reporter_dump_get_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
+ .doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
+ },
+ {
+ .cmd = DEVLINK_CMD_FLASH_UPDATE,
+ .doit = devlink_nl_cmd_flash_update,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
};
static struct genl_family devlink_nl_family __ro_after_init = {
@@ -3866,6 +5240,9 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
{
struct devlink *devlink;
+ if (WARN_ON(!ops))
+ return NULL;
+
devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
if (!devlink)
return NULL;
@@ -3877,6 +5254,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
INIT_LIST_HEAD(&devlink->resource_list);
INIT_LIST_HEAD(&devlink->param_list);
INIT_LIST_HEAD(&devlink->region_list);
+ INIT_LIST_HEAD(&devlink->reporter_list);
mutex_init(&devlink->lock);
return devlink;
}
@@ -3886,6 +5264,7 @@ EXPORT_SYMBOL_GPL(devlink_alloc);
* devlink_register - Register devlink instance
*
* @devlink: devlink
+ * @dev: parent device
*/
int devlink_register(struct devlink *devlink, struct device *dev)
{
@@ -3919,6 +5298,14 @@ EXPORT_SYMBOL_GPL(devlink_unregister);
*/
void devlink_free(struct devlink *devlink)
{
+ WARN_ON(!list_empty(&devlink->reporter_list));
+ WARN_ON(!list_empty(&devlink->region_list));
+ WARN_ON(!list_empty(&devlink->param_list));
+ WARN_ON(!list_empty(&devlink->resource_list));
+ WARN_ON(!list_empty(&devlink->dpipe_table_list));
+ WARN_ON(!list_empty(&devlink->sb_list));
+ WARN_ON(!list_empty(&devlink->port_list));
+
kfree(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);
@@ -3928,7 +5315,7 @@ EXPORT_SYMBOL_GPL(devlink_free);
*
* @devlink: devlink
* @devlink_port: devlink port
- * @port_index
+ * @port_index: driver-specific numerical identifier of the port
*
* Register devlink port with provided port index. User can use
* any indexing, even hw-related one. devlink_port structure
@@ -3949,6 +5336,7 @@ int devlink_port_register(struct devlink *devlink,
devlink_port->index = port_index;
devlink_port->registered = true;
list_add_tail(&devlink_port->list, &devlink->port_list);
+ INIT_LIST_HEAD(&devlink_port->param_list);
mutex_unlock(&devlink->lock);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
return 0;
@@ -4257,13 +5645,10 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_table_unregister);
*
* @devlink: devlink
* @resource_name: resource's name
- * @top_hierarchy: top hierarchy
- * @reload_required: reload is required for new configuration to
- * apply
* @resource_size: resource's size
* @resource_id: resource's id
- * @parent_reosurce_id: resource's parent id
- * @size params: size parameters
+ * @parent_resource_id: resource's parent id
+ * @size_params: size parameters
*/
int devlink_resource_register(struct devlink *devlink,
const char *resource_name,
@@ -4466,18 +5851,23 @@ out:
}
EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
-/**
- * devlink_params_register - register configuration parameters
- *
- * @devlink: devlink
- * @params: configuration parameters array
- * @params_count: number of parameters provided
- *
- * Register the configuration parameters supported by the driver.
- */
-int devlink_params_register(struct devlink *devlink,
- const struct devlink_param *params,
- size_t params_count)
+static int devlink_param_verify(const struct devlink_param *param)
+{
+ if (!param || !param->name || !param->supported_cmodes)
+ return -EINVAL;
+ if (param->generic)
+ return devlink_param_generic_verify(param);
+ else
+ return devlink_param_driver_verify(param);
+}
+
+static int __devlink_params_register(struct devlink *devlink,
+ unsigned int port_index,
+ struct list_head *param_list,
+ const struct devlink_param *params,
+ size_t params_count,
+ enum devlink_command reg_cmd,
+ enum devlink_command unreg_cmd)
{
const struct devlink_param *param = params;
int i;
@@ -4485,20 +5875,12 @@ int devlink_params_register(struct devlink *devlink,
mutex_lock(&devlink->lock);
for (i = 0; i < params_count; i++, param++) {
- if (!param || !param->name || !param->supported_cmodes) {
- err = -EINVAL;
+ err = devlink_param_verify(param);
+ if (err)
goto rollback;
- }
- if (param->generic) {
- err = devlink_param_generic_verify(param);
- if (err)
- goto rollback;
- } else {
- err = devlink_param_driver_verify(param);
- if (err)
- goto rollback;
- }
- err = devlink_param_register_one(devlink, param);
+
+ err = devlink_param_register_one(devlink, port_index,
+ param_list, param, reg_cmd);
if (err)
goto rollback;
}
@@ -4510,11 +5892,48 @@ rollback:
if (!i)
goto unlock;
for (param--; i > 0; i--, param--)
- devlink_param_unregister_one(devlink, param);
+ devlink_param_unregister_one(devlink, port_index, param_list,
+ param, unreg_cmd);
unlock:
mutex_unlock(&devlink->lock);
return err;
}
+
+static void __devlink_params_unregister(struct devlink *devlink,
+ unsigned int port_index,
+ struct list_head *param_list,
+ const struct devlink_param *params,
+ size_t params_count,
+ enum devlink_command cmd)
+{
+ const struct devlink_param *param = params;
+ int i;
+
+ mutex_lock(&devlink->lock);
+ for (i = 0; i < params_count; i++, param++)
+ devlink_param_unregister_one(devlink, port_index, param_list,
+ param, cmd);
+ mutex_unlock(&devlink->lock);
+}
+
+/**
+ * devlink_params_register - register configuration parameters
+ *
+ * @devlink: devlink
+ * @params: configuration parameters array
+ * @params_count: number of parameters provided
+ *
+ * Register the configuration parameters supported by the driver.
+ */
+int devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return __devlink_params_register(devlink, 0, &devlink->param_list,
+ params, params_count,
+ DEVLINK_CMD_PARAM_NEW,
+ DEVLINK_CMD_PARAM_DEL);
+}
EXPORT_SYMBOL_GPL(devlink_params_register);
/**
@@ -4527,36 +5946,103 @@ void devlink_params_unregister(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count)
{
- const struct devlink_param *param = params;
- int i;
-
- mutex_lock(&devlink->lock);
- for (i = 0; i < params_count; i++, param++)
- devlink_param_unregister_one(devlink, param);
- mutex_unlock(&devlink->lock);
+ return __devlink_params_unregister(devlink, 0, &devlink->param_list,
+ params, params_count,
+ DEVLINK_CMD_PARAM_DEL);
}
EXPORT_SYMBOL_GPL(devlink_params_unregister);
/**
- * devlink_param_driverinit_value_get - get configuration parameter
- * value for driver initializing
+ * devlink_params_publish - publish configuration parameters
*
* @devlink: devlink
- * @param_id: parameter ID
- * @init_val: value of parameter in driverinit configuration mode
*
- * This function should be used by the driver to get driverinit
- * configuration for initialization after reload command.
+ * Publish previously registered configuration parameters.
*/
-int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
- union devlink_param_value *init_val)
+void devlink_params_publish(struct devlink *devlink)
{
struct devlink_param_item *param_item;
- if (!devlink->ops || !devlink->ops->reload)
- return -EOPNOTSUPP;
+ list_for_each_entry(param_item, &devlink->param_list, list) {
+ if (param_item->published)
+ continue;
+ param_item->published = true;
+ devlink_param_notify(devlink, 0, param_item,
+ DEVLINK_CMD_PARAM_NEW);
+ }
+}
+EXPORT_SYMBOL_GPL(devlink_params_publish);
- param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+/**
+ * devlink_params_unpublish - unpublish configuration parameters
+ *
+ * @devlink: devlink
+ *
+ * Unpublish previously registered configuration parameters.
+ */
+void devlink_params_unpublish(struct devlink *devlink)
+{
+ struct devlink_param_item *param_item;
+
+ list_for_each_entry(param_item, &devlink->param_list, list) {
+ if (!param_item->published)
+ continue;
+ param_item->published = false;
+ devlink_param_notify(devlink, 0, param_item,
+ DEVLINK_CMD_PARAM_DEL);
+ }
+}
+EXPORT_SYMBOL_GPL(devlink_params_unpublish);
+
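For illustration (not part of this patch), the publish/unpublish pair is meant to bracket driver init like this; "foo_params" and the param ID are hypothetical, and "value" is a union devlink_param_value:

	err = devlink_params_register(devlink, foo_params,
				      ARRAY_SIZE(foo_params));
	if (err)
		return err;

	/* seed driverinit defaults before exposing the params */
	value.vbool = false;
	devlink_param_driverinit_value_set(devlink, FOO_DEVLINK_PARAM_ID_X,
					   value);

	devlink_params_publish(devlink);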
+/**
+ * devlink_port_params_register - register port configuration parameters
+ *
+ * @devlink_port: devlink port
+ * @params: configuration parameters array
+ * @params_count: number of parameters provided
+ *
+ * Register the configuration parameters supported by the port.
+ */
+int devlink_port_params_register(struct devlink_port *devlink_port,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return __devlink_params_register(devlink_port->devlink,
+ devlink_port->index,
+ &devlink_port->param_list, params,
+ params_count,
+ DEVLINK_CMD_PORT_PARAM_NEW,
+ DEVLINK_CMD_PORT_PARAM_DEL);
+}
+EXPORT_SYMBOL_GPL(devlink_port_params_register);
+
+/**
+ * devlink_port_params_unregister - unregister port configuration
+ * parameters
+ *
+ * @devlink_port: devlink port
+ * @params: configuration parameters array
+ * @params_count: number of parameters provided
+ */
+void devlink_port_params_unregister(struct devlink_port *devlink_port,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return __devlink_params_unregister(devlink_port->devlink,
+ devlink_port->index,
+ &devlink_port->param_list,
+ params, params_count,
+ DEVLINK_CMD_PORT_PARAM_DEL);
+}
+EXPORT_SYMBOL_GPL(devlink_port_params_unregister);
+
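For illustration (not part of this patch), per-port registration mirrors the device-wide API; the "foo" structures and the driver-specific param below are hypothetical:

	static const struct devlink_param foo_port_params[] = {
		DEVLINK_PARAM_DRIVER(FOO_PARAM_ID_WOL, "wake_on_lan",
				     DEVLINK_PARAM_TYPE_BOOL,
				     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
				     NULL, NULL, NULL),
	};

	err = devlink_port_params_register(&foo->devlink_port, foo_port_params,
					   ARRAY_SIZE(foo_port_params));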
+static int
+__devlink_param_driverinit_value_get(struct list_head *param_list, u32 param_id,
+ union devlink_param_value *init_val)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(param_list, param_id);
if (!param_item)
return -EINVAL;
@@ -4572,6 +6058,54 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
return 0;
}
+
+static int
+__devlink_param_driverinit_value_set(struct devlink *devlink,
+ unsigned int port_index,
+ struct list_head *param_list, u32 param_id,
+ union devlink_param_value init_val,
+ enum devlink_command cmd)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(param_list, param_id);
+ if (!param_item)
+ return -EINVAL;
+
+ if (!devlink_param_cmode_is_supported(param_item->param,
+ DEVLINK_PARAM_CMODE_DRIVERINIT))
+ return -EOPNOTSUPP;
+
+ if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(param_item->driverinit_value.vstr, init_val.vstr);
+ else
+ param_item->driverinit_value = init_val;
+ param_item->driverinit_value_valid = true;
+
+ devlink_param_notify(devlink, port_index, param_item, cmd);
+ return 0;
+}
+
+/**
+ * devlink_param_driverinit_value_get - get configuration parameter
+ * value for driver initialization
+ *
+ * @devlink: devlink
+ * @param_id: parameter ID
+ * @init_val: value of parameter in driverinit configuration mode
+ *
+ * This function should be used by the driver to get the driverinit
+ * configuration for initialization after a reload command.
+ */
+int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val)
+{
+ if (!devlink->ops->reload)
+ return -EOPNOTSUPP;
+
+ return __devlink_param_driverinit_value_get(&devlink->param_list,
+ param_id, init_val);
+}
EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
/**
@@ -4589,26 +6123,61 @@ EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val)
{
- struct devlink_param_item *param_item;
+ return __devlink_param_driverinit_value_set(devlink, 0,
+ &devlink->param_list,
+ param_id, init_val,
+ DEVLINK_CMD_PARAM_NEW);
+}
+EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
- param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
- if (!param_item)
- return -EINVAL;
+/**
+ * devlink_port_param_driverinit_value_get - get configuration parameter
+ * value for driver initialization
+ *
+ * @devlink_port: devlink_port
+ * @param_id: parameter ID
+ * @init_val: value of parameter in driverinit configuration mode
+ *
+ * This function should be used by the driver to get the driverinit
+ * configuration for initialization after a reload command.
+ */
+int devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
+ u32 param_id,
+ union devlink_param_value *init_val)
+{
+ struct devlink *devlink = devlink_port->devlink;
- if (!devlink_param_cmode_is_supported(param_item->param,
- DEVLINK_PARAM_CMODE_DRIVERINIT))
+ if (!devlink->ops->reload)
return -EOPNOTSUPP;
- if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
- strcpy(param_item->driverinit_value.vstr, init_val.vstr);
- else
- param_item->driverinit_value = init_val;
- param_item->driverinit_value_valid = true;
+ return __devlink_param_driverinit_value_get(&devlink_port->param_list,
+ param_id, init_val);
+}
+EXPORT_SYMBOL_GPL(devlink_port_param_driverinit_value_get);
- devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
- return 0;
+/**
+ * devlink_port_param_driverinit_value_set - set value of configuration
+ * parameter for driverinit
+ * configuration mode
+ *
+ * @devlink_port: devlink_port
+ * @param_id: parameter ID
+ * @init_val: value of parameter to set for driverinit configuration mode
+ *
+ * This function should be used by the driver to set the default value
+ * used in driverinit configuration mode.
+ */
+int devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port,
+ u32 param_id,
+ union devlink_param_value init_val)
+{
+ return __devlink_param_driverinit_value_set(devlink_port->devlink,
+ devlink_port->index,
+ &devlink_port->param_list,
+ param_id, init_val,
+ DEVLINK_CMD_PORT_PARAM_NEW);
}
-EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
+EXPORT_SYMBOL_GPL(devlink_port_param_driverinit_value_set);
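For illustration (not part of this patch), the driverinit get/set pair for a port param, continuing the hypothetical FOO_PARAM_ID_WOL example above; foo_apply_wol() stands in for the driver's own hook:

	union devlink_param_value val;

	/* probe: seed the default value user space will see */
	val.vbool = true;
	devlink_port_param_driverinit_value_set(&foo->devlink_port,
						FOO_PARAM_ID_WOL, val);

	/* (re)load: apply whatever value user space configured */
	if (!devlink_port_param_driverinit_value_get(&foo->devlink_port,
						     FOO_PARAM_ID_WOL, &val))
		foo_apply_wol(foo, val.vbool);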
/**
* devlink_param_value_changed - notify devlink on a parameter's value
@@ -4621,7 +6190,5 @@ EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
* This function should be used by the driver to notify devlink on value
* change, excluding driverinit configuration mode.
- * For driverinit configuration mode driver should use the function
- * devlink_param_driverinit_value_set() instead.
*/
void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
{
@@ -4630,11 +6198,38 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
WARN_ON(!param_item);
- devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+ devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
}
EXPORT_SYMBOL_GPL(devlink_param_value_changed);
/**
+ * devlink_port_param_value_changed - notify devlink on a parameter's value
+ * change. Should be called by the driver
+ * right after the change.
+ *
+ * @devlink_port: devlink_port
+ * @param_id: parameter ID
+ *
+ * This function should be used by the driver to notify devlink on value
+ * change, excluding driverinit configuration mode.
+ * For driverinit configuration mode driver should use the function
+ * devlink_port_param_driverinit_value_set() instead.
+ */
+void devlink_port_param_value_changed(struct devlink_port *devlink_port,
+ u32 param_id)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(&devlink_port->param_list,
+ param_id);
+ WARN_ON(!param_item);
+
+ devlink_param_notify(devlink_port->devlink, devlink_port->index,
+ param_item, DEVLINK_CMD_PORT_PARAM_NEW);
+}
+EXPORT_SYMBOL_GPL(devlink_port_param_value_changed);
+
+/**
* devlink_param_value_str_fill - Safely fill-up the string preventing
* from overflow of the preallocated buffer
*
@@ -4750,7 +6345,7 @@ EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get);
* Multiple snapshots can be created on a region.
* The @snapshot_id should be obtained using the getter function.
*
- * @devlink_region: devlink region of the snapshot
+ * @region: devlink region of the snapshot
* @data_len: size of snapshot data
* @data: snapshot data
* @snapshot_id: snapshot id to be created
@@ -4803,20 +6398,93 @@ unlock:
}
EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
-static int __init devlink_module_init(void)
+static void __devlink_compat_running_version(struct devlink *devlink,
+ char *buf, size_t len)
{
- return genl_register_family(&devlink_nl_family);
+ const struct nlattr *nlattr;
+ struct devlink_info_req req;
+ struct sk_buff *msg;
+ int rem, err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ req.msg = msg;
+ err = devlink->ops->info_get(devlink, &req, NULL);
+ if (err)
+ goto free_msg;
+
+ nla_for_each_attr(nlattr, (void *)msg->data, msg->len, rem) {
+ const struct nlattr *kv;
+ int rem_kv;
+
+ if (nla_type(nlattr) != DEVLINK_ATTR_INFO_VERSION_RUNNING)
+ continue;
+
+ nla_for_each_nested(kv, nlattr, rem_kv) {
+ if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE)
+ continue;
+
+ strlcat(buf, nla_data(kv), len);
+ strlcat(buf, " ", len);
+ }
+ }
+free_msg:
+ nlmsg_free(msg);
+}
+
+void devlink_compat_running_version(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct devlink *devlink;
+
+ dev_hold(dev);
+ rtnl_unlock();
+
+ mutex_lock(&devlink_mutex);
+ devlink = netdev_to_devlink(dev);
+ if (!devlink || !devlink->ops->info_get)
+ goto unlock_list;
+
+ mutex_lock(&devlink->lock);
+ __devlink_compat_running_version(devlink, buf, len);
+ mutex_unlock(&devlink->lock);
+unlock_list:
+ mutex_unlock(&devlink_mutex);
+
+ rtnl_lock();
+ dev_put(dev);
}
-static void __exit devlink_module_exit(void)
+int devlink_compat_flash_update(struct net_device *dev, const char *file_name)
{
- genl_unregister_family(&devlink_nl_family);
+ struct devlink *devlink;
+ int ret = -EOPNOTSUPP;
+
+ dev_hold(dev);
+ rtnl_unlock();
+
+ mutex_lock(&devlink_mutex);
+ devlink = netdev_to_devlink(dev);
+ if (!devlink || !devlink->ops->flash_update)
+ goto unlock_list;
+
+ mutex_lock(&devlink->lock);
+ ret = devlink->ops->flash_update(devlink, file_name, NULL, NULL);
+ mutex_unlock(&devlink->lock);
+unlock_list:
+ mutex_unlock(&devlink_mutex);
+
+ rtnl_lock();
+ dev_put(dev);
+
+ return ret;
}
-module_init(devlink_module_init);
-module_exit(devlink_module_exit);
+static int __init devlink_init(void)
+{
+ return genl_register_family(&devlink_nl_family);
+}
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
-MODULE_DESCRIPTION("Network physical device Netlink interface");
-MODULE_ALIAS_GENL_FAMILY(DEVLINK_GENL_NAME);
+subsys_initcall(devlink_init);
diff --git a/net/core/dst.c b/net/core/dst.c
index 81ccf20e2826..a263309df115 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -98,8 +98,12 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
struct dst_entry *dst;
if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
- if (ops->gc(ops))
+ if (ops->gc(ops)) {
+ printk_ratelimited(KERN_NOTICE "Route cache is full: "
+ "consider increasing sysctl "
+ "net.ipv[4|6].route.max_size.\n");
return NULL;
+ }
}
dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d05402868575..d4918ffddda8 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -27,7 +27,9 @@
#include <linux/rtnetlink.h>
#include <linux/sched/signal.h>
#include <linux/net.h>
+#include <net/devlink.h>
#include <net/xdp_sock.h>
+#include <net/flow_offload.h>
/*
* Some useful ethtool_ops methods that're device independent.
@@ -793,11 +795,20 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
if (rc >= 0)
info.n_priv_flags = rc;
}
- if (ops->get_regs_len)
- info.regdump_len = ops->get_regs_len(dev);
+ if (ops->get_regs_len) {
+ int ret = ops->get_regs_len(dev);
+
+ if (ret > 0)
+ info.regdump_len = ret;
+ }
+
if (ops->get_eeprom_len)
info.eedump_len = ops->get_eeprom_len(dev);
+ if (!info.fw_version[0])
+ devlink_compat_running_version(dev, info.fw_version,
+ sizeof(info.fw_version));
+
if (copy_to_user(useraddr, &info, sizeof(info)))
return -EFAULT;
return 0;
@@ -1337,15 +1348,15 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
return -EFAULT;
reglen = ops->get_regs_len(dev);
+ if (reglen <= 0)
+ return reglen;
+
if (regs.len > reglen)
regs.len = reglen;
- regbuf = NULL;
- if (reglen) {
- regbuf = vzalloc(reglen);
- if (!regbuf)
- return -ENOMEM;
- }
+ regbuf = vzalloc(reglen);
+ if (!regbuf)
+ return -ENOMEM;
ops->get_regs(dev, &regs, regbuf);
@@ -1706,7 +1717,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
+ struct ethtool_pauseparam pauseparam = { .cmd = ETHTOOL_GPAUSEPARAM };
if (!dev->ethtool_ops->get_pauseparam)
return -EOPNOTSUPP;
@@ -2025,11 +2036,10 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
if (copy_from_user(&efl, useraddr, sizeof(efl)))
return -EFAULT;
+ efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
if (!dev->ethtool_ops->flash_device)
- return -EOPNOTSUPP;
-
- efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+ return devlink_compat_flash_update(dev, efl.data);
return dev->ethtool_ops->flash_device(dev, &efl);
}
@@ -2493,7 +2503,7 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM };
+ struct ethtool_fecparam fecparam = { .cmd = ETHTOOL_GFECPARAM };
int rc;
if (!dev->ethtool_ops->get_fecparam)
@@ -2808,3 +2818,241 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
return rc;
}
+
+struct ethtool_rx_flow_key {
+ struct flow_dissector_key_basic basic;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ struct flow_dissector_key_ipv6_addrs ipv6;
+ };
+ struct flow_dissector_key_ports tp;
+ struct flow_dissector_key_ip ip;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_eth_addrs eth_addrs;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct ethtool_rx_flow_match {
+ struct flow_dissector dissector;
+ struct ethtool_rx_flow_key key;
+ struct ethtool_rx_flow_key mask;
+};
+
+struct ethtool_rx_flow_rule *
+ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
+{
+ const struct ethtool_rx_flow_spec *fs = input->fs;
+ static struct in6_addr zero_addr = {};
+ struct ethtool_rx_flow_match *match;
+ struct ethtool_rx_flow_rule *flow;
+ struct flow_action_entry *act;
+
+ flow = kzalloc(sizeof(struct ethtool_rx_flow_rule) +
+ sizeof(struct ethtool_rx_flow_match), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ /* ethtool_rx supports only a single action per rule. */
+ flow->rule = flow_rule_alloc(1);
+ if (!flow->rule) {
+ kfree(flow);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ match = (struct ethtool_rx_flow_match *)flow->priv;
+ flow->rule->match.dissector = &match->dissector;
+ flow->rule->match.mask = &match->mask;
+ flow->rule->match.key = &match->key;
+
+ match->mask.basic.n_proto = htons(0xffff);
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
+
+ match->key.basic.n_proto = htons(ETH_P_IP);
+
+ v4_spec = &fs->h_u.tcp_ip4_spec;
+ v4_m_spec = &fs->m_u.tcp_ip4_spec;
+
+ if (v4_m_spec->ip4src) {
+ match->key.ipv4.src = v4_spec->ip4src;
+ match->mask.ipv4.src = v4_m_spec->ip4src;
+ }
+ if (v4_m_spec->ip4dst) {
+ match->key.ipv4.dst = v4_spec->ip4dst;
+ match->mask.ipv4.dst = v4_m_spec->ip4dst;
+ }
+ if (v4_m_spec->ip4src ||
+ v4_m_spec->ip4dst) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] =
+ offsetof(struct ethtool_rx_flow_key, ipv4);
+ }
+ if (v4_m_spec->psrc) {
+ match->key.tp.src = v4_spec->psrc;
+ match->mask.tp.src = v4_m_spec->psrc;
+ }
+ if (v4_m_spec->pdst) {
+ match->key.tp.dst = v4_spec->pdst;
+ match->mask.tp.dst = v4_m_spec->pdst;
+ }
+ if (v4_m_spec->psrc ||
+ v4_m_spec->pdst) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_PORTS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] =
+ offsetof(struct ethtool_rx_flow_key, tp);
+ }
+ if (v4_m_spec->tos) {
+ match->key.ip.tos = v4_spec->tos;
+ match->mask.ip.tos = v4_m_spec->tos;
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IP);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IP] =
+ offsetof(struct ethtool_rx_flow_key, ip);
+ }
+ }
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW: {
+ const struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
+
+ match->key.basic.n_proto = htons(ETH_P_IPV6);
+
+ v6_spec = &fs->h_u.tcp_ip6_spec;
+ v6_m_spec = &fs->m_u.tcp_ip6_spec;
+ if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
+ memcpy(&match->key.ipv6.src, v6_spec->ip6src,
+ sizeof(match->key.ipv6.src));
+ memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src,
+ sizeof(match->mask.ipv6.src));
+ }
+ if (memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
+ memcpy(&match->key.ipv6.dst, v6_spec->ip6dst,
+ sizeof(match->key.ipv6.dst));
+ memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst,
+ sizeof(match->mask.ipv6.dst));
+ }
+ if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
+ memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
+ offsetof(struct ethtool_rx_flow_key, ipv6);
+ }
+ if (v6_m_spec->psrc) {
+ match->key.tp.src = v6_spec->psrc;
+ match->mask.tp.src = v6_m_spec->psrc;
+ }
+ if (v6_m_spec->pdst) {
+ match->key.tp.dst = v6_spec->pdst;
+ match->mask.tp.dst = v6_m_spec->pdst;
+ }
+ if (v6_m_spec->psrc ||
+ v6_m_spec->pdst) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_PORTS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] =
+ offsetof(struct ethtool_rx_flow_key, tp);
+ }
+ if (v6_m_spec->tclass) {
+ match->key.ip.tos = v6_spec->tclass;
+ match->mask.ip.tos = v6_m_spec->tclass;
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IP);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IP] =
+ offsetof(struct ethtool_rx_flow_key, ip);
+ }
+ }
+ break;
+ default:
+ ethtool_rx_flow_rule_destroy(flow);
+ return ERR_PTR(-EINVAL);
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ match->key.basic.ip_proto = IPPROTO_TCP;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ match->key.basic.ip_proto = IPPROTO_UDP;
+ break;
+ }
+ match->mask.basic.ip_proto = 0xff;
+
+ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_BASIC] =
+ offsetof(struct ethtool_rx_flow_key, basic);
+
+ if (fs->flow_type & FLOW_EXT) {
+ const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
+ const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;
+
+ if (ext_m_spec->vlan_etype &&
+ ext_m_spec->vlan_tci) {
+ match->key.vlan.vlan_tpid = ext_h_spec->vlan_etype;
+ match->mask.vlan.vlan_tpid = ext_m_spec->vlan_etype;
+
+ match->key.vlan.vlan_id =
+ ntohs(ext_h_spec->vlan_tci) & 0x0fff;
+ match->mask.vlan.vlan_id =
+ ntohs(ext_m_spec->vlan_tci) & 0x0fff;
+
+ match->key.vlan.vlan_priority =
+ (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13;
+ match->mask.vlan.vlan_priority =
+ (ntohs(ext_m_spec->vlan_tci) & 0xe000) >> 13;
+
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_VLAN);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
+ offsetof(struct ethtool_rx_flow_key, vlan);
+ }
+ }
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
+ const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;
+
+ memcpy(match->key.eth_addrs.dst, ext_h_spec->h_dest,
+ ETH_ALEN);
+ memcpy(match->mask.eth_addrs.dst, ext_m_spec->h_dest,
+ ETH_ALEN);
+
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_ETH_ADDRS] =
+ offsetof(struct ethtool_rx_flow_key, eth_addrs);
+ }
+
+ act = &flow->rule->action.entries[0];
+ switch (fs->ring_cookie) {
+ case RX_CLS_FLOW_DISC:
+ act->id = FLOW_ACTION_DROP;
+ break;
+ case RX_CLS_FLOW_WAKE:
+ act->id = FLOW_ACTION_WAKE;
+ break;
+ default:
+ act->id = FLOW_ACTION_QUEUE;
+ if (fs->flow_type & FLOW_RSS)
+ act->queue.ctx = input->rss_ctx;
+
+ act->queue.vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ act->queue.index = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ break;
+ }
+
+ return flow;
+}
+EXPORT_SYMBOL(ethtool_rx_flow_rule_create);
+
+void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *flow)
+{
+ kfree(flow->rule);
+ kfree(flow);
+}
+EXPORT_SYMBOL(ethtool_rx_flow_rule_destroy);
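For context, a hedged sketch of how a driver's set_rxnfc path might consume the translator above; the wrapper function and the hardware-programming step are assumptions, and only the create/match/destroy calls come from this patch:

/* Hypothetical driver hook; only the helper calls are from this patch. */
static int example_add_cls_rule(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = { .fs = fs };
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ports ports;

	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	/* flow->rule can be fed to the same flow_rule_match_*() helpers
	 * used for cls_flower offload (see net/core/flow_offload.c below).
	 */
	flow_rule_match_ports(flow->rule, &ports);
	/* ... program ports.key/ports.mask into hardware here ... */

	ethtool_rx_flow_rule_destroy(flow);
	return 0;
}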
diff --git a/net/core/filter.c b/net/core/filter.c
index 35c6933c2622..5ceba98069d4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -73,6 +73,7 @@
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
+#include <net/lwtunnel.h>
/**
* sk_filter_trim_cap - run a packet through a socket filter
@@ -296,22 +297,18 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
break;
case SKF_AD_VLAN_TAG:
- case SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
offsetof(struct sk_buff, vlan_tci));
- if (skb_field == SKF_AD_VLAN_TAG) {
- *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
- ~VLAN_TAG_PRESENT);
- } else {
- /* dst_reg >>= 12 */
- *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
- /* dst_reg &= 1 */
+ break;
+ case SKF_AD_VLAN_TAG_PRESENT:
+ *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
+ if (PKT_VLAN_PRESENT_BIT)
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
+ if (PKT_VLAN_PRESENT_BIT < 7)
*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
- }
break;
}
@@ -467,7 +464,8 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
bool ldx_off_ok = offset <= S16_MAX;
*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
- *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
+ if (offset)
+ *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
size, 2 + endian + (!ldx_off_ok * 2));
if (ldx_off_ok) {
@@ -1796,6 +1794,20 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
.arg2_type = ARG_ANYTHING,
};
+BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
+{
+ sk = sk_to_full_sk(sk);
+
+ return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
+}
+
+static const struct bpf_func_proto bpf_sk_fullsock_proto = {
+ .func = bpf_sk_fullsock,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_SOCK_COMMON,
+};
+
static inline int sk_skb_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
@@ -2023,18 +2035,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{
- /* skb->mac_len is not set on normal egress */
- unsigned int mlen = skb->network_header - skb->mac_header;
+ unsigned int mlen = skb_network_offset(skb);
- __skb_pull(skb, mlen);
+ if (mlen) {
+ __skb_pull(skb, mlen);
- /* At ingress, the mac header has already been pulled once.
- * At egress, skb_pospull_rcsum has to be done in case that
- * the skb is originated from ingress (i.e. a forwarded skb)
- * to ensure that rcsum starts at net header.
- */
- if (!skb_at_tc_ingress(skb))
- skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+ /* At ingress, the mac header has already been pulled once.
+		 * At egress, skb_postpull_rcsum has to be done in case
+		 * the skb originated from ingress (i.e. a forwarded skb)
+ * to ensure that rcsum starts at net header.
+ */
+ if (!skb_at_tc_ingress(skb))
+ skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+ }
skb_pop_mac_header(skb);
skb_reset_mac_len(skb);
return flags & BPF_F_INGRESS ?
@@ -2428,6 +2441,174 @@ static const struct bpf_func_proto bpf_msg_push_data_proto = {
.arg4_type = ARG_ANYTHING,
};
+static void sk_msg_shift_left(struct sk_msg *msg, int i)
+{
+ int prev;
+
+ do {
+ prev = i;
+ sk_msg_iter_var_next(i);
+ msg->sg.data[prev] = msg->sg.data[i];
+ } while (i != msg->sg.end);
+
+ sk_msg_iter_prev(msg, end);
+}
+
+static void sk_msg_shift_right(struct sk_msg *msg, int i)
+{
+ struct scatterlist tmp, sge;
+
+ sk_msg_iter_next(msg, end);
+ sge = sk_msg_elem_cpy(msg, i);
+ sk_msg_iter_var_next(i);
+ tmp = sk_msg_elem_cpy(msg, i);
+
+ while (i != msg->sg.end) {
+ msg->sg.data[i] = sge;
+ sk_msg_iter_var_next(i);
+ sge = tmp;
+ tmp = sk_msg_elem_cpy(msg, i);
+ }
+}
+
+BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+ u32, len, u64, flags)
+{
+ u32 i = 0, l, space, offset = 0;
+ u64 last = start + len;
+ int pop;
+
+ if (unlikely(flags))
+ return -EINVAL;
+
+ /* First find the starting scatterlist element */
+ i = msg->sg.start;
+ do {
+ l = sk_msg_elem(msg, i)->length;
+
+ if (start < offset + l)
+ break;
+ offset += l;
+ sk_msg_iter_var_next(i);
+ } while (i != msg->sg.end);
+
+ /* Bounds checks: start and pop must be inside message */
+ if (start >= offset + l || last >= msg->sg.size)
+ return -EINVAL;
+
+ space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
+
+ pop = len;
+ /* --------------| offset
+ * -| start |-------- len -------|
+ *
+ * |----- a ----|-------- pop -------|----- b ----|
+ * |______________________________________________| length
+ *
+ *
+ * a: region at front of scatter element to save
+	 * b: region at back of scatter element to save when length > a + pop
+	 * pop: region to pop from element, same as the input 'pop'; it is
+	 *      decremented below per iteration.
+	 *
+	 * Two top-level cases to handle when start != offset: first, b is
+	 * non-zero; second, b is zero, corresponding to a pop that spans
+	 * more than one element.
+	 *
+	 * Then, if b is non-zero AND there is no free slot, allocate a page
+	 * and compact the a and b regions into it. If there is a free slot,
+	 * shift the ring to the right, freeing the next element in the ring
+	 * to place b, leaving a untouched except to reduce its length.
+ */
+ if (start != offset) {
+ struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
+ int a = start;
+ int b = sge->length - pop - a;
+
+ sk_msg_iter_var_next(i);
+
+ if (pop < sge->length - a) {
+ if (space) {
+ sge->length = a;
+ sk_msg_shift_right(msg, i);
+ nsge = sk_msg_elem(msg, i);
+ get_page(sg_page(sge));
+ sg_set_page(nsge,
+ sg_page(sge),
+ b, sge->offset + pop + a);
+ } else {
+ struct page *page, *orig;
+ u8 *to, *from;
+
+ page = alloc_pages(__GFP_NOWARN |
+ __GFP_COMP | GFP_ATOMIC,
+ get_order(a + b));
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ sge->length = a;
+ orig = sg_page(sge);
+ from = sg_virt(sge);
+ to = page_address(page);
+ memcpy(to, from, a);
+ memcpy(to + a, from + a + pop, b);
+ sg_set_page(sge, page, a + b, 0);
+ put_page(orig);
+ }
+ pop = 0;
+		} else if (pop >= sge->length - a) {
+			pop -= (sge->length - a);
+			sge->length = a;
+		}
+ }
+
+ /* From above the current layout _must_ be as follows,
+ *
+ * -| offset
+ * -| start
+ *
+ * |---- pop ---|---------------- b ------------|
+ * |____________________________________________| length
+ *
+ * Offset and start of the current msg elem are equal because in the
+ * previous case we handled offset != start and either consumed the
+ * entire element and advanced to the next element OR pop == 0.
+ *
+ * Two cases to handle here are first pop is less than the length
+ * leaving some remainder b above. Simply adjust the element's layout
+ * in this case. Or pop >= length of the element so that b = 0. In this
+ * case advance to next element decrementing pop.
+ */
+ while (pop) {
+ struct scatterlist *sge = sk_msg_elem(msg, i);
+
+ if (pop < sge->length) {
+ sge->length -= pop;
+ sge->offset += pop;
+ pop = 0;
+ } else {
+ pop -= sge->length;
+ sk_msg_shift_left(msg, i);
+ }
+ sk_msg_iter_var_next(i);
+ }
+
+ sk_mem_uncharge(msg->sk, len - pop);
+ msg->sg.size -= (len - pop);
+ sk_msg_compute_data_pointers(msg);
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_msg_pop_data_proto = {
+ .func = bpf_msg_pop_data,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
+};
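/* Sketch of the program side (not part of this patch): an SK_MSG program
 * that strips an assumed 4-byte application header using the new helper.
 * SEC() and the bpf_msg_pop_data() declaration come from libbpf and the
 * matching UAPI headers.
 */
SEC("sk_msg")
int msg_strip_hdr(struct sk_msg_md *msg)
{
	/* start 0, len 4; flags must be zero for now (see check above) */
	if (bpf_msg_pop_data(msg, 0, 4, 0))
		return SK_DROP;
	return SK_PASS;
}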
+
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
return task_get_classid(skb);
@@ -2623,8 +2804,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff);
@@ -2665,8 +2845,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2791,8 +2970,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff);
@@ -2821,8 +2999,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC);
@@ -3908,6 +4085,26 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
+BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock,
+ struct bpf_map *, map, u64, flags, void *, data, u64, size)
+{
+ if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+ return -EINVAL;
+
+ return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
+}
+
+static const struct bpf_func_proto bpf_sockopt_event_output_proto = {
+ .func = bpf_sockopt_event_output,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_MEM,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
+};
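/* Usage sketch (BPF side, illustrative): with this proto wired up below
 * under BPF_FUNC_perf_event_output, a sock_ops program can stream data to
 * user space. The map name and payload are assumptions; note gpl_only
 * above, so the program needs a GPL-compatible license.
 */
struct bpf_map_def SEC("maps") events = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
	.max_entries = 64,
};

SEC("sockops")
int sockops_emit(struct bpf_sock_ops *skops)
{
	__u32 op = skops->op;

	bpf_perf_event_output(skops, &events, BPF_F_CURRENT_CPU,
			      &op, sizeof(op));
	return 1;
}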
+
BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
@@ -3926,14 +4123,20 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
/* Only some socketops are supported */
switch (optname) {
case SO_RCVBUF:
+ val = min_t(u32, val, sysctl_rmem_max);
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_SNDBUF:
+ val = min_t(u32, val, sysctl_wmem_max);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
break;
case SO_MAX_PACING_RATE: /* 32bit version */
+ if (val != ~0U)
+ cmpxchg(&sk->sk_pacing_status,
+ SK_PACING_NONE,
+ SK_PACING_NEEDED);
sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
@@ -3947,7 +4150,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_rcvlowat = val ? : 1;
break;
case SO_MARK:
- sk->sk_mark = val;
+ if (sk->sk_mark != val) {
+ sk->sk_mark = val;
+ sk_dst_reset(sk);
+ }
break;
default:
ret = -EINVAL;
@@ -4018,7 +4224,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
/* Only some options are supported */
switch (optname) {
case TCP_BPF_IW:
- if (val <= 0 || tp->data_segs_out > 0)
+ if (val <= 0 || tp->data_segs_out > tp->syn_data)
ret = -EINVAL;
else
tp->snd_cwnd = val;
@@ -4608,7 +4814,15 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len
}
#endif /* CONFIG_IPV6_SEG6_BPF */
-BPF_CALL_4(bpf_lwt_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
+#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
+static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
+ bool ingress)
+{
+ return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
+}
+#endif
+
+BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
u32, len)
{
switch (type) {
@@ -4617,13 +4831,40 @@ BPF_CALL_4(bpf_lwt_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
case BPF_LWT_ENCAP_SEG6_INLINE:
return bpf_push_seg6_encap(skb, type, hdr, len);
#endif
+#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
+ case BPF_LWT_ENCAP_IP:
+ return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
+#endif
+ default:
+ return -EINVAL;
+ }
+}
+
+BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
+ void *, hdr, u32, len)
+{
+ switch (type) {
+#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
+ case BPF_LWT_ENCAP_IP:
+ return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
+#endif
default:
return -EINVAL;
}
}
-static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
- .func = bpf_lwt_push_encap,
+static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
+ .func = bpf_lwt_in_push_encap,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_CONST_SIZE
+};
+
+static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
+ .func = bpf_lwt_xmit_push_encap,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
@@ -4823,49 +5064,90 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
};
#endif /* CONFIG_IPV6_SEG6_BPF */
+#define CONVERT_COMMON_TCP_SOCK_FIELDS(md_type, CONVERT) \
+do { \
+ switch (si->off) { \
+ case offsetof(md_type, snd_cwnd): \
+ CONVERT(snd_cwnd); break; \
+ case offsetof(md_type, srtt_us): \
+ CONVERT(srtt_us); break; \
+ case offsetof(md_type, snd_ssthresh): \
+ CONVERT(snd_ssthresh); break; \
+ case offsetof(md_type, rcv_nxt): \
+ CONVERT(rcv_nxt); break; \
+ case offsetof(md_type, snd_nxt): \
+ CONVERT(snd_nxt); break; \
+ case offsetof(md_type, snd_una): \
+ CONVERT(snd_una); break; \
+ case offsetof(md_type, mss_cache): \
+ CONVERT(mss_cache); break; \
+ case offsetof(md_type, ecn_flags): \
+ CONVERT(ecn_flags); break; \
+ case offsetof(md_type, rate_delivered): \
+ CONVERT(rate_delivered); break; \
+ case offsetof(md_type, rate_interval_us): \
+ CONVERT(rate_interval_us); break; \
+ case offsetof(md_type, packets_out): \
+ CONVERT(packets_out); break; \
+ case offsetof(md_type, retrans_out): \
+ CONVERT(retrans_out); break; \
+ case offsetof(md_type, total_retrans): \
+ CONVERT(total_retrans); break; \
+ case offsetof(md_type, segs_in): \
+ CONVERT(segs_in); break; \
+ case offsetof(md_type, data_segs_in): \
+ CONVERT(data_segs_in); break; \
+ case offsetof(md_type, segs_out): \
+ CONVERT(segs_out); break; \
+ case offsetof(md_type, data_segs_out): \
+ CONVERT(data_segs_out); break; \
+ case offsetof(md_type, lost_out): \
+ CONVERT(lost_out); break; \
+ case offsetof(md_type, sacked_out): \
+ CONVERT(sacked_out); break; \
+ case offsetof(md_type, bytes_received): \
+ CONVERT(bytes_received); break; \
+ case offsetof(md_type, bytes_acked): \
+ CONVERT(bytes_acked); break; \
+ } \
+} while (0)
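/* Expansion sketch: with md_type == struct bpf_tcp_sock and CONVERT ==
 * BPF_TCP_SOCK_GET_COMMON (defined further below), a read of snd_cwnd
 * becomes roughly a single load:
 *
 *	*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
 *			      offsetof(struct tcp_sock, snd_cwnd));
 */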
+
#ifdef CONFIG_INET
static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
- struct sk_buff *skb, u8 family, u8 proto)
+ int dif, int sdif, u8 family, u8 proto)
{
bool refcounted = false;
struct sock *sk = NULL;
- int dif = 0;
-
- if (skb->dev)
- dif = skb->dev->ifindex;
if (family == AF_INET) {
__be32 src4 = tuple->ipv4.saddr;
__be32 dst4 = tuple->ipv4.daddr;
- int sdif = inet_sdif(skb);
if (proto == IPPROTO_TCP)
- sk = __inet_lookup(net, &tcp_hashinfo, skb, 0,
+ sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
src4, tuple->ipv4.sport,
dst4, tuple->ipv4.dport,
dif, sdif, &refcounted);
else
sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
dst4, tuple->ipv4.dport,
- dif, sdif, &udp_table, skb);
+ dif, sdif, &udp_table, NULL);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
- u16 hnum = ntohs(tuple->ipv6.dport);
- int sdif = inet6_sdif(skb);
if (proto == IPPROTO_TCP)
- sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
+ sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
src6, tuple->ipv6.sport,
- dst6, hnum,
+ dst6, ntohs(tuple->ipv6.dport),
dif, sdif, &refcounted);
else if (likely(ipv6_bpf_stub))
sk = ipv6_bpf_stub->udp6_lib_lookup(net,
src6, tuple->ipv6.sport,
- dst6, hnum,
+ dst6, tuple->ipv6.dport,
dif, sdif,
- &udp_table, skb);
+ &udp_table, NULL);
#endif
}
@@ -4882,31 +5164,34 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
* callers to satisfy BPF_CALL declarations.
*/
static unsigned long
-bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
- u8 proto, u64 netns_id, u64 flags)
+__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+ u64 flags)
{
- struct net *caller_net;
struct sock *sk = NULL;
u8 family = AF_UNSPEC;
struct net *net;
+ int sdif;
family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
- if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
+ if (unlikely(family == AF_UNSPEC || flags ||
+ !((s32)netns_id < 0 || netns_id <= S32_MAX)))
goto out;
- if (skb->dev)
- caller_net = dev_net(skb->dev);
+ if (family == AF_INET)
+ sdif = inet_sdif(skb);
else
- caller_net = sock_net(skb->sk);
- if (netns_id) {
+ sdif = inet6_sdif(skb);
+
+ if ((s32)netns_id < 0) {
+ net = caller_net;
+ sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
+ } else {
net = get_net_ns_by_id(caller_net, netns_id);
if (unlikely(!net))
goto out;
- sk = sk_lookup(net, tuple, skb, family, proto);
+ sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
put_net(net);
- } else {
- net = caller_net;
- sk = sk_lookup(net, tuple, skb, family, proto);
}
if (sk)
@@ -4915,6 +5200,25 @@ out:
return (unsigned long) sk;
}
+static unsigned long
+bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ u8 proto, u64 netns_id, u64 flags)
+{
+ struct net *caller_net;
+ int ifindex;
+
+ if (skb->dev) {
+ caller_net = dev_net(skb->dev);
+ ifindex = skb->dev->ifindex;
+ } else {
+ caller_net = sock_net(skb->sk);
+ ifindex = 0;
+ }
+
+ return __bpf_sk_lookup(skb, tuple, len, caller_net, ifindex,
+ proto, netns_id, flags);
+}
+
BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
@@ -4964,6 +5268,186 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_SOCKET,
};
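/* Reference-counting contract (program side, illustrative): any socket
 * returned by the lookup helpers must be passed to bpf_sk_release() before
 * the program exits, or the verifier rejects the program.
 * BPF_F_CURRENT_NETNS (-1) selects the caller's netns, matching the
 * (s32)netns_id < 0 path above.
 */
SEC("classifier")
int sk_lookup_example(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};	/* filled from the packet in real use */
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);
	return TC_ACT_OK;
}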
+
+BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+ struct net *caller_net = dev_net(ctx->rxq->dev);
+ int ifindex = ctx->rxq->dev->ifindex;
+
+ return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
+ IPPROTO_UDP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
+ .func = bpf_xdp_sk_lookup_udp,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+ struct net *caller_net = dev_net(ctx->rxq->dev);
+ int ifindex = ctx->rxq->dev->ifindex;
+
+ return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
+ IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
+ .func = bpf_xdp_sk_lookup_tcp,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+ return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
+ IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
+ .func = bpf_sock_addr_sk_lookup_tcp,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+ return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
+ IPPROTO_UDP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
+ .func = bpf_sock_addr_sk_lookup_udp,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, bytes_acked))
+ return false;
+
+ if (off % size != 0)
+ return false;
+
+ switch (off) {
+ case offsetof(struct bpf_tcp_sock, bytes_received):
+ case offsetof(struct bpf_tcp_sock, bytes_acked):
+ return size == sizeof(__u64);
+ default:
+ return size == sizeof(__u32);
+ }
+}
+
+u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog, u32 *target_size)
+{
+ struct bpf_insn *insn = insn_buf;
+
+#define BPF_TCP_SOCK_GET_COMMON(FIELD) \
+ do { \
+ BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) > \
+ FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
+ si->dst_reg, si->src_reg, \
+ offsetof(struct tcp_sock, FIELD)); \
+ } while (0)
+
+ CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_tcp_sock,
+ BPF_TCP_SOCK_GET_COMMON);
+
+ if (insn > insn_buf)
+ return insn - insn_buf;
+
+ switch (si->off) {
+ case offsetof(struct bpf_tcp_sock, rtt_min):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
+ sizeof(struct minmax));
+ BUILD_BUG_ON(sizeof(struct minmax) <
+ sizeof(struct minmax_sample));
+
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+ offsetof(struct tcp_sock, rtt_min) +
+ offsetof(struct minmax_sample, v));
+ break;
+ }
+
+ return insn - insn_buf;
+}
+
+BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
+{
+ sk = sk_to_full_sk(sk);
+
+ if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
+ return (unsigned long)sk;
+
+ return (unsigned long)NULL;
+}
+
+static const struct bpf_func_proto bpf_tcp_sock_proto = {
+ .func = bpf_tcp_sock,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL,
+ .arg1_type = ARG_PTR_TO_SOCK_COMMON,
+};
+
+BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
+{
+ unsigned int iphdr_len;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_IP))
+ iphdr_len = sizeof(struct iphdr);
+ else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
+ iphdr_len = sizeof(struct ipv6hdr);
+ else
+ return 0;
+
+ if (skb_headlen(skb) < iphdr_len)
+ return 0;
+
+ if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
+ return 0;
+
+ return INET_ECN_set_ce(skb);
+}
+
+static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
+ .func = bpf_skb_ecn_set_ce,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
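/* Combined usage sketch for the cg_skb helpers wired up below: fetch the
 * full TCP socket behind an skb and mark CE. The policy condition is a
 * placeholder; field names follow struct bpf_sock / struct bpf_tcp_sock.
 */
SEC("cgroup_skb/egress")
int tcp_mark_ce(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;	/* new __sk_buff field, see below */
	struct bpf_tcp_sock *tp;

	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);
	if (!sk || sk->protocol != IPPROTO_TCP)
		return 1;
	tp = bpf_tcp_sock(sk);
	if (tp && tp->snd_cwnd > 10 /* placeholder policy */)
		bpf_skb_ecn_set_ce(skb);
	return 1;
}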
#endif /* CONFIG_INET */
bool bpf_helper_changes_pkt_data(void *func)
@@ -4986,13 +5470,15 @@ bool bpf_helper_changes_pkt_data(void *func)
func == bpf_xdp_adjust_meta ||
func == bpf_msg_pull_data ||
func == bpf_msg_push_data ||
+ func == bpf_msg_pop_data ||
func == bpf_xdp_adjust_tail ||
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
func == bpf_lwt_seg6_store_bytes ||
func == bpf_lwt_seg6_adjust_srh ||
func == bpf_lwt_seg6_action ||
#endif
- func == bpf_lwt_push_encap)
+ func == bpf_lwt_in_push_encap ||
+ func == bpf_lwt_xmit_push_encap)
return true;
return false;
@@ -5024,10 +5510,20 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_tail_call_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
+ default:
+ break;
+ }
+
+ if (!capable(CAP_SYS_ADMIN))
+ return NULL;
+
+ switch (func_id) {
+ case BPF_FUNC_spin_lock:
+ return &bpf_spin_lock_proto;
+ case BPF_FUNC_spin_unlock:
+ return &bpf_spin_unlock_proto;
case BPF_FUNC_trace_printk:
- if (capable(CAP_SYS_ADMIN))
- return bpf_get_trace_printk_proto();
- /* else: fall through */
+ return bpf_get_trace_printk_proto();
default:
return NULL;
}
@@ -5070,6 +5566,14 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_socket_cookie_sock_addr_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+#ifdef CONFIG_INET
+ case BPF_FUNC_sk_lookup_tcp:
+ return &bpf_sock_addr_sk_lookup_tcp_proto;
+ case BPF_FUNC_sk_lookup_udp:
+ return &bpf_sock_addr_sk_lookup_udp_proto;
+ case BPF_FUNC_sk_release:
+ return &bpf_sk_release_proto;
+#endif /* CONFIG_INET */
default:
return bpf_base_func_proto(func_id);
}
@@ -5098,6 +5602,14 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
switch (func_id) {
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+ case BPF_FUNC_sk_fullsock:
+ return &bpf_sk_fullsock_proto;
+#ifdef CONFIG_INET
+ case BPF_FUNC_tcp_sock:
+ return &bpf_tcp_sock_proto;
+ case BPF_FUNC_skb_ecn_set_ce:
+ return &bpf_skb_ecn_set_ce_proto;
+#endif
default:
return sk_filter_func_proto(func_id, prog);
}
@@ -5169,6 +5681,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_socket_uid_proto;
case BPF_FUNC_fib_lookup:
return &bpf_skb_fib_lookup_proto;
+ case BPF_FUNC_sk_fullsock:
+ return &bpf_sk_fullsock_proto;
#ifdef CONFIG_XFRM
case BPF_FUNC_skb_get_xfrm_state:
return &bpf_skb_get_xfrm_state_proto;
@@ -5186,6 +5700,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_lookup_udp_proto;
case BPF_FUNC_sk_release:
return &bpf_sk_release_proto;
+ case BPF_FUNC_tcp_sock:
+ return &bpf_tcp_sock_proto;
#endif
default:
return bpf_base_func_proto(func_id);
@@ -5214,6 +5730,14 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_xdp_adjust_tail_proto;
case BPF_FUNC_fib_lookup:
return &bpf_xdp_fib_lookup_proto;
+#ifdef CONFIG_INET
+ case BPF_FUNC_sk_lookup_udp:
+ return &bpf_xdp_sk_lookup_udp_proto;
+ case BPF_FUNC_sk_lookup_tcp:
+ return &bpf_xdp_sk_lookup_tcp_proto;
+ case BPF_FUNC_sk_release:
+ return &bpf_sk_release_proto;
+#endif
default:
return bpf_base_func_proto(func_id);
}
@@ -5240,6 +5764,8 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_socket_cookie_sock_ops_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+ case BPF_FUNC_perf_event_output:
+ return &bpf_sockopt_event_output_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -5264,8 +5790,8 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_msg_pull_data_proto;
case BPF_FUNC_msg_push_data:
return &bpf_msg_push_data_proto;
- case BPF_FUNC_get_local_storage:
- return &bpf_get_local_storage_proto;
+ case BPF_FUNC_msg_pop_data:
+ return &bpf_msg_pop_data_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -5296,8 +5822,6 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_redirect_map_proto;
case BPF_FUNC_sk_redirect_hash:
return &bpf_sk_redirect_hash_proto;
- case BPF_FUNC_get_local_storage:
- return &bpf_get_local_storage_proto;
#ifdef CONFIG_INET
case BPF_FUNC_sk_lookup_tcp:
return &bpf_sk_lookup_tcp_proto;
@@ -5354,7 +5878,7 @@ lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_lwt_push_encap:
- return &bpf_lwt_push_encap_proto;
+ return &bpf_lwt_in_push_encap_proto;
default:
return lwt_out_func_proto(func_id, prog);
}
@@ -5390,6 +5914,8 @@ lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_l4_csum_replace_proto;
case BPF_FUNC_set_hash_invalid:
return &bpf_set_hash_invalid_proto;
+ case BPF_FUNC_lwt_push_encap:
+ return &bpf_lwt_xmit_push_encap_proto;
default:
return lwt_out_func_proto(func_id, prog);
}
@@ -5440,10 +5966,19 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
if (size != size_default)
return false;
break;
- case bpf_ctx_range(struct __sk_buff, flow_keys):
- if (size != sizeof(struct bpf_flow_keys *))
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+ if (size != sizeof(__u64))
+ return false;
+ break;
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ if (size != sizeof(__u64))
return false;
break;
+ case offsetof(struct __sk_buff, sk):
+ if (type == BPF_WRITE || size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
+ break;
default:
/* Only narrow read access allowed for now. */
if (type == BPF_WRITE) {
@@ -5469,8 +6004,10 @@ static bool sk_filter_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, data_end):
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
@@ -5494,15 +6031,26 @@ static bool cg_skb_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+ case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
+ case bpf_ctx_range(struct __sk_buff, data):
+ case bpf_ctx_range(struct __sk_buff, data_end):
+ if (!capable(CAP_SYS_ADMIN))
+ return false;
+ break;
}
+
if (type == BPF_WRITE) {
switch (off) {
case bpf_ctx_range(struct __sk_buff, mark):
case bpf_ctx_range(struct __sk_buff, priority):
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
break;
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ if (!capable(CAP_SYS_ADMIN))
+ return false;
+ break;
default:
return false;
}
@@ -5529,7 +6077,9 @@ static bool lwt_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
@@ -5600,31 +6150,44 @@ full_access:
return true;
}
-static bool __sock_filter_check_size(int off, int size,
+bool bpf_sock_common_is_valid_access(int off, int size,
+ enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
- const int size_default = sizeof(__u32);
-
switch (off) {
- case bpf_ctx_range(struct bpf_sock, src_ip4):
- case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
- bpf_ctx_record_field_size(info, size_default);
- return bpf_ctx_narrow_access_ok(off, size, size_default);
+ case bpf_ctx_range_till(struct bpf_sock, type, priority):
+ return false;
+ default:
+ return bpf_sock_is_valid_access(off, size, type, info);
}
-
- return size == size_default;
}
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
+ const int size_default = sizeof(__u32);
+
if (off < 0 || off >= sizeof(struct bpf_sock))
return false;
if (off % size != 0)
return false;
- if (!__sock_filter_check_size(off, size, info))
- return false;
- return true;
+
+ switch (off) {
+ case offsetof(struct bpf_sock, state):
+ case offsetof(struct bpf_sock, family):
+ case offsetof(struct bpf_sock, type):
+ case offsetof(struct bpf_sock, protocol):
+ case offsetof(struct bpf_sock, dst_port):
+ case offsetof(struct bpf_sock, src_port):
+ case bpf_ctx_range(struct bpf_sock, src_ip4):
+ case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
+ case bpf_ctx_range(struct bpf_sock, dst_ip4):
+ case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
+ bpf_ctx_record_field_size(info, size_default);
+ return bpf_ctx_narrow_access_ok(off, size, size_default);
+ }
+
+ return size == size_default;
}
static bool sock_filter_is_valid_access(int off, int size,
@@ -5638,6 +6201,15 @@ static bool sock_filter_is_valid_access(int off, int size,
prog->expected_attach_type);
}
+static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
+ const struct bpf_prog *prog)
+{
+ /* Neither direct read nor direct write requires any preliminary
+ * action.
+ */
+ return 0;
+}
+
static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
const struct bpf_prog *prog, int drop_verdict)
{
@@ -5730,6 +6302,8 @@ static bool tc_cls_act_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, priority):
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ case bpf_ctx_range(struct __sk_buff, queue_mapping):
break;
default:
return false;
@@ -5746,7 +6320,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_end):
info->reg_type = PTR_TO_PACKET_END;
break;
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
return false;
}
@@ -5948,7 +6522,9 @@ static bool sk_skb_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
@@ -5984,6 +6560,9 @@ static bool sk_msg_is_valid_access(int off, int size,
if (type == BPF_WRITE)
return false;
+ if (off % size != 0)
+ return false;
+
switch (off) {
case offsetof(struct sk_msg_md, data):
info->reg_type = PTR_TO_PACKET;
@@ -5995,16 +6574,20 @@ static bool sk_msg_is_valid_access(int off, int size,
if (size != sizeof(__u64))
return false;
break;
- default:
+ case bpf_ctx_range(struct sk_msg_md, family):
+ case bpf_ctx_range(struct sk_msg_md, remote_ip4):
+ case bpf_ctx_range(struct sk_msg_md, local_ip4):
+ case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
+ case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
+ case bpf_ctx_range(struct sk_msg_md, remote_port):
+ case bpf_ctx_range(struct sk_msg_md, local_port):
+ case bpf_ctx_range(struct sk_msg_md, size):
if (size != sizeof(__u32))
return false;
- }
-
- if (off < 0 || off >= sizeof(struct sk_msg_md))
- return false;
- if (off % size != 0)
+ break;
+ default:
return false;
-
+ }
return true;
}
@@ -6029,12 +6612,14 @@ static bool flow_dissector_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_end):
info->reg_type = PTR_TO_PACKET_END;
break;
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
info->reg_type = PTR_TO_FLOW_KEYS;
break;
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
@@ -6123,25 +6708,34 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct __sk_buff, queue_mapping):
- *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
- bpf_target_off(struct sk_buff, queue_mapping, 2,
- target_size));
+ if (type == BPF_WRITE) {
+ *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
+ *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
+ bpf_target_off(struct sk_buff,
+ queue_mapping,
+ 2, target_size));
+ } else {
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+ bpf_target_off(struct sk_buff,
+ queue_mapping,
+ 2, target_size));
+ }
break;
case offsetof(struct __sk_buff, vlan_present):
- case offsetof(struct __sk_buff, vlan_tci):
- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+ *target_size = 1;
+ *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
+ PKT_VLAN_PRESENT_OFFSET());
+ if (PKT_VLAN_PRESENT_BIT)
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
+ if (PKT_VLAN_PRESENT_BIT < 7)
+ *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
+ break;
+ case offsetof(struct __sk_buff, vlan_tci):
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
bpf_target_off(struct sk_buff, vlan_tci, 2,
target_size));
- if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
- *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
- ~VLAN_TAG_PRESENT);
- } else {
- *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
- *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
- }
break;
case offsetof(struct __sk_buff, cb[0]) ...
@@ -6344,6 +6938,61 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
si->src_reg, off);
break;
+
+ case offsetof(struct __sk_buff, tstamp):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
+
+ if (type == BPF_WRITE)
+ *insn++ = BPF_STX_MEM(BPF_DW,
+ si->dst_reg, si->src_reg,
+ bpf_target_off(struct sk_buff,
+ tstamp, 8,
+ target_size));
+ else
+ *insn++ = BPF_LDX_MEM(BPF_DW,
+ si->dst_reg, si->src_reg,
+ bpf_target_off(struct sk_buff,
+ tstamp, 8,
+ target_size));
+ break;
+
+ case offsetof(struct __sk_buff, gso_segs):
+ /* si->dst_reg = skb_shinfo(SKB); */
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, head));
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+ BPF_REG_AX, si->src_reg,
+ offsetof(struct sk_buff, end));
+ *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
+#else
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, end));
+#endif
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
+ si->dst_reg, si->dst_reg,
+ bpf_target_off(struct skb_shared_info,
+ gso_segs, 2,
+ target_size));
+ break;
+ case offsetof(struct __sk_buff, wire_len):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
+
+ off = si->off;
+ off -= offsetof(struct __sk_buff, wire_len);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct qdisc_skb_cb, pkt_len);
+ *target_size = 4;
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
+ break;
+
+ case offsetof(struct __sk_buff, sk):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, sk));
+ break;
}
return insn - insn_buf;
@@ -6392,24 +7041,32 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock, family):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);
-
- *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
- offsetof(struct sock, sk_family));
+ *insn++ = BPF_LDX_MEM(
+ BPF_FIELD_SIZEOF(struct sock_common, skc_family),
+ si->dst_reg, si->src_reg,
+ bpf_target_off(struct sock_common,
+ skc_family,
+ FIELD_SIZEOF(struct sock_common,
+ skc_family),
+ target_size));
break;
case offsetof(struct bpf_sock, type):
+ BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, __sk_flags_offset));
*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
+ *target_size = 2;
break;
case offsetof(struct bpf_sock, protocol):
+ BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, __sk_flags_offset));
*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
+ *target_size = 1;
break;
case offsetof(struct bpf_sock, src_ip4):
@@ -6421,6 +7078,15 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
target_size));
break;
+ case offsetof(struct bpf_sock, dst_ip4):
+ *insn++ = BPF_LDX_MEM(
+ BPF_SIZE(si->code), si->dst_reg, si->src_reg,
+ bpf_target_off(struct sock_common, skc_daddr,
+ FIELD_SIZEOF(struct sock_common,
+ skc_daddr),
+ target_size));
+ break;
+
case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
off = si->off;
@@ -6439,6 +7105,23 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
#endif
break;
+ case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
+#if IS_ENABLED(CONFIG_IPV6)
+ off = si->off;
+ off -= offsetof(struct bpf_sock, dst_ip6[0]);
+ *insn++ = BPF_LDX_MEM(
+ BPF_SIZE(si->code), si->dst_reg, si->src_reg,
+ bpf_target_off(struct sock_common,
+ skc_v6_daddr.s6_addr32[0],
+ FIELD_SIZEOF(struct sock_common,
+ skc_v6_daddr.s6_addr32[0]),
+ target_size) + off);
+#else
+ *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
+ *target_size = 4;
+#endif
+ break;
+
case offsetof(struct bpf_sock, src_port):
*insn++ = BPF_LDX_MEM(
BPF_FIELD_SIZEOF(struct sock_common, skc_num),
@@ -6448,6 +7131,26 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
skc_num),
target_size));
break;
+
+ case offsetof(struct bpf_sock, dst_port):
+ *insn++ = BPF_LDX_MEM(
+ BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
+ si->dst_reg, si->src_reg,
+ bpf_target_off(struct sock_common, skc_dport,
+ FIELD_SIZEOF(struct sock_common,
+ skc_dport),
+ target_size));
+ break;
+
+ case offsetof(struct bpf_sock, state):
+ *insn++ = BPF_LDX_MEM(
+ BPF_FIELD_SIZEOF(struct sock_common, skc_state),
+ si->dst_reg, si->src_reg,
+ bpf_target_off(struct sock_common, skc_state,
+ FIELD_SIZEOF(struct sock_common,
+ skc_state),
+ target_size));
+ break;
}
return insn - insn_buf;
@@ -6695,6 +7398,85 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn = insn_buf;
int off;
+/* Helper macro for adding read access to tcp_sock or sock fields. */
+#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
+ do { \
+ BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
+ FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, \
+ is_fullsock), \
+ si->dst_reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ is_fullsock)); \
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, sk),\
+ si->dst_reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, sk));\
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
+ OBJ_FIELD), \
+ si->dst_reg, si->dst_reg, \
+ offsetof(OBJ, OBJ_FIELD)); \
+ } while (0)
+
+#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
+ SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
+
+/* Helper macro for adding write access to tcp_sock or sock fields.
+ * The macro is called with two registers, dst_reg which contains a pointer
+ * to ctx (context) and src_reg which contains the value that should be
+ * stored. However, we need an additional register since we cannot overwrite
+ * dst_reg because it may be used later in the program.
+ * Instead we "borrow" one of the other register. We first save its value
+ * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
+ * it at the end of the macro.
+ */
+#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
+ do { \
+ int reg = BPF_REG_9; \
+ BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
+ FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, \
+ is_fullsock), \
+ reg, si->dst_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ is_fullsock)); \
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, sk),\
+ reg, si->dst_reg, \
+ offsetof(struct bpf_sock_ops_kern, sk));\
+ *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
+ reg, si->src_reg, \
+ offsetof(OBJ, OBJ_FIELD)); \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ } while (0)
+
+#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
+ do { \
+ if (TYPE == BPF_WRITE) \
+ SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
+ else \
+ SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
+ } while (0)
+
+ CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_sock_ops,
+ SOCK_OPS_GET_TCP_SOCK_FIELD);
+
+ if (insn > insn_buf)
+ return insn - insn_buf;
+
switch (si->off) {
case offsetof(struct bpf_sock_ops, op) ...
offsetof(struct bpf_sock_ops, replylong[3]):
@@ -6852,175 +7634,15 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
FIELD_SIZEOF(struct minmax_sample, t));
break;
-/* Helper macro for adding read access to tcp_sock or sock fields. */
-#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
- do { \
- BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
- FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
- struct bpf_sock_ops_kern, \
- is_fullsock), \
- si->dst_reg, si->src_reg, \
- offsetof(struct bpf_sock_ops_kern, \
- is_fullsock)); \
- *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
- struct bpf_sock_ops_kern, sk),\
- si->dst_reg, si->src_reg, \
- offsetof(struct bpf_sock_ops_kern, sk));\
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
- OBJ_FIELD), \
- si->dst_reg, si->dst_reg, \
- offsetof(OBJ, OBJ_FIELD)); \
- } while (0)
-
-/* Helper macro for adding write access to tcp_sock or sock fields.
- * The macro is called with two registers, dst_reg which contains a pointer
- * to ctx (context) and src_reg which contains the value that should be
- * stored. However, we need an additional register since we cannot overwrite
- * dst_reg because it may be used later in the program.
- * Instead we "borrow" one of the other register. We first save its value
- * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
- * it at the end of the macro.
- */
-#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
- do { \
- int reg = BPF_REG_9; \
- BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
- FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
- if (si->dst_reg == reg || si->src_reg == reg) \
- reg--; \
- if (si->dst_reg == reg || si->src_reg == reg) \
- reg--; \
- *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
- offsetof(struct bpf_sock_ops_kern, \
- temp)); \
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
- struct bpf_sock_ops_kern, \
- is_fullsock), \
- reg, si->dst_reg, \
- offsetof(struct bpf_sock_ops_kern, \
- is_fullsock)); \
- *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
- struct bpf_sock_ops_kern, sk),\
- reg, si->dst_reg, \
- offsetof(struct bpf_sock_ops_kern, sk));\
- *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
- reg, si->src_reg, \
- offsetof(OBJ, OBJ_FIELD)); \
- *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
- offsetof(struct bpf_sock_ops_kern, \
- temp)); \
- } while (0)
-
-#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
- do { \
- if (TYPE == BPF_WRITE) \
- SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
- else \
- SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
- } while (0)
-
- case offsetof(struct bpf_sock_ops, snd_cwnd):
- SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, srtt_us):
- SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock);
- break;
-
case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
struct tcp_sock);
break;
- case offsetof(struct bpf_sock_ops, snd_ssthresh):
- SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, rcv_nxt):
- SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, snd_nxt):
- SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, snd_una):
- SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, mss_cache):
- SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, ecn_flags):
- SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, rate_delivered):
- SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered,
- struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, rate_interval_us):
- SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us,
- struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, packets_out):
- SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, retrans_out):
- SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, total_retrans):
- SOCK_OPS_GET_FIELD(total_retrans, total_retrans,
- struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, segs_in):
- SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, data_segs_in):
- SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, segs_out):
- SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, data_segs_out):
- SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out,
- struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, lost_out):
- SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, sacked_out):
- SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock);
- break;
-
case offsetof(struct bpf_sock_ops, sk_txhash):
SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
struct sock, type);
break;
-
- case offsetof(struct bpf_sock_ops, bytes_received):
- SOCK_OPS_GET_FIELD(bytes_received, bytes_received,
- struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, bytes_acked):
- SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock);
- break;
-
}
return insn - insn_buf;
}
@@ -7060,6 +7682,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
int off;
#endif
+	/* ctx conversion relies on the sg element being first in struct sk_msg */
+ BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);
+
switch (si->off) {
case offsetof(struct sk_msg_md, data):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
@@ -7172,6 +7797,12 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
offsetof(struct sock_common, skc_num));
break;
+
+ case offsetof(struct sk_msg_md, size):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_msg_sg, size));
+ break;
}
return insn - insn_buf;
@@ -7204,6 +7835,7 @@ const struct bpf_verifier_ops xdp_verifier_ops = {
.get_func_proto = xdp_func_proto,
.is_valid_access = xdp_is_valid_access,
.convert_ctx_access = xdp_convert_ctx_access,
+ .gen_prologue = bpf_noop_prologue,
};
const struct bpf_prog_ops xdp_prog_ops = {
@@ -7302,6 +7934,7 @@ const struct bpf_verifier_ops sk_msg_verifier_ops = {
.get_func_proto = sk_msg_func_proto,
.is_valid_access = sk_msg_is_valid_access,
.convert_ctx_access = sk_msg_convert_ctx_access,
+ .gen_prologue = bpf_noop_prologue,
};
const struct bpf_prog_ops sk_msg_prog_ops = {
@@ -7314,6 +7947,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
};
const struct bpf_prog_ops flow_dissector_prog_ops = {
+ .test_run = bpf_prog_test_run_flow_dissector,
};
int sk_detach_filter(struct sock *sk)
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 676f3ad629f9..bb1a54747d64 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -683,6 +683,46 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
}
}
+bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
+ const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ struct bpf_flow_keys *flow_keys)
+{
+ struct bpf_skb_data_end cb_saved;
+ struct bpf_skb_data_end *cb;
+ u32 result;
+
+ /* Note that even though the const qualifier is discarded
+	 * throughout the execution of the BPF program, all changes (the
+ * control block) are reverted after the BPF program returns.
+ * Therefore, __skb_flow_dissect does not alter the skb.
+ */
+
+ cb = (struct bpf_skb_data_end *)skb->cb;
+
+ /* Save Control Block */
+ memcpy(&cb_saved, cb, sizeof(cb_saved));
+ memset(cb, 0, sizeof(*cb));
+
+ /* Pass parameters to the BPF program */
+ memset(flow_keys, 0, sizeof(*flow_keys));
+ cb->qdisc_cb.flow_keys = flow_keys;
+ flow_keys->nhoff = skb_network_offset(skb);
+ flow_keys->thoff = flow_keys->nhoff;
+
+ bpf_compute_data_pointers((struct sk_buff *)skb);
+ result = BPF_PROG_RUN(prog, skb);
+
+ /* Restore state */
+ memcpy(cb, &cb_saved, sizeof(cb_saved));
+
+ flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
+ flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
+ flow_keys->nhoff, skb->len);
+
+ return result == BPF_OK;
+}
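/* For reference, a minimal BPF flow dissector program this path would run
 * (illustrative; it claims IPv4/TCP without parsing). flow_keys is the
 * __sk_buff pointer field wired up via cb->qdisc_cb.flow_keys above;
 * bpf_htons() comes from the usual libbpf endian helpers.
 */
SEC("flow_dissector")
int dissect(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;

	keys->n_proto = bpf_htons(ETH_P_IP);
	keys->ip_proto = IPPROTO_TCP;
	keys->thoff = keys->nhoff + sizeof(struct iphdr);
	return BPF_OK;
}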
+
/**
* __skb_flow_dissect - extract the flow_keys struct and return it
* @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -714,7 +754,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_vlan *key_vlan;
enum flow_dissect_ret fdret;
enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
- struct bpf_prog *attached = NULL;
int num_hdrs = 0;
u8 ip_proto = 0;
bool ret;
@@ -754,49 +793,30 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
FLOW_DISSECTOR_KEY_BASIC,
target_container);
- rcu_read_lock();
if (skb) {
+ struct bpf_flow_keys flow_keys;
+ struct bpf_prog *attached = NULL;
+
+ rcu_read_lock();
+
if (skb->dev)
attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
else if (skb->sk)
attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
else
WARN_ON_ONCE(1);
- }
- if (attached) {
- /* Note that even though the const qualifier is discarded
- * throughout the execution of the BPF program, all changes(the
- * control block) are reverted after the BPF program returns.
- * Therefore, __skb_flow_dissect does not alter the skb.
- */
- struct bpf_flow_keys flow_keys = {};
- struct bpf_skb_data_end cb_saved;
- struct bpf_skb_data_end *cb;
- u32 result;
-
- cb = (struct bpf_skb_data_end *)skb->cb;
-
- /* Save Control Block */
- memcpy(&cb_saved, cb, sizeof(cb_saved));
- memset(cb, 0, sizeof(cb_saved));
-
- /* Pass parameters to the BPF program */
- cb->qdisc_cb.flow_keys = &flow_keys;
- flow_keys.nhoff = nhoff;
- bpf_compute_data_pointers((struct sk_buff *)skb);
- result = BPF_PROG_RUN(attached, skb);
-
- /* Restore state */
- memcpy(cb, &cb_saved, sizeof(cb_saved));
-
- __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
- target_container);
- key_control->thoff = min_t(u16, key_control->thoff, skb->len);
+ if (attached) {
+ ret = __skb_flow_bpf_dissect(attached, skb,
+ flow_dissector,
+ &flow_keys);
+ __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
+ target_container);
+ rcu_read_unlock();
+ return ret;
+ }
rcu_read_unlock();
- return result == BPF_OK;
}
- rcu_read_unlock();
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
@@ -952,8 +972,7 @@ proto_again:
if (!vlan) {
key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
- key_vlan->vlan_priority =
- (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
+ key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
} else {
key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
VLAN_VID_MASK;
@@ -1166,8 +1185,8 @@ ip_proto_again:
break;
}
- if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_PORTS)) {
+ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
+ !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
key_ports = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS,
target_container);
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
new file mode 100644
index 000000000000..c3a00eac4804
--- /dev/null
+++ b/net/core/flow_offload.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <net/flow_offload.h>
+
+struct flow_rule *flow_rule_alloc(unsigned int num_actions)
+{
+ struct flow_rule *rule;
+
+ rule = kzalloc(sizeof(struct flow_rule) +
+ sizeof(struct flow_action_entry) * num_actions,
+ GFP_KERNEL);
+ if (!rule)
+ return NULL;
+
+ rule->action.num_entries = num_actions;
+
+ return rule;
+}
+EXPORT_SYMBOL(flow_rule_alloc);
+
+#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \
+ const struct flow_match *__m = &(__rule)->match; \
+ struct flow_dissector *__d = (__m)->dissector; \
+ \
+ (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \
+ (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+ struct flow_match_basic *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
+}
+EXPORT_SYMBOL(flow_rule_match_basic);
+
+void flow_rule_match_control(const struct flow_rule *rule,
+ struct flow_match_control *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
+}
+EXPORT_SYMBOL(flow_rule_match_control);
+
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+ struct flow_match_eth_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_eth_addrs);
+
+void flow_rule_match_vlan(const struct flow_rule *rule,
+ struct flow_match_vlan *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
+}
+EXPORT_SYMBOL(flow_rule_match_vlan);
+
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);
+
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);
+
+void flow_rule_match_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ip);
+
+void flow_rule_match_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ports);
+
+void flow_rule_match_tcp(const struct flow_rule *rule,
+ struct flow_match_tcp *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_tcp);
+
+void flow_rule_match_icmp(const struct flow_rule *rule,
+ struct flow_match_icmp *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_icmp);
+
+void flow_rule_match_mpls(const struct flow_rule *rule,
+ struct flow_match_mpls *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_mpls);
+
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+ struct flow_match_control *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_control);
+
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);
+
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);
+
+void flow_rule_match_enc_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ip);
+
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ports);
+
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+ struct flow_match_enc_keyid *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_keyid);
+
+void flow_rule_match_enc_opts(const struct flow_rule *rule,
+ struct flow_match_enc_opts *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_opts);
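These accessors give drivers one uniform way to pull a key/mask pair out of a rule instead of poking at the dissector directly. A sketch of the intended consumer pattern in a hypothetical driver offload callback (the example_ name is illustrative):

    static int example_parse_basic(const struct flow_rule *rule)
    {
    	struct flow_match_basic match;

    	flow_rule_match_basic(rule, &match);

    	/* the mask says which fields the filter actually matches on */
    	if (match.mask->n_proto && match.key->n_proto != htons(ETH_P_IP))
    		return -EOPNOTSUPP;

    	return 0;
    }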
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 9bf1b9ad1780..ac679f74ba47 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -291,7 +291,6 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
for_each_possible_cpu(i) {
const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
- qstats->qlen = 0;
qstats->backlog += qcpu->backlog;
qstats->drops += qcpu->drops;
qstats->requeues += qcpu->requeues;
@@ -307,7 +306,6 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
if (cpu) {
__gnet_stats_copy_queue_cpu(qstats, cpu);
} else {
- qstats->qlen = q->qlen;
qstats->backlog = q->backlog;
qstats->drops = q->drops;
qstats->requeues = q->requeues;
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index 4b54e5f107c6..acf45ddbe924 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -84,6 +84,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
for_each_possible_cpu(i) {
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+ napi_disable(&cell->napi);
netif_napi_del(&cell->napi);
__skb_queue_purge(&cell->napi_skbs);
}
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 3e85437f7106..cf2f8897ca19 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -16,6 +16,8 @@
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
+#include <net/gre.h>
+#include <net/ip6_route.h>
struct bpf_lwt_prog {
struct bpf_prog *prog;
@@ -55,6 +57,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
switch (ret) {
case BPF_OK:
+ case BPF_LWT_REROUTE:
break;
case BPF_REDIRECT:
@@ -63,6 +66,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
lwt->name ? : "<unknown>");
ret = BPF_OK;
} else {
+ skb_reset_mac_header(skb);
ret = skb_do_redirect(skb);
if (ret == 0)
ret = BPF_REDIRECT;
@@ -86,6 +90,30 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
return ret;
}
+static int bpf_lwt_input_reroute(struct sk_buff *skb)
+{
+ int err = -EINVAL;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+ iph->tos, skb_dst(skb)->dev);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ err = ipv6_stub->ipv6_route_input(skb);
+ } else {
+ err = -EAFNOSUPPORT;
+ }
+
+ if (err)
+ goto err;
+ return dst_input(skb);
+
+err:
+ kfree_skb(skb);
+ return err;
+}
+
static int bpf_input(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -97,11 +125,11 @@ static int bpf_input(struct sk_buff *skb)
ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
if (ret < 0)
return ret;
+ if (ret == BPF_LWT_REROUTE)
+ return bpf_lwt_input_reroute(skb);
}
if (unlikely(!dst->lwtstate->orig_input)) {
- pr_warn_once("orig_input not set on dst for prog %s\n",
- bpf->out.name);
kfree_skb(skb);
return -EINVAL;
}
@@ -146,6 +174,102 @@ static int xmit_check_hhlen(struct sk_buff *skb)
return 0;
}
+static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
+{
+ struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
+ int oif = l3mdev ? l3mdev->ifindex : 0;
+ struct dst_entry *dst = NULL;
+ int err = -EAFNOSUPPORT;
+ struct sock *sk;
+ struct net *net;
+ bool ipv4;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ ipv4 = true;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ ipv4 = false;
+ else
+ goto err;
+
+ sk = sk_to_full_sk(skb->sk);
+ if (sk) {
+ if (sk->sk_bound_dev_if)
+ oif = sk->sk_bound_dev_if;
+ net = sock_net(sk);
+ } else {
+ net = dev_net(skb_dst(skb)->dev);
+ }
+
+ if (ipv4) {
+ struct iphdr *iph = ip_hdr(skb);
+ struct flowi4 fl4 = {};
+ struct rtable *rt;
+
+ fl4.flowi4_oif = oif;
+ fl4.flowi4_mark = skb->mark;
+ fl4.flowi4_uid = sock_net_uid(net, sk);
+ fl4.flowi4_tos = RT_TOS(iph->tos);
+ fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
+ fl4.flowi4_proto = iph->protocol;
+ fl4.daddr = iph->daddr;
+ fl4.saddr = iph->saddr;
+
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto err;
+ }
+ dst = &rt->dst;
+ } else {
+ struct ipv6hdr *iph6 = ipv6_hdr(skb);
+ struct flowi6 fl6 = {};
+
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_uid = sock_net_uid(net, sk);
+ fl6.flowlabel = ip6_flowinfo(iph6);
+ fl6.flowi6_proto = iph6->nexthdr;
+ fl6.daddr = iph6->daddr;
+ fl6.saddr = iph6->saddr;
+
+ err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6);
+ if (unlikely(err))
+ goto err;
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto err;
+ }
+ }
+ if (unlikely(dst->error)) {
+ err = dst->error;
+ dst_release(dst);
+ goto err;
+ }
+
+ /* Although the skb headroom was reserved in bpf_lwt_push_ip_encap(),
+ * it was reserved for the previous dst, so do it again here in case
+ * the new dst needs more space. The call below is a no-op if the skb
+ * already has enough header space.
+ */
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto err;
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+
+ err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+ if (unlikely(err))
+ return err;
+
+ /* ip[6]_finish_output2 understands LWTUNNEL_XMIT_DONE */
+ return LWTUNNEL_XMIT_DONE;
+
+err:
+ kfree_skb(skb);
+ return err;
+}
+
static int bpf_xmit(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -153,11 +277,20 @@ static int bpf_xmit(struct sk_buff *skb)
bpf = bpf_lwt_lwtunnel(dst->lwtstate);
if (bpf->xmit.prog) {
+ __be16 proto = skb->protocol;
int ret;
ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
switch (ret) {
case BPF_OK:
+ /* If the header changed, e.g. via bpf_lwt_push_encap,
+ * and the protocol changed with it, BPF_LWT_REROUTE
+ * below should have been returned instead of BPF_OK.
+ */
+ if (skb->protocol != proto) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
/* If the header was expanded, headroom might be too
* small for L2 header to come, expand as needed.
*/
@@ -168,6 +301,8 @@ static int bpf_xmit(struct sk_buff *skb)
return LWTUNNEL_XMIT_CONTINUE;
case BPF_REDIRECT:
return LWTUNNEL_XMIT_DONE;
+ case BPF_LWT_REROUTE:
+ return bpf_lwt_xmit_reroute(skb);
default:
return ret;
}
@@ -389,6 +524,133 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
.owner = THIS_MODULE,
};
+static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
+ int encap_len)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_type |= gso_type;
+ skb_decrease_gso_size(shinfo, encap_len);
+ shinfo->gso_segs = 0;
+ return 0;
+}
+
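The skb_decrease_gso_size() call above is what keeps post-encap segmentation within the original size budget: every emitted segment will carry the new outer header, so the inner payload per segment has to shrink by encap_len. Worked numbers (illustrative):

    /* TCP over a 1500-byte MTU: gso_size (the MSS) starts at 1460.
     * After pushing a 20-byte outer IPv4 header, each segment gains
     * 20 bytes of encap, so the inner payload must lose 20:
     */
    skb_decrease_gso_size(shinfo, 20);	/* gso_size: 1460 -> 1440 */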
+static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
+{
+ int next_hdr_offset;
+ void *next_hdr;
+ __u8 protocol;
+
+ /* SCTP and UDP_L4 GSO need more nuanced handling than what
+ * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
+ * So, for now, only TCP GSO packets are let through.
+ */
+ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ return -ENOTSUPP;
+
+ if (ipv4) {
+ protocol = ip_hdr(skb)->protocol;
+ next_hdr_offset = sizeof(struct iphdr);
+ next_hdr = skb_network_header(skb) + next_hdr_offset;
+ } else {
+ protocol = ipv6_hdr(skb)->nexthdr;
+ next_hdr_offset = sizeof(struct ipv6hdr);
+ next_hdr = skb_network_header(skb) + next_hdr_offset;
+ }
+
+ switch (protocol) {
+ case IPPROTO_GRE:
+ next_hdr_offset += sizeof(struct gre_base_hdr);
+ if (next_hdr_offset > encap_len)
+ return -EINVAL;
+
+ if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
+ return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
+ encap_len);
+ return handle_gso_type(skb, SKB_GSO_GRE, encap_len);
+
+ case IPPROTO_UDP:
+ next_hdr_offset += sizeof(struct udphdr);
+ if (next_hdr_offset > encap_len)
+ return -EINVAL;
+
+ if (((struct udphdr *)next_hdr)->check)
+ return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
+ encap_len);
+ return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);
+
+ case IPPROTO_IP:
+ case IPPROTO_IPV6:
+ if (ipv4)
+ return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
+ else
+ return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);
+
+ default:
+ return -EPROTONOSUPPORT;
+ }
+}
+
+int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
+{
+ struct iphdr *iph;
+ bool ipv4;
+ int err;
+
+ if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
+ return -EINVAL;
+
+ /* validate protocol and length */
+ iph = (struct iphdr *)hdr;
+ if (iph->version == 4) {
+ ipv4 = true;
+ if (unlikely(len < iph->ihl * 4))
+ return -EINVAL;
+ } else if (iph->version == 6) {
+ ipv4 = false;
+ if (unlikely(len < sizeof(struct ipv6hdr)))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ if (ingress)
+ err = skb_cow_head(skb, len + skb->mac_len);
+ else
+ err = skb_cow_head(skb,
+ len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
+ if (unlikely(err))
+ return err;
+
+ /* push the encap headers and fix pointers */
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ skb_push(skb, len);
+ if (ingress)
+ skb_postpush_rcsum(skb, iph, len);
+ skb_reset_network_header(skb);
+ memcpy(skb_network_header(skb), hdr, len);
+ bpf_compute_data_pointers(skb);
+ skb_clear_hash(skb);
+
+ if (ipv4) {
+ skb->protocol = htons(ETH_P_IP);
+ iph = ip_hdr(skb);
+
+ if (!iph->check)
+ iph->check = ip_fast_csum((unsigned char *)iph,
+ iph->ihl);
+ } else {
+ skb->protocol = htons(ETH_P_IPV6);
+ }
+
+ if (skb_is_gso(skb))
+ return handle_gso_encap(skb, ipv4, len);
+
+ return 0;
+}
+
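End to end, an lwt BPF program pushes the outer header with the bpf_lwt_push_encap() helper (which lands in bpf_lwt_push_ip_encap() above) and returns BPF_LWT_REROUTE so the stack revalidates the route against the new header. A minimal sketch, assuming the BPF_LWT_ENCAP_IP helper mode introduced alongside this change:

    SEC("lwt_xmit")
    int encap_ipv4(struct __sk_buff *skb)
    {
    	struct iphdr hdr = {
    		.version  = 4,
    		.ihl      = 5,
    		.ttl      = 64,
    		.protocol = IPPROTO_IPIP,
    		.saddr    = bpf_htonl(0xc0000201),	/* 192.0.2.1, example */
    		.daddr    = bpf_htonl(0xc0000202),	/* 192.0.2.2, example */
    	};

    	/* outer length = inner packet + new header; the kernel fills in
    	 * iph->check when it is left at zero (see above).
    	 */
    	hdr.tot_len = bpf_htons(skb->len + sizeof(hdr));

    	if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr, sizeof(hdr)))
    		return BPF_DROP;

    	return BPF_LWT_REROUTE;	/* force a fresh route lookup */
    }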
static int __init bpf_lwt_init(void)
{
return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 0b171756453c..19b557bd294b 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -122,18 +122,18 @@ int lwtunnel_build_state(u16 encap_type,
ret = -EOPNOTSUPP;
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[encap_type]);
- if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
+ if (likely(ops && ops->build_state && try_module_get(ops->owner)))
found = true;
+ rcu_read_unlock();
+
+ if (found) {
ret = ops->build_state(encap, family, cfg, lws, extack);
if (ret)
module_put(ops->owner);
- }
- rcu_read_unlock();
-
- /* don't rely on -EOPNOTSUPP to detect match as build_state
- * handlers could return it
- */
- if (!found) {
+ } else {
+ /* don't rely on -EOPNOTSUPP to detect match as build_state
+ * handlers could return it
+ */
NL_SET_ERR_MSG_ATTR(extack, encap,
"LWT encapsulation type not supported");
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ee605d9d8bd4..30f6fd8f68e0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -18,6 +18,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -41,6 +42,8 @@
#include <linux/inetdevice.h>
#include <net/addrconf.h>
+#include <trace/events/neigh.h>
+
#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...) \
@@ -101,6 +104,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
if (neigh->parms->neigh_cleanup)
neigh->parms->neigh_cleanup(neigh);
+ trace_neigh_cleanup_and_release(neigh, 0);
__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
neigh_release(neigh);
@@ -118,21 +122,77 @@ unsigned long neigh_rand_reach_time(unsigned long base)
}
EXPORT_SYMBOL(neigh_rand_reach_time);
+static void neigh_mark_dead(struct neighbour *n)
+{
+ n->dead = 1;
+ if (!list_empty(&n->gc_list)) {
+ list_del_init(&n->gc_list);
+ atomic_dec(&n->tbl->gc_entries);
+ }
+}
+
+static void neigh_update_gc_list(struct neighbour *n)
+{
+ bool on_gc_list, exempt_from_gc;
+
+ write_lock_bh(&n->tbl->lock);
+ write_lock(&n->lock);
+
+ /* remove from the gc list if the new state is permanent or the
+ * neighbor is externally learned; otherwise the entry should be
+ * on the gc list
+ */
+ exempt_from_gc = n->nud_state & NUD_PERMANENT ||
+ n->flags & NTF_EXT_LEARNED;
+ on_gc_list = !list_empty(&n->gc_list);
+
+ if (exempt_from_gc && on_gc_list) {
+ list_del_init(&n->gc_list);
+ atomic_dec(&n->tbl->gc_entries);
+ } else if (!exempt_from_gc && !on_gc_list) {
+ /* add entries to the tail; cleaning removes from the front */
+ list_add_tail(&n->gc_list, &n->tbl->gc_list);
+ atomic_inc(&n->tbl->gc_entries);
+ }
+
+ write_unlock(&n->lock);
+ write_unlock_bh(&n->tbl->lock);
+}
+
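The same exemption test recurs throughout this patch (here, in neigh_add() below, and via the exempt_from_gc argument to ___neigh_create()); spelled out as a standalone predicate it would read as follows (hypothetical helper, not part of the patch):

    static bool neigh_is_exempt_from_gc(const struct neighbour *n)
    {
    	/* permanent and externally learned entries are never GC candidates */
    	return (n->nud_state & NUD_PERMANENT) ||
    	       (n->flags & NTF_EXT_LEARNED);
    }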
+static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
+ int *notify)
+{
+ bool rc = false;
+ u8 ndm_flags;
+
+ if (!(flags & NEIGH_UPDATE_F_ADMIN))
+ return rc;
+
+ ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
+ if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
+ if (ndm_flags & NTF_EXT_LEARNED)
+ neigh->flags |= NTF_EXT_LEARNED;
+ else
+ neigh->flags &= ~NTF_EXT_LEARNED;
+ rc = true;
+ *notify = 1;
+ }
+
+ return rc;
+}
-static bool neigh_del(struct neighbour *n, __u8 state, __u8 flags,
- struct neighbour __rcu **np, struct neigh_table *tbl)
+static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
+ struct neigh_table *tbl)
{
bool retval = false;
write_lock(&n->lock);
- if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state) &&
- !(n->flags & flags)) {
+ if (refcount_read(&n->refcnt) == 1) {
struct neighbour *neigh;
neigh = rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock));
rcu_assign_pointer(*np, neigh);
- n->dead = 1;
+ neigh_mark_dead(n);
retval = true;
}
write_unlock(&n->lock);
@@ -158,7 +218,7 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
while ((n = rcu_dereference_protected(*np,
lockdep_is_held(&tbl->lock)))) {
if (n == ndel)
- return neigh_del(n, 0, 0, np, tbl);
+ return neigh_del(n, np, tbl);
np = &n->next;
}
return false;
@@ -166,32 +226,29 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
static int neigh_forced_gc(struct neigh_table *tbl)
{
+ int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
+ unsigned long tref = jiffies - 5 * HZ;
+ struct neighbour *n, *tmp;
int shrunk = 0;
- int i;
- struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
write_lock_bh(&tbl->lock);
- nht = rcu_dereference_protected(tbl->nht,
- lockdep_is_held(&tbl->lock));
- for (i = 0; i < (1 << nht->hash_shift); i++) {
- struct neighbour *n;
- struct neighbour __rcu **np;
- np = &nht->hash_buckets[i];
- while ((n = rcu_dereference_protected(*np,
- lockdep_is_held(&tbl->lock))) != NULL) {
- /* Neighbour record may be discarded if:
- * - nobody refers to it.
- * - it is not permanent
- */
- if (neigh_del(n, NUD_PERMANENT, NTF_EXT_LEARNED, np,
- tbl)) {
- shrunk = 1;
- continue;
- }
- np = &n->next;
+ list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
+ if (refcount_read(&n->refcnt) == 1) {
+ bool remove = false;
+
+ write_lock(&n->lock);
+ if ((n->nud_state == NUD_FAILED) ||
+ time_after(tref, n->updated))
+ remove = true;
+ write_unlock(&n->lock);
+
+ if (remove && neigh_remove_one(n, tbl))
+ shrunk++;
+ if (shrunk >= max_clean)
+ break;
}
}
@@ -260,8 +317,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
lockdep_is_held(&tbl->lock)));
write_lock(&n->lock);
neigh_del_timer(n);
- n->dead = 1;
-
+ neigh_mark_dead(n);
if (refcount_read(&n->refcnt) != 1) {
/* The most unpleasant situation.
We must destroy neighbour entry,
@@ -321,13 +377,18 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
}
EXPORT_SYMBOL(neigh_ifdown);
-static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
+static struct neighbour *neigh_alloc(struct neigh_table *tbl,
+ struct net_device *dev,
+ bool exempt_from_gc)
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
int entries;
- entries = atomic_inc_return(&tbl->entries) - 1;
+ if (exempt_from_gc)
+ goto do_alloc;
+
+ entries = atomic_inc_return(&tbl->gc_entries) - 1;
if (entries >= tbl->gc_thresh3 ||
(entries >= tbl->gc_thresh2 &&
time_after(now, tbl->last_flush + 5 * HZ))) {
@@ -340,6 +401,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
}
}
+do_alloc:
n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
if (!n)
goto out_entries;
@@ -358,11 +420,15 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
n->tbl = tbl;
refcount_set(&n->refcnt, 1);
n->dead = 1;
+ INIT_LIST_HEAD(&n->gc_list);
+
+ atomic_inc(&tbl->entries);
out:
return n;
out_entries:
- atomic_dec(&tbl->entries);
+ if (!exempt_from_gc)
+ atomic_dec(&tbl->gc_entries);
goto out;
}
@@ -381,12 +447,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
return NULL;
- if (size <= PAGE_SIZE)
+ if (size <= PAGE_SIZE) {
buckets = kzalloc(size, GFP_ATOMIC);
- else
+ } else {
buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
+ kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
+ }
if (!buckets) {
kfree(ret);
return NULL;
@@ -406,10 +474,12 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
struct neighbour __rcu **buckets = nht->hash_buckets;
- if (size <= PAGE_SIZE)
+ if (size <= PAGE_SIZE) {
kfree(buckets);
- else
+ } else {
+ kmemleak_free(buckets);
free_pages((unsigned long)buckets, get_order(size));
+ }
kfree(nht);
}
@@ -505,13 +575,15 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
}
EXPORT_SYMBOL(neigh_lookup_nodev);
-struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
- struct net_device *dev, bool want_ref)
+static struct neighbour *___neigh_create(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev,
+ bool exempt_from_gc, bool want_ref)
{
+ struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
u32 hash_val;
unsigned int key_len = tbl->key_len;
int error;
- struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
struct neigh_hash_table *nht;
if (!n) {
@@ -574,6 +646,9 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
}
n->dead = 0;
+ if (!exempt_from_gc)
+ list_add_tail(&n->gc_list, &n->tbl->gc_list);
+
if (want_ref)
neigh_hold(n);
rcu_assign_pointer(n->next,
@@ -591,6 +666,12 @@ out_neigh_release:
neigh_release(n);
goto out;
}
+
+struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev, bool want_ref)
+{
+ return ___neigh_create(tbl, pkey, dev, false, want_ref);
+}
EXPORT_SYMBOL(__neigh_create);
static u32 pneigh_hash(const void *pkey, unsigned int key_len)
@@ -652,6 +733,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
if (!n)
goto out;
+ n->protocol = 0;
write_pnet(&n->net, net);
memcpy(n->key, pkey, key_len);
n->dev = dev;
@@ -854,7 +936,7 @@ static void neigh_periodic_work(struct work_struct *work)
(state == NUD_FAILED ||
time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
*np = n->next;
- n->dead = 1;
+ neigh_mark_dead(n);
write_unlock(&n->lock);
neigh_cleanup_and_release(n);
continue;
@@ -928,7 +1010,7 @@ static void neigh_probe(struct neighbour *neigh)
if (neigh->ops->solicit)
neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
- kfree_skb(skb);
+ consume_skb(skb);
}
/* Called when a timer expires for a neighbour entry. */
@@ -1016,6 +1098,8 @@ out:
if (notify)
neigh_update_notify(neigh, 0);
+ trace_neigh_timer_handler(neigh, 0);
+
neigh_release(neigh);
}
@@ -1086,6 +1170,7 @@ out_unlock_bh:
else
write_unlock(&neigh->lock);
local_bh_enable();
+ trace_neigh_event_send_done(neigh, rc);
return rc;
out_dead:
@@ -1093,6 +1178,7 @@ out_dead:
goto out_unlock_bh;
write_unlock_bh(&neigh->lock);
kfree_skb(skb);
+ trace_neigh_event_send_dead(neigh, 1);
return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
@@ -1137,15 +1223,19 @@ static void neigh_update_hhs(struct neighbour *neigh)
Caller MUST hold reference count on the entry.
*/
-int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
- u32 flags, u32 nlmsg_pid)
+static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
+ u8 new, u32 flags, u32 nlmsg_pid,
+ struct netlink_ext_ack *extack)
{
+ bool ext_learn_change = false;
u8 old;
int err;
int notify = 0;
struct net_device *dev;
int update_isrouter = 0;
+ trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
+
write_lock_bh(&neigh->lock);
dev = neigh->dev;
@@ -1155,10 +1245,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
(old & (NUD_NOARP | NUD_PERMANENT)))
goto out;
- if (neigh->dead)
+ if (neigh->dead) {
+ NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
goto out;
+ }
- neigh_update_ext_learned(neigh, flags, &notify);
+ ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
if (!(new & NUD_VALID)) {
neigh_del_timer(neigh);
@@ -1193,8 +1285,10 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
use it, otherwise discard the request.
*/
err = -EINVAL;
- if (!(old & NUD_VALID))
+ if (!(old & NUD_VALID)) {
+ NL_SET_ERR_MSG(extack, "No link layer address given");
goto out;
+ }
lladdr = neigh->ha;
}
@@ -1302,11 +1396,22 @@ out:
neigh_update_is_router(neigh, flags, &notify);
write_unlock_bh(&neigh->lock);
+ if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
+ neigh_update_gc_list(neigh);
+
if (notify)
neigh_update_notify(neigh, nlmsg_pid);
+ trace_neigh_update_done(neigh, err);
+
return err;
}
+
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ u32 flags, u32 nlmsg_pid)
+{
+ return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
+}
EXPORT_SYMBOL(neigh_update);
/* Update the neigh to listen temporarily for probe responses, even if it is
@@ -1571,6 +1676,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
unsigned long phsize;
INIT_LIST_HEAD(&tbl->parms_list);
+ INIT_LIST_HEAD(&tbl->gc_list);
list_add(&tbl->parms.list, &tbl->parms_list);
write_pnet(&tbl->parms.net, &init_net);
refcount_set(&tbl->parms.refcnt, 1);
@@ -1662,6 +1768,19 @@ static struct neigh_table *neigh_find_table(int family)
return tbl;
}
+const struct nla_policy nda_policy[NDA_MAX+1] = {
+ [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+ [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+ [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
+ [NDA_PROBES] = { .type = NLA_U32 },
+ [NDA_VLAN] = { .type = NLA_U16 },
+ [NDA_PORT] = { .type = NLA_U16 },
+ [NDA_VNI] = { .type = NLA_U32 },
+ [NDA_IFINDEX] = { .type = NLA_U32 },
+ [NDA_MASTER] = { .type = NLA_U32 },
+ [NDA_PROTOCOL] = { .type = NLA_U8 },
+};
+
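With the policy table in place, nlmsg_parse() does the type and length checking centrally, so callers are left with presence checks only. The consumer side (mirroring what neigh_add() below does) looks like:

    struct nlattr *tb[NDA_MAX + 1];
    u8 protocol = 0;
    int err;

    err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
    		  nda_policy, extack);
    if (err < 0)
    	return err;	/* e.g. an NDA_DST longer than MAX_ADDR_LEN is rejected */

    if (tb[NDA_PROTOCOL])
    	protocol = nla_get_u8(tb[NDA_PROTOCOL]);	/* length pre-validated */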
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -1678,8 +1797,10 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
- if (dst_attr == NULL)
+ if (!dst_attr) {
+ NL_SET_ERR_MSG(extack, "Network address not specified");
goto out;
+ }
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex) {
@@ -1694,8 +1815,10 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tbl == NULL)
return -EAFNOSUPPORT;
- if (nla_len(dst_attr) < (int)tbl->key_len)
+ if (nla_len(dst_attr) < (int)tbl->key_len) {
+ NL_SET_ERR_MSG(extack, "Invalid network address");
goto out;
+ }
if (ndm->ndm_flags & NTF_PROXY) {
err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
@@ -1711,10 +1834,9 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
}
- err = neigh_update(neigh, NULL, NUD_FAILED,
- NEIGH_UPDATE_F_OVERRIDE |
- NEIGH_UPDATE_F_ADMIN,
- NETLINK_CB(skb).portid);
+ err = __neigh_update(neigh, NULL, NUD_FAILED,
+ NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
+ NETLINK_CB(skb).portid, extack);
write_lock_bh(&tbl->lock);
neigh_release(neigh);
neigh_remove_one(neigh, tbl);
@@ -1736,16 +1858,19 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_device *dev = NULL;
struct neighbour *neigh;
void *dst, *lladdr;
+ u8 protocol = 0;
int err;
ASSERT_RTNL();
- err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
+ err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, nda_policy, extack);
if (err < 0)
goto out;
err = -EINVAL;
- if (tb[NDA_DST] == NULL)
+ if (!tb[NDA_DST]) {
+ NL_SET_ERR_MSG(extack, "Network address not specified");
goto out;
+ }
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex) {
@@ -1755,19 +1880,27 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
}
- if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
+ if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
+ NL_SET_ERR_MSG(extack, "Invalid link address");
goto out;
+ }
}
tbl = neigh_find_table(ndm->ndm_family);
if (tbl == NULL)
return -EAFNOSUPPORT;
- if (nla_len(tb[NDA_DST]) < (int)tbl->key_len)
+ if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
+ NL_SET_ERR_MSG(extack, "Invalid network address");
goto out;
+ }
+
dst = nla_data(tb[NDA_DST]);
lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
+ if (tb[NDA_PROTOCOL])
+ protocol = nla_get_u8(tb[NDA_PROTOCOL]);
+
if (ndm->ndm_flags & NTF_PROXY) {
struct pneigh_entry *pn;
@@ -1775,22 +1908,30 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
pn = pneigh_lookup(tbl, net, dst, dev, 1);
if (pn) {
pn->flags = ndm->ndm_flags;
+ if (protocol)
+ pn->protocol = protocol;
err = 0;
}
goto out;
}
- if (dev == NULL)
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Device not specified");
goto out;
+ }
neigh = neigh_lookup(tbl, dst, dev);
if (neigh == NULL) {
+ bool exempt_from_gc;
+
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
err = -ENOENT;
goto out;
}
- neigh = __neigh_lookup_errno(tbl, dst, dev);
+ exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
+ ndm->ndm_flags & NTF_EXT_LEARNED;
+ neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
if (IS_ERR(neigh)) {
err = PTR_ERR(neigh);
goto out;
@@ -1817,8 +1958,12 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
neigh_event_send(neigh, NULL);
err = 0;
} else
- err = neigh_update(neigh, lladdr, ndm->ndm_state, flags,
- NETLINK_CB(skb).portid);
+ err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
+ NETLINK_CB(skb).portid, extack);
+
+ if (protocol)
+ neigh->protocol = protocol;
+
neigh_release(neigh);
out:
@@ -2312,6 +2457,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
+ if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -2343,6 +2491,9 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
goto nla_put_failure;
+ if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -2364,7 +2515,7 @@ static bool neigh_master_filtered(struct net_device *dev, int master_idx)
if (!master_idx)
return false;
- master = netdev_master_upper_dev_get(dev);
+ master = dev ? netdev_master_upper_dev_get(dev) : NULL;
if (!master || master->ifindex != master_idx)
return true;
@@ -2373,7 +2524,7 @@ static bool neigh_master_filtered(struct net_device *dev, int master_idx)
static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
{
- if (filter_idx && dev->ifindex != filter_idx)
+ if (filter_idx && (!dev || dev->ifindex != filter_idx))
return true;
return false;
@@ -2494,16 +2645,21 @@ static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
ndm = nlmsg_data(nlh);
if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
- ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) {
+ ndm->ndm_state || ndm->ndm_type) {
NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
return -EINVAL;
}
+ if (ndm->ndm_flags & ~NTF_PROXY) {
+ NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
+ return -EINVAL;
+ }
+
err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- NULL, extack);
+ nda_policy, extack);
} else {
err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- NULL, extack);
+ nda_policy, extack);
}
if (err < 0)
return err;
@@ -2515,17 +2671,9 @@ static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
/* all new attributes should require strict_check */
switch (i) {
case NDA_IFINDEX:
- if (nla_len(tb[i]) != sizeof(u32)) {
- NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in neighbor dump request");
- return -EINVAL;
- }
filter->dev_idx = nla_get_u32(tb[i]);
break;
case NDA_MASTER:
- if (nla_len(tb[i]) != sizeof(u32)) {
- NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in neighbor dump request");
- return -EINVAL;
- }
filter->master_idx = nla_get_u32(tb[i]);
break;
default:
@@ -2585,6 +2733,186 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
+static int neigh_valid_get_req(const struct nlmsghdr *nlh,
+ struct neigh_table **tbl,
+ void **dst, int *dev_idx, u8 *ndm_flags,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[NDA_MAX + 1];
+ struct ndmsg *ndm;
+ int err, i;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
+ return -EINVAL;
+ }
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
+ ndm->ndm_type) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
+ return -EINVAL;
+ }
+
+ if (ndm->ndm_flags & ~NTF_PROXY) {
+ NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
+ nda_policy, extack);
+ if (err < 0)
+ return err;
+
+ *ndm_flags = ndm->ndm_flags;
+ *dev_idx = ndm->ndm_ifindex;
+ *tbl = neigh_find_table(ndm->ndm_family);
+ if (*tbl == NULL) {
+ NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
+ return -EAFNOSUPPORT;
+ }
+
+ for (i = 0; i <= NDA_MAX; ++i) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NDA_DST:
+ if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
+ NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
+ return -EINVAL;
+ }
+ *dst = nla_data(tb[i]);
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static inline size_t neigh_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ndmsg))
+ + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
+ + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
+ + nla_total_size(sizeof(struct nda_cacheinfo))
+ + nla_total_size(4) /* NDA_PROBES */
+ + nla_total_size(1); /* NDA_PROTOCOL */
+}
+
+static int neigh_get_reply(struct net *net, struct neighbour *neigh,
+ u32 pid, u32 seq)
+{
+ struct sk_buff *skb;
+ int err = 0;
+
+ skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
+ if (err) {
+ kfree_skb(skb);
+ goto errout;
+ }
+
+ err = rtnl_unicast(skb, net, pid);
+errout:
+ return err;
+}
+
+static inline size_t pneigh_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ndmsg))
+ + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
+ + nla_total_size(1); /* NDA_PROTOCOL */
+}
+
+static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
+ u32 pid, u32 seq, struct neigh_table *tbl)
+{
+ struct sk_buff *skb;
+ int err = 0;
+
+ skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
+ if (err) {
+ kfree_skb(skb);
+ goto errout;
+ }
+
+ err = rtnl_unicast(skb, net, pid);
+errout:
+ return err;
+}
+
+static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(in_skb->sk);
+ struct net_device *dev = NULL;
+ struct neigh_table *tbl = NULL;
+ struct neighbour *neigh;
+ void *dst = NULL;
+ u8 ndm_flags = 0;
+ int dev_idx = 0;
+ int err;
+
+ err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
+ extack);
+ if (err < 0)
+ return err;
+
+ if (dev_idx) {
+ dev = __dev_get_by_index(net, dev_idx);
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Unknown device ifindex");
+ return -ENODEV;
+ }
+ }
+
+ if (!dst) {
+ NL_SET_ERR_MSG(extack, "Network address not specified");
+ return -EINVAL;
+ }
+
+ if (ndm_flags & NTF_PROXY) {
+ struct pneigh_entry *pn;
+
+ pn = pneigh_lookup(tbl, net, dst, dev, 0);
+ if (!pn) {
+ NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
+ return -ENOENT;
+ }
+ return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, tbl);
+ }
+
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "No device specified");
+ return -EINVAL;
+ }
+
+ neigh = neigh_lookup(tbl, dst, dev);
+ if (!neigh) {
+ NL_SET_ERR_MSG(extack, "Neighbour entry not found");
+ return -ENOENT;
+ }
+
+ err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq);
+
+ neigh_release(neigh);
+
+ return err;
+}
+
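With the doit handler registered below, a single entry can be fetched directly instead of filtering a full dump. A hypothetical userspace request over a raw NETLINK_ROUTE socket (socket setup, includes, and error handling omitted):

    struct {
    	struct nlmsghdr nlh;
    	struct ndmsg	ndm;
    	struct rtattr	rta;
    	struct in_addr	dst;
    } req = {
    	.nlh = {
    		.nlmsg_len   = sizeof(req),
    		.nlmsg_type  = RTM_GETNEIGH,
    		.nlmsg_flags = NLM_F_REQUEST,
    	},
    	.ndm = {
    		.ndm_family  = AF_INET,
    		.ndm_ifindex = if_nametoindex("eth0"),	/* example device */
    	},
    	.rta = {
    		.rta_len  = RTA_LENGTH(sizeof(struct in_addr)),
    		.rta_type = NDA_DST,
    	},
    };

    inet_pton(AF_INET, "192.0.2.1", &req.dst);	/* example address */
    send(nl_sock, &req, sizeof(req), 0);	/* reply comes back as RTM_NEWNEIGH */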
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
int chain;
@@ -2631,7 +2959,7 @@ void __neigh_for_each_release(struct neigh_table *tbl,
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
- n->dead = 1;
+ neigh_mark_dead(n);
} else
np = &n->next;
write_unlock(&n->lock);
@@ -2992,15 +3320,6 @@ static const struct seq_operations neigh_stat_seq_ops = {
};
#endif /* CONFIG_PROC_FS */
-static inline size_t neigh_nlmsg_size(void)
-{
- return NLMSG_ALIGN(sizeof(struct ndmsg))
- + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
- + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
- + nla_total_size(sizeof(struct nda_cacheinfo))
- + nla_total_size(4); /* NDA_PROBES */
-}
-
static void __neigh_notify(struct neighbour *n, int type, int flags,
u32 pid)
{
@@ -3384,7 +3703,7 @@ static int __init neigh_init(void)
{
rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, 0);
+ rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
0);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index bd67c4d0fcfd..4ff661f6f989 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -12,7 +12,6 @@
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
@@ -337,7 +336,7 @@ NETDEVICE_SHOW_RW(mtu, fmt_dec);
static int change_flags(struct net_device *dev, unsigned long new_flags)
{
- return dev_change_flags(dev, (unsigned int)new_flags);
+ return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}
static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
@@ -501,16 +500,11 @@ static ssize_t phys_switch_id_show(struct device *dev,
return restart_syscall();
if (dev_isalive(netdev)) {
- struct switchdev_attr attr = {
- .orig_dev = netdev,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- .flags = SWITCHDEV_F_NO_RECURSE,
- };
+ struct netdev_phys_item_id ppid = { };
- ret = switchdev_port_attr_get(netdev, &attr);
+ ret = dev_get_port_parent_id(netdev, &ppid, false);
if (!ret)
- ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
- attr.u.ppid.id);
+ ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
}
rtnl_unlock();
@@ -1342,8 +1336,7 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
if (tc < 0)
return -EINVAL;
}
- mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
- GFP_KERNEL);
+ mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
if (!mask)
return -ENOMEM;
@@ -1372,7 +1365,7 @@ out_no_maps:
rcu_read_unlock();
len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
- kfree(mask);
+ bitmap_free(mask);
return len < PAGE_SIZE ? len : -EINVAL;
}
@@ -1388,8 +1381,7 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
- GFP_KERNEL);
+ mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
if (!mask)
return -ENOMEM;
@@ -1397,7 +1389,7 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
if (err) {
- kfree(mask);
+ bitmap_free(mask);
return err;
}
@@ -1405,7 +1397,7 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
err = __netif_set_xps_queue(dev, mask, index, true);
cpus_read_unlock();
- kfree(mask);
+ bitmap_free(mask);
return err ? : len;
}
@@ -1547,6 +1539,9 @@ static int register_queue_kobjects(struct net_device *dev)
error:
netdev_queue_update_kobjects(dev, txq, 0);
net_rx_queue_update_kobjects(dev, rxq, 0);
+#ifdef CONFIG_SYSFS
+ kset_unregister(dev->queues_kset);
+#endif
return error;
}
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index 419af6dfe29f..470b179d599e 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -43,6 +43,14 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(fdb_delete);
EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_update);
#endif
+#include <trace/events/neigh.h>
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_update_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_timer_handler);
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_event_send_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_event_send_dead);
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_cleanup_and_release);
+
EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index fefe72774aeb..17f36317363d 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -669,6 +669,7 @@ static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
[NETNSA_NSID] = { .type = NLA_S32 },
[NETNSA_PID] = { .type = NLA_U32 },
[NETNSA_FD] = { .type = NLA_U32 },
+ [NETNSA_TARGET_NSID] = { .type = NLA_S32 },
};
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -735,23 +736,38 @@ static int rtnl_net_get_size(void)
{
return NLMSG_ALIGN(sizeof(struct rtgenmsg))
+ nla_total_size(sizeof(s32)) /* NETNSA_NSID */
+ + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
;
}
-static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
- int cmd, struct net *net, int nsid)
+struct net_fill_args {
+ u32 portid;
+ u32 seq;
+ int flags;
+ int cmd;
+ int nsid;
+ bool add_ref;
+ int ref_nsid;
+};
+
+static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
struct nlmsghdr *nlh;
struct rtgenmsg *rth;
- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
+ nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
+ args->flags);
if (!nlh)
return -EMSGSIZE;
rth = nlmsg_data(nlh);
rth->rtgen_family = AF_UNSPEC;
- if (nla_put_s32(skb, NETNSA_NSID, nsid))
+ if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
+ goto nla_put_failure;
+
+ if (args->add_ref &&
+ nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -762,18 +778,57 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int rtnl_net_valid_getid_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ int i, err;
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+ rtnl_net_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+ rtnl_net_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= NETNSA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NETNSA_PID:
+ case NETNSA_FD:
+ case NETNSA_NSID:
+ case NETNSA_TARGET_NSID:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
+ struct net_fill_args fillargs = {
+ .portid = NETLINK_CB(skb).portid,
+ .seq = nlh->nlmsg_seq,
+ .cmd = RTM_NEWNSID,
+ };
+ struct net *peer, *target = net;
struct nlattr *nla;
struct sk_buff *msg;
- struct net *peer;
- int err, id;
+ int err;
- err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
- rtnl_net_policy, extack);
+ err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
if (err < 0)
return err;
if (tb[NETNSA_PID]) {
@@ -782,6 +837,11 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
} else if (tb[NETNSA_FD]) {
peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
nla = tb[NETNSA_FD];
+ } else if (tb[NETNSA_NSID]) {
+ peer = get_net_ns_by_id(net, nla_get_u32(tb[NETNSA_NSID]));
+ if (!peer)
+ peer = ERR_PTR(-ENOENT);
+ nla = tb[NETNSA_NSID];
} else {
NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
return -EINVAL;
@@ -793,15 +853,29 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
return PTR_ERR(peer);
}
+ if (tb[NETNSA_TARGET_NSID]) {
+ int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);
+
+ target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
+ if (IS_ERR(target)) {
+ NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
+ NL_SET_ERR_MSG(extack,
+ "Target netns reference is invalid");
+ err = PTR_ERR(target);
+ goto out;
+ }
+ fillargs.add_ref = true;
+ fillargs.ref_nsid = peernet2id(net, peer);
+ }
+
msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
if (!msg) {
err = -ENOMEM;
goto out;
}
- id = peernet2id(net, peer);
- err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
- RTM_NEWNSID, net, id);
+ fillargs.nsid = peernet2id(target, peer);
+ err = rtnl_net_fill(msg, &fillargs);
if (err < 0)
goto err_out;
@@ -811,14 +885,17 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
err_out:
nlmsg_free(msg);
out:
+ if (fillargs.add_ref)
+ put_net(target);
put_net(peer);
return err;
}
struct rtnl_net_dump_cb {
- struct net *net;
+ struct net *tgt_net;
+ struct net *ref_net;
struct sk_buff *skb;
- struct netlink_callback *cb;
+ struct net_fill_args fillargs;
int idx;
int s_idx;
};
@@ -831,9 +908,10 @@ static int rtnl_net_dumpid_one(int id, void *peer, void *data)
if (net_cb->idx < net_cb->s_idx)
goto cont;
- ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
- net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWNSID, net_cb->net, id);
+ net_cb->fillargs.nsid = id;
+ if (net_cb->fillargs.add_ref)
+ net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
+ ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
if (ret < 0)
return ret;
@@ -842,33 +920,96 @@ cont:
return 0;
}
+static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
+ struct rtnl_net_dump_cb *net_cb,
+ struct netlink_callback *cb)
+{
+ struct netlink_ext_ack *extack = cb->extack;
+ struct nlattr *tb[NETNSA_MAX + 1];
+ int err, i;
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+ rtnl_net_policy, extack);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i <= NETNSA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ if (i == NETNSA_TARGET_NSID) {
+ struct net *net;
+
+ net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
+ if (IS_ERR(net)) {
+ NL_SET_BAD_ATTR(extack, tb[i]);
+ NL_SET_ERR_MSG(extack,
+ "Invalid target network namespace id");
+ return PTR_ERR(net);
+ }
+ net_cb->fillargs.add_ref = true;
+ net_cb->ref_net = net_cb->tgt_net;
+ net_cb->tgt_net = net;
+ } else {
+ NL_SET_BAD_ATTR(extack, tb[i]);
+ NL_SET_ERR_MSG(extack,
+ "Unsupported attribute in dump request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct net *net = sock_net(skb->sk);
struct rtnl_net_dump_cb net_cb = {
- .net = net,
+ .tgt_net = sock_net(skb->sk),
.skb = skb,
- .cb = cb,
+ .fillargs = {
+ .portid = NETLINK_CB(cb->skb).portid,
+ .seq = cb->nlh->nlmsg_seq,
+ .flags = NLM_F_MULTI,
+ .cmd = RTM_NEWNSID,
+ },
.idx = 0,
.s_idx = cb->args[0],
};
+ int err = 0;
- if (cb->strict_check &&
- nlmsg_attrlen(cb->nlh, sizeof(struct rtgenmsg))) {
- NL_SET_ERR_MSG(cb->extack, "Unknown data in network namespace id dump request");
- return -EINVAL;
+ if (cb->strict_check) {
+ err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
+ if (err < 0)
+ goto end;
}
- spin_lock_bh(&net->nsid_lock);
- idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
- spin_unlock_bh(&net->nsid_lock);
+ spin_lock_bh(&net_cb.tgt_net->nsid_lock);
+ if (net_cb.fillargs.add_ref &&
+ !net_eq(net_cb.ref_net, net_cb.tgt_net) &&
+ !spin_trylock_bh(&net_cb.ref_net->nsid_lock)) {
+ spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
+ err = -EAGAIN;
+ goto end;
+ }
+ idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
+ if (net_cb.fillargs.add_ref &&
+ !net_eq(net_cb.ref_net, net_cb.tgt_net))
+ spin_unlock_bh(&net_cb.ref_net->nsid_lock);
+ spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
cb->args[0] = net_cb.idx;
- return skb->len;
+end:
+ if (net_cb.fillargs.add_ref)
+ put_net(net_cb.tgt_net);
+ return err < 0 ? err : skb->len;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
+ struct net_fill_args fillargs = {
+ .cmd = cmd,
+ .nsid = id,
+ };
struct sk_buff *msg;
int err = -ENOMEM;
@@ -876,7 +1017,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id)
if (!msg)
goto out;
- err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
+ err = rtnl_net_fill(msg, &fillargs);
if (err < 0)
goto err_out;
@@ -917,7 +1058,8 @@ static int __init net_ns_init(void)
init_net_initialized = true;
up_write(&pernet_ops_rwsem);
- register_pernet_subsys(&net_ns_ops);
+ if (register_pernet_subsys(&net_ns_ops))
+ panic("Could not register network namespace subsystems");
rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
RTNL_FLAG_DOIT_UNLOCKED);
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 5e4f04004a49..7bf833598615 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -106,6 +106,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
iterate_fd(p->files, 0, update_classid_sock,
(void *)(unsigned long)cs->classid);
task_unlock(p);
+ cond_resched();
}
css_task_iter_end(&it);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 5da9552b186b..361aabffb8c0 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -663,7 +663,7 @@ int netpoll_setup(struct netpoll *np)
np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
- err = dev_open(ndev);
+ err = dev_open(ndev, NULL);
if (err) {
np_err(np, "failed to open %s\n", ndev->name);
@@ -717,7 +717,8 @@ int netpoll_setup(struct netpoll *np)
read_lock_bh(&idev->lock);
list_for_each_entry(ifp, &idev->addr_list, if_list) {
- if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+ if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+ !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
continue;
np->local_ip.in6 = ifp->addr;
err = 0;
@@ -800,7 +801,7 @@ void __netpoll_cleanup(struct netpoll *np)
ops->ndo_netpoll_cleanup(np->dev);
RCU_INIT_POINTER(np->dev->npinfo, NULL);
- call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+ call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
} else
RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
@@ -811,7 +812,7 @@ void __netpoll_free(struct netpoll *np)
ASSERT_RTNL();
/* Wait for transmitting packets to finish before freeing. */
- synchronize_rcu_bh();
+ synchronize_rcu();
__netpoll_cleanup(np);
kfree(np);
}
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 43a932cb609b..5b2252c6d49b 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -136,17 +136,19 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
if (!(pool->p.flags & PP_FLAG_DMA_MAP))
goto skip_dma_map;
- /* Setup DMA mapping: use page->private for DMA-addr
+ /* Setup DMA mapping: use 'struct page' area for storing DMA-addr
+ * since dma_addr_t can be either 32 or 64 bits and does not always fit
+ * into the page private data (e.g. a 32-bit CPU with 64-bit DMA caps)
* This mapping is kept for lifetime of page, until leaving pool.
*/
- dma = dma_map_page(pool->p.dev, page, 0,
- (PAGE_SIZE << pool->p.order),
- pool->p.dma_dir);
+ dma = dma_map_page_attrs(pool->p.dev, page, 0,
+ (PAGE_SIZE << pool->p.order),
+ pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(pool->p.dev, dma)) {
put_page(page);
return NULL;
}
- set_page_private(page, dma); /* page->private = dma; */
+ page->dma_addr = dma;
skip_dma_map:
/* A page that was just allocated should/must have refcnt 1. */
@@ -175,13 +177,17 @@ EXPORT_SYMBOL(page_pool_alloc_pages);
static void __page_pool_clean_page(struct page_pool *pool,
struct page *page)
{
+ dma_addr_t dma;
+
if (!(pool->p.flags & PP_FLAG_DMA_MAP))
return;
+ dma = page->dma_addr;
/* DMA unmap */
- dma_unmap_page(pool->p.dev, page_private(page),
- PAGE_SIZE << pool->p.order, pool->p.dma_dir);
- set_page_private(page, 0);
+ dma_unmap_page_attrs(pool->p.dev, dma,
+ PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ page->dma_addr = 0;
}
/* Return a page to the page allocator, cleaning up our state */
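Since the mapping now lives in struct page and was created with DMA_ATTR_SKIP_CPU_SYNC, a driver that pulls pages from the pool reads the address straight off the page and owns whatever CPU sync it needs. A sketch of an RX refill path under those assumptions (rx_desc is driver-specific):

    struct page *page;
    dma_addr_t dma;

    page = page_pool_alloc_pages(pool, GFP_ATOMIC); /* mapped if PP_FLAG_DMA_MAP */
    if (!page)
    	return -ENOMEM;

    dma = page->dma_addr;	/* stored by __page_pool_alloc_pages_slow() */
    dma_sync_single_for_device(pool->p.dev, dma,
    			   PAGE_SIZE << pool->p.order,
    			   pool->p.dma_dir);	/* the pool skipped this sync */
    rx_desc->addr = cpu_to_le64(dma);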
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 6ac919847ce6..f3f5a78cd062 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -158,6 +158,7 @@
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
+#include <linux/mmzone.h>
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ipv6.h>
@@ -3625,7 +3626,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->svlan_cfi = 0;
pkt_dev->svlan_id = 0xffff;
pkt_dev->burst = 1;
- pkt_dev->node = -1;
+ pkt_dev->node = NUMA_NO_NODE;
err = pktgen_setup_dev(t->net, pkt_dev, ifname);
if (err)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 0958c7be2c22..a51cab95ba64 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -46,7 +46,6 @@
#include <linux/inet.h>
#include <linux/netdevice.h>
-#include <net/switchdev.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
@@ -59,7 +58,7 @@
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
-#define RTNL_MAX_TYPE 49
+#define RTNL_MAX_TYPE 50
#define RTNL_SLAVE_MAX_TYPE 36
struct rtnl_link {
@@ -1146,22 +1145,17 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
+ struct netdev_phys_item_id ppid = { };
int err;
- struct switchdev_attr attr = {
- .orig_dev = dev,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- .flags = SWITCHDEV_F_NO_RECURSE,
- };
- err = switchdev_port_attr_get(dev, &attr);
+ err = dev_get_port_parent_id(dev, &ppid, false);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
return err;
}
- if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
- attr.u.ppid.id))
+ if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
return -EMSGSIZE;
return 0;
@@ -2444,7 +2438,7 @@ static int do_setlink(const struct sk_buff *skb,
sa->sa_family = dev->type;
memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
dev->addr_len);
- err = dev_set_mac_address(dev, sa);
+ err = dev_set_mac_address(dev, sa, extack);
kfree(sa);
if (err)
goto errout;
@@ -2489,7 +2483,8 @@ static int do_setlink(const struct sk_buff *skb,
}
if (ifm->ifi_flags || ifm->ifi_change) {
- err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
+ err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
+ extack);
if (err < 0)
goto errout;
}
@@ -2870,7 +2865,8 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
old_flags = dev->flags;
if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
- err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
+ err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
+ NULL);
if (err < 0)
return err;
}
@@ -2885,9 +2881,11 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
}
EXPORT_SYMBOL(rtnl_configure_link);
-struct net_device *rtnl_create_link(struct net *net,
- const char *ifname, unsigned char name_assign_type,
- const struct rtnl_link_ops *ops, struct nlattr *tb[])
+struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+ unsigned char name_assign_type,
+ const struct rtnl_link_ops *ops,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack)
{
struct net_device *dev;
unsigned int num_tx_queues = 1;
@@ -2903,11 +2901,15 @@ struct net_device *rtnl_create_link(struct net *net,
else if (ops->get_num_rx_queues)
num_rx_queues = ops->get_num_rx_queues();
- if (num_tx_queues < 1 || num_tx_queues > 4096)
+ if (num_tx_queues < 1 || num_tx_queues > 4096) {
+ NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
return ERR_PTR(-EINVAL);
+ }
- if (num_rx_queues < 1 || num_rx_queues > 4096)
+ if (num_rx_queues < 1 || num_rx_queues > 4096) {
+ NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
return ERR_PTR(-EINVAL);
+ }
dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
ops->setup, num_tx_queues, num_rx_queues);
@@ -2965,20 +2967,24 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
return 0;
}
-static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
+static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct nlattr **attr, struct netlink_ext_ack *extack)
{
+ struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
+ unsigned char name_assign_type = NET_NAME_USER;
+ struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
+ const struct rtnl_link_ops *m_ops = NULL;
+ struct net_device *master_dev = NULL;
struct net *net = sock_net(skb->sk);
const struct rtnl_link_ops *ops;
- const struct rtnl_link_ops *m_ops = NULL;
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net *dest_net, *link_net;
+ struct nlattr **slave_data;
+ char kind[MODULE_NAME_LEN];
struct net_device *dev;
- struct net_device *master_dev = NULL;
struct ifinfomsg *ifm;
- char kind[MODULE_NAME_LEN];
char ifname[IFNAMSIZ];
- struct nlattr *tb[IFLA_MAX+1];
- struct nlattr *linkinfo[IFLA_INFO_MAX+1];
- unsigned char name_assign_type = NET_NAME_USER;
+ struct nlattr **data;
int err;
#ifdef CONFIG_MODULES
@@ -3034,193 +3040,247 @@ replay:
ops = NULL;
}
- if (1) {
- struct nlattr *attr[RTNL_MAX_TYPE + 1];
- struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
- struct nlattr **data = NULL;
- struct nlattr **slave_data = NULL;
- struct net *dest_net, *link_net = NULL;
-
- if (ops) {
- if (ops->maxtype > RTNL_MAX_TYPE)
- return -EINVAL;
+ data = NULL;
+ if (ops) {
+ if (ops->maxtype > RTNL_MAX_TYPE)
+ return -EINVAL;
- if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
- err = nla_parse_nested(attr, ops->maxtype,
- linkinfo[IFLA_INFO_DATA],
- ops->policy, NULL);
- if (err < 0)
- return err;
- data = attr;
- }
- if (ops->validate) {
- err = ops->validate(tb, data, extack);
- if (err < 0)
- return err;
- }
+ if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
+ err = nla_parse_nested(attr, ops->maxtype,
+ linkinfo[IFLA_INFO_DATA],
+ ops->policy, extack);
+ if (err < 0)
+ return err;
+ data = attr;
+ }
+ if (ops->validate) {
+ err = ops->validate(tb, data, extack);
+ if (err < 0)
+ return err;
}
+ }
- if (m_ops) {
- if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
- return -EINVAL;
+ slave_data = NULL;
+ if (m_ops) {
+ if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
+ return -EINVAL;
- if (m_ops->slave_maxtype &&
- linkinfo[IFLA_INFO_SLAVE_DATA]) {
- err = nla_parse_nested(slave_attr,
- m_ops->slave_maxtype,
- linkinfo[IFLA_INFO_SLAVE_DATA],
- m_ops->slave_policy,
- NULL);
- if (err < 0)
- return err;
- slave_data = slave_attr;
- }
+ if (m_ops->slave_maxtype &&
+ linkinfo[IFLA_INFO_SLAVE_DATA]) {
+ err = nla_parse_nested(slave_attr, m_ops->slave_maxtype,
+ linkinfo[IFLA_INFO_SLAVE_DATA],
+ m_ops->slave_policy, extack);
+ if (err < 0)
+ return err;
+ slave_data = slave_attr;
}
+ }
- if (dev) {
- int status = 0;
-
- if (nlh->nlmsg_flags & NLM_F_EXCL)
- return -EEXIST;
- if (nlh->nlmsg_flags & NLM_F_REPLACE)
- return -EOPNOTSUPP;
+ if (dev) {
+ int status = 0;
- if (linkinfo[IFLA_INFO_DATA]) {
- if (!ops || ops != dev->rtnl_link_ops ||
- !ops->changelink)
- return -EOPNOTSUPP;
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+ if (nlh->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
- err = ops->changelink(dev, tb, data, extack);
- if (err < 0)
- return err;
- status |= DO_SETLINK_NOTIFY;
- }
+ if (linkinfo[IFLA_INFO_DATA]) {
+ if (!ops || ops != dev->rtnl_link_ops ||
+ !ops->changelink)
+ return -EOPNOTSUPP;
- if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
- if (!m_ops || !m_ops->slave_changelink)
- return -EOPNOTSUPP;
+ err = ops->changelink(dev, tb, data, extack);
+ if (err < 0)
+ return err;
+ status |= DO_SETLINK_NOTIFY;
+ }
- err = m_ops->slave_changelink(master_dev, dev,
- tb, slave_data,
- extack);
- if (err < 0)
- return err;
- status |= DO_SETLINK_NOTIFY;
- }
+ if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
+ if (!m_ops || !m_ops->slave_changelink)
+ return -EOPNOTSUPP;
- return do_setlink(skb, dev, ifm, extack, tb, ifname,
- status);
+ err = m_ops->slave_changelink(master_dev, dev, tb,
+ slave_data, extack);
+ if (err < 0)
+ return err;
+ status |= DO_SETLINK_NOTIFY;
}
- if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
- if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
- return rtnl_group_changelink(skb, net,
+ return do_setlink(skb, dev, ifm, extack, tb, ifname, status);
+ }
+
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+ if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+ return rtnl_group_changelink(skb, net,
nla_get_u32(tb[IFLA_GROUP]),
ifm, extack, tb);
- return -ENODEV;
- }
+ return -ENODEV;
+ }
- if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
- return -EOPNOTSUPP;
+ if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
+ return -EOPNOTSUPP;
- if (!ops) {
+ if (!ops) {
#ifdef CONFIG_MODULES
- if (kind[0]) {
- __rtnl_unlock();
- request_module("rtnl-link-%s", kind);
- rtnl_lock();
- ops = rtnl_link_ops_get(kind);
- if (ops)
- goto replay;
- }
-#endif
- return -EOPNOTSUPP;
+ if (kind[0]) {
+ __rtnl_unlock();
+ request_module("rtnl-link-%s", kind);
+ rtnl_lock();
+ ops = rtnl_link_ops_get(kind);
+ if (ops)
+ goto replay;
}
+#endif
+ NL_SET_ERR_MSG(extack, "Unknown device type");
+ return -EOPNOTSUPP;
+ }
- if (!ops->setup)
- return -EOPNOTSUPP;
-
- if (!ifname[0]) {
- snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
- name_assign_type = NET_NAME_ENUM;
- }
+ if (!ops->setup)
+ return -EOPNOTSUPP;
- dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
- if (IS_ERR(dest_net))
- return PTR_ERR(dest_net);
+ if (!ifname[0]) {
+ snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
+ name_assign_type = NET_NAME_ENUM;
+ }
- if (tb[IFLA_LINK_NETNSID]) {
- int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
+ dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
- link_net = get_net_ns_by_id(dest_net, id);
- if (!link_net) {
- err = -EINVAL;
- goto out;
- }
- err = -EPERM;
- if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
- goto out;
- }
+ if (tb[IFLA_LINK_NETNSID]) {
+ int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
- dev = rtnl_create_link(link_net ? : dest_net, ifname,
- name_assign_type, ops, tb);
- if (IS_ERR(dev)) {
- err = PTR_ERR(dev);
+ link_net = get_net_ns_by_id(dest_net, id);
+ if (!link_net) {
+ NL_SET_ERR_MSG(extack, "Unknown network namespace id");
+ err = -EINVAL;
goto out;
}
+ err = -EPERM;
+ if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
+ goto out;
+ } else {
+ link_net = NULL;
+ }
- dev->ifindex = ifm->ifi_index;
+ dev = rtnl_create_link(link_net ? : dest_net, ifname,
+ name_assign_type, ops, tb, extack);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ goto out;
+ }
- if (ops->newlink) {
- err = ops->newlink(link_net ? : net, dev, tb, data,
- extack);
- /* Drivers should call free_netdev() in ->destructor
- * and unregister it on failure after registration
- * so that device could be finally freed in rtnl_unlock.
- */
- if (err < 0) {
- /* If device is not registered at all, free it now */
- if (dev->reg_state == NETREG_UNINITIALIZED)
- free_netdev(dev);
- goto out;
- }
- } else {
- err = register_netdevice(dev);
- if (err < 0) {
+ dev->ifindex = ifm->ifi_index;
+
+ if (ops->newlink) {
+ err = ops->newlink(link_net ? : net, dev, tb, data, extack);
+ /* Drivers should call free_netdev() in ->destructor
+ * and unregister it on failure after registration
+ * so that device could be finally freed in rtnl_unlock.
+ */
+ if (err < 0) {
+ /* If device is not registered at all, free it now */
+ if (dev->reg_state == NETREG_UNINITIALIZED)
free_netdev(dev);
- goto out;
- }
+ goto out;
}
- err = rtnl_configure_link(dev, ifm);
+ } else {
+ err = register_netdevice(dev);
+ if (err < 0) {
+ free_netdev(dev);
+ goto out;
+ }
+ }
+ err = rtnl_configure_link(dev, ifm);
+ if (err < 0)
+ goto out_unregister;
+ if (link_net) {
+ err = dev_change_net_namespace(dev, dest_net, ifname);
if (err < 0)
goto out_unregister;
- if (link_net) {
- err = dev_change_net_namespace(dev, dest_net, ifname);
- if (err < 0)
- goto out_unregister;
- }
- if (tb[IFLA_MASTER]) {
- err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]),
- extack);
- if (err)
- goto out_unregister;
- }
+ }
+ if (tb[IFLA_MASTER]) {
+ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
+ if (err)
+ goto out_unregister;
+ }
out:
- if (link_net)
- put_net(link_net);
- put_net(dest_net);
- return err;
+ if (link_net)
+ put_net(link_net);
+ put_net(dest_net);
+ return err;
out_unregister:
- if (ops->newlink) {
- LIST_HEAD(list_kill);
+ if (ops->newlink) {
+ LIST_HEAD(list_kill);
- ops->dellink(dev, &list_kill);
- unregister_netdevice_many(&list_kill);
- } else {
- unregister_netdevice(dev);
+ ops->dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ } else {
+ unregister_netdevice(dev);
+ }
+ goto out;
+}
+
+static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr **attr;
+ int ret;
+
+ attr = kmalloc_array(RTNL_MAX_TYPE + 1, sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ ret = __rtnl_newlink(skb, nlh, attr, extack);
+ kfree(attr);
+ return ret;
+}
+
+static int rtnl_valid_getlink_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct ifinfomsg *ifm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for get link");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
+ extack);
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+ ifm->ifi_change) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
+ extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= IFLA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case IFLA_IFNAME:
+ case IFLA_EXT_MASK:
+ case IFLA_TARGET_NETNSID:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
+ return -EINVAL;
}
- goto out;
}
+
+ return 0;
}
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -3237,7 +3297,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
u32 ext_filter_mask = 0;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
+ err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
if (err < 0)
return err;
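The stricter validation only applies when userspace opts in with the NETLINK_GET_STRICT_CHK socket option (checked via netlink_strict_get_check()). A minimal userspace sketch of a strict RTM_GETLINK request; error handling is trimmed and the fallback defines cover older headers:

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
#ifndef NETLINK_GET_STRICT_CHK
#define NETLINK_GET_STRICT_CHK 12
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
	} req;

	/* opt in to strict validation */
	setsockopt(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, &one, sizeof(one));

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifm));
	req.nlh.nlmsg_type = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.ifm.ifi_family = AF_UNSPEC;
	req.ifm.ifi_index = 1;	/* usually loopback */

	/* with strict checking, nonzero ifi_flags/ifi_change or attributes
	 * other than IFLA_IFNAME/IFLA_EXT_MASK/IFLA_TARGET_NETNSID are now
	 * rejected with -EINVAL and an extack message
	 */
	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		perror("sendto");
	close(fd);
	return 0;
}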
@@ -3333,6 +3393,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
int idx;
int s_idx = cb->family;
int type = cb->nlh->nlmsg_type - RTM_BASE;
+ int ret = 0;
if (s_idx == 0)
s_idx = 1;
@@ -3365,12 +3426,13 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
cb->prev_seq = 0;
cb->seq = 0;
}
- if (dumpit(skb, cb))
+ ret = dumpit(skb, cb);
+ if (ret)
break;
}
cb->family = idx;
- return skb->len;
+ return skb->len ? : ret;
}
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
@@ -3598,6 +3660,11 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
}
+ if (dev->type != ARPHRD_ETHER) {
+ NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
+ return -EINVAL;
+ }
+
addr = nla_data(tb[NDA_LLADDR]);
err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
@@ -3613,7 +3680,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
const struct net_device_ops *ops = br_dev->netdev_ops;
err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
- nlh->nlmsg_flags);
+ nlh->nlmsg_flags, extack);
if (err)
goto out;
else
@@ -3625,7 +3692,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
if (dev->netdev_ops->ndo_fdb_add)
err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
vid,
- nlh->nlmsg_flags);
+ nlh->nlmsg_flags,
+ extack);
else
err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
nlh->nlmsg_flags);
@@ -3702,6 +3770,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
}
+ if (dev->type != ARPHRD_ETHER) {
+ NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
+ return -EINVAL;
+ }
+
addr = nla_data(tb[NDA_LLADDR]);
err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
@@ -3788,6 +3861,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
{
int err;
+ if (dev->type != ARPHRD_ETHER)
+ return -EINVAL;
+
netif_addr_lock_bh(dev);
err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
if (err)
@@ -3987,6 +4063,165 @@ out:
return skb->len;
}
+static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
+ struct nlattr **tb, u8 *ndm_flags,
+ int *br_idx, int *brport_idx, u8 **addr,
+ u16 *vid, struct netlink_ext_ack *extack)
+{
+ struct ndmsg *ndm;
+ int err, i;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
+ return -EINVAL;
+ }
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
+ ndm->ndm_type) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
+ return -EINVAL;
+ }
+
+ if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
+ NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
+ nda_policy, extack);
+ if (err < 0)
+ return err;
+
+ *ndm_flags = ndm->ndm_flags;
+ *brport_idx = ndm->ndm_ifindex;
+ for (i = 0; i <= NDA_MAX; ++i) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NDA_MASTER:
+ *br_idx = nla_get_u32(tb[i]);
+ break;
+ case NDA_LLADDR:
+ if (nla_len(tb[i]) != ETH_ALEN) {
+ NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
+ return -EINVAL;
+ }
+ *addr = nla_data(tb[i]);
+ break;
+ case NDA_VLAN:
+ err = fdb_vid_parse(tb[i], vid, extack);
+ if (err)
+ return err;
+ break;
+ case NDA_VNI:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = NULL, *br_dev = NULL;
+ const struct net_device_ops *ops = NULL;
+ struct net *net = sock_net(in_skb->sk);
+ struct nlattr *tb[NDA_MAX + 1];
+ struct sk_buff *skb;
+ int brport_idx = 0;
+ u8 ndm_flags = 0;
+ int br_idx = 0;
+ u8 *addr = NULL;
+ u16 vid = 0;
+ int err;
+
+ err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
+ &brport_idx, &addr, &vid, extack);
+ if (err < 0)
+ return err;
+
+ if (!addr) {
+ NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
+ return -EINVAL;
+ }
+
+ if (brport_idx) {
+ dev = __dev_get_by_index(net, brport_idx);
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Unknown device ifindex");
+ return -ENODEV;
+ }
+ }
+
+ if (br_idx) {
+ if (dev) {
+ NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
+ return -EINVAL;
+ }
+
+ br_dev = __dev_get_by_index(net, br_idx);
+ if (!br_dev) {
+ NL_SET_ERR_MSG(extack, "Invalid master ifindex");
+ return -EINVAL;
+ }
+ ops = br_dev->netdev_ops;
+ }
+
+ if (dev) {
+ if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
+ if (!(dev->priv_flags & IFF_BRIDGE_PORT)) {
+ NL_SET_ERR_MSG(extack, "Device is not a bridge port");
+ return -EINVAL;
+ }
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev) {
+ NL_SET_ERR_MSG(extack, "Master of device not found");
+ return -EINVAL;
+ }
+ ops = br_dev->netdev_ops;
+ } else {
+ if (!(ndm_flags & NTF_SELF)) {
+ NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
+ return -EINVAL;
+ }
+ ops = dev->netdev_ops;
+ }
+ }
+
+ if (!br_dev && !dev) {
+ NL_SET_ERR_MSG(extack, "No device specified");
+ return -ENODEV;
+ }
+
+ if (!ops || !ops->ndo_fdb_get) {
+ NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
+ return -EOPNOTSUPP;
+ }
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ if (br_dev)
+ dev = br_dev;
+ err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
+ NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, extack);
+ if (err)
+ goto out;
+
+ return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
+out:
+ kfree_skb(skb);
+ return err;
+}
+
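This is the kernel side of what `bridge fdb get` issues. A sketch of the matching request from userspace; the device name "eth0" and the MAC address are placeholders, and parsing of the unicast reply is omitted:

#include <linux/neighbour.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct ndmsg ndm;
		char attrs[64];
	} req;
	struct rtattr *rta;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ndm));
	req.nlh.nlmsg_type = RTM_GETNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.ndm.ndm_family = AF_BRIDGE;
	req.ndm.ndm_ifindex = if_nametoindex("eth0");	/* bridge port */
	req.ndm.ndm_flags = NTF_MASTER;	/* look up in the bridge FDB */

	/* NDA_LLADDR carries the lookup address; valid_fdb_get_strict()
	 * rejects anything that is not ETH_ALEN bytes long
	 */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = NDA_LLADDR;
	rta->rta_len = RTA_LENGTH(sizeof(mac));
	memcpy(RTA_DATA(rta), mac, sizeof(mac));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
			    RTA_ALIGN(rta->rta_len);

	sendto(fd, &req, req.nlh.nlmsg_len, 0,
	       (struct sockaddr *)&kernel, sizeof(kernel));
	/* the unicast reply (or extack error) would be read back here */
	close(fd);
	return 0;
}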
static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
unsigned int attrnum, unsigned int flag)
{
@@ -4298,7 +4533,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
}
- err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
+ err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
+ extack);
if (err)
goto out;
@@ -4310,7 +4546,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
err = -EOPNOTSUPP;
else
err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
- flags);
+ flags,
+ extack);
if (!err) {
flags &= ~BRIDGE_FLAGS_SELF;
@@ -4706,6 +4943,40 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev,
return size;
}
+static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
+ bool is_dump, struct netlink_ext_ack *extack)
+{
+ struct if_stats_msg *ifsm;
+
+ if (nlh->nlmsg_len < sizeof(*ifsm)) {
+ NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
+ return -EINVAL;
+ }
+
+ if (!strict_check)
+ return 0;
+
+ ifsm = nlmsg_data(nlh);
+
+ /* only requests using strict checks can pass data to influence
+ * the dump. The legacy exception is filter_mask.
+ */
+ if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
+ return -EINVAL;
+ }
+ if (nlmsg_attrlen(nlh, sizeof(*ifsm))) {
+ NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
+ return -EINVAL;
+ }
+ if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
+ NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -4717,8 +4988,10 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 filter_mask;
int err;
- if (nlmsg_len(nlh) < sizeof(*ifsm))
- return -EINVAL;
+ err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
+ false, extack);
+ if (err)
+ return err;
ifsm = nlmsg_data(nlh);
if (ifsm->ifindex > 0)
@@ -4770,27 +5043,11 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = net->dev_base_seq;
- if (nlmsg_len(cb->nlh) < sizeof(*ifsm)) {
- NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
- return -EINVAL;
- }
+ err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
+ if (err)
+ return err;
ifsm = nlmsg_data(cb->nlh);
-
- /* only requests using strict checks can pass data to influence
- * the dump. The legacy exception is filter_mask.
- */
- if (cb->strict_check) {
- if (ifsm->pad1 || ifsm->pad2 || ifsm->ifindex) {
- NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
- return -EINVAL;
- }
- if (nlmsg_attrlen(cb->nlh, sizeof(*ifsm))) {
- NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
- return -EINVAL;
- }
- }
-
filter_mask = ifsm->filter_mask;
if (!filter_mask) {
NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
@@ -5045,7 +5302,7 @@ void __init rtnetlink_init(void)
rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
- rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0);
+ rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
diff --git a/net/core/scm.c b/net/core/scm.c
index b1ff8a441748..52ef219cf6df 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -29,6 +29,7 @@
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
+#include <linux/errqueue.h>
#include <linux/uaccess.h>
@@ -252,6 +253,32 @@ out:
}
EXPORT_SYMBOL(put_cmsg);
+void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
+{
+ struct scm_timestamping64 tss;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tss.ts); i++) {
+ tss.ts[i].tv_sec = tss_internal->ts[i].tv_sec;
+ tss.ts[i].tv_nsec = tss_internal->ts[i].tv_nsec;
+ }
+
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPING_NEW, sizeof(tss), &tss);
+}
+EXPORT_SYMBOL(put_cmsg_scm_timestamping64);
+
+void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
+{
+ struct scm_timestamping tss;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tss.ts); i++)
+ tss.ts[i] = timespec64_to_timespec(tss_internal->ts[i]);
+
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPING_OLD, sizeof(tss), &tss);
+}
+EXPORT_SYMBOL(put_cmsg_scm_timestamping);
+
void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
{
struct cmsghdr __user *cm
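These helpers emit struct scm_timestamping (SO_TIMESTAMPING_OLD) or struct scm_timestamping64 (SO_TIMESTAMPING_NEW) control messages. The recvmsg()/CMSG walk a consumer performs is the same as for the simpler SO_TIMESTAMP used in this self-contained sketch over loopback UDP:

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	int one = 1, fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	socklen_t alen = sizeof(addr);
	char data[16], ctrl[64];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	getsockname(fd, (struct sockaddr *)&addr, &alen); /* learn the port */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &one, sizeof(one));
	sendto(fd, "x", 1, 0, (struct sockaddr *)&addr, alen);

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cm), sizeof(tv));
			printf("received at %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		}
	}
	close(fd);
	return 0;
}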
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 946de0e24c87..2415d9cb9b89 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -79,6 +79,9 @@
struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
+#ifdef CONFIG_SKB_EXTENSIONS
+static struct kmem_cache *skbuff_ext_cache __ro_after_init;
+#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
@@ -353,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
*/
void *netdev_alloc_frag(unsigned int fragsz)
{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(netdev_alloc_frag);
@@ -366,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
void *napi_alloc_frag(unsigned int fragsz)
{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);
@@ -606,7 +613,6 @@ fastpath:
void skb_release_head_state(struct sk_buff *skb)
{
skb_dst_drop(skb);
- secpath_reset(skb);
if (skb->destructor) {
WARN_ON(in_irq());
skb->destructor(skb);
@@ -614,9 +620,7 @@ void skb_release_head_state(struct sk_buff *skb)
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
nf_conntrack_put(skb_nfct(skb));
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- nf_bridge_put(skb->nf_bridge);
-#endif
+ skb_ext_put(skb);
}
/* Free everything but the sk_buff shell. */
@@ -796,9 +800,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->dev = old->dev;
memcpy(new->cb, old->cb, sizeof(old->cb));
skb_dst_copy(new, old);
-#ifdef CONFIG_XFRM
- new->sp = secpath_get(old->sp);
-#endif
+ __skb_ext_copy(new, old);
__nf_copy(new, old, false);
/* Note : this field could be in headers_start/headers_end section
@@ -1089,7 +1091,7 @@ void sock_zerocopy_put(struct ubuf_info *uarg)
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg)
+void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
if (uarg) {
struct sock *sk = skb_from_uarg(uarg)->sk;
@@ -1097,7 +1099,8 @@ void sock_zerocopy_put_abort(struct ubuf_info *uarg)
atomic_dec(&sk->sk_zckey);
uarg->len--;
- sock_zerocopy_put(uarg);
+ if (have_uref)
+ sock_zerocopy_put(uarg);
}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
@@ -1105,6 +1108,12 @@ EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, size_t length);
+int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
+{
+ return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
+
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg)
@@ -1131,7 +1140,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
return err;
}
- skb_zcopy_set(skb, uarg);
+ skb_zcopy_set(skb, uarg, NULL);
return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
@@ -1151,7 +1160,7 @@ static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
if (skb_copy_ubufs(nskb, GFP_ATOMIC))
return -EIO;
}
- skb_zcopy_set(nskb, skb_uarg(orig));
+ skb_zcopy_set(nskb, skb_uarg(orig), NULL);
}
return 0;
}
@@ -1925,8 +1934,6 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
struct sk_buff *insp = NULL;
do {
- BUG_ON(!list);
-
if (list->len <= eat) {
/* Eaten as whole. */
eat -= list->len;
@@ -2366,19 +2373,6 @@ error:
}
EXPORT_SYMBOL_GPL(skb_send_sock_locked);
-/* Send skb data on a socket. */
-int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
-{
- int ret = 0;
-
- lock_sock(sk);
- ret = skb_send_sock_locked(sk, skb, offset, len);
- release_sock(sk);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(skb_send_sock);
-
/**
* skb_store_bits - store bits from kernel buffer to skb
* @skb: destination buffer
@@ -2645,6 +2639,65 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
+{
+ __sum16 sum;
+
+ sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
+ /* See comments in __skb_checksum_complete(). */
+ if (likely(!sum)) {
+ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !skb->csum_complete_sw)
+ netdev_rx_csum_fault(skb->dev, skb);
+ }
+ if (!skb_shared(skb))
+ skb->csum_valid = !sum;
+ return sum;
+}
+EXPORT_SYMBOL(__skb_checksum_complete_head);
+
+/* This function assumes skb->csum already holds pseudo header's checksum,
+ * which has been changed from the hardware checksum, for example, by
+ * __skb_checksum_validate_complete(). And, the original skb->csum must
+ * have been validated unsuccessfully for CHECKSUM_COMPLETE case.
+ *
+ * It returns non-zero if the recomputed checksum is still invalid, otherwise
+ * zero. The new checksum is stored back into skb->csum unless the skb is
+ * shared.
+ */
+__sum16 __skb_checksum_complete(struct sk_buff *skb)
+{
+ __wsum csum;
+ __sum16 sum;
+
+ csum = skb_checksum(skb, 0, skb->len, 0);
+
+ sum = csum_fold(csum_add(skb->csum, csum));
+ /* This check is inverted, because we already knew the hardware
+ * checksum is invalid before calling this function. So, if the
+ * re-computed checksum is valid instead, then we have a mismatch
+ * between the original skb->csum and skb_checksum(). This means either
+ * the original hardware checksum is incorrect or we screw up skb->csum
+ * when moving skb->data around.
+ */
+ if (likely(!sum)) {
+ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !skb->csum_complete_sw)
+ netdev_rx_csum_fault(skb->dev, skb);
+ }
+
+ if (!skb_shared(skb)) {
+ /* Save full packet checksum */
+ skb->csum = csum;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum_complete_sw = 1;
+ skb->csum_valid = !sum;
+ }
+
+ return sum;
+}
+EXPORT_SYMBOL(__skb_checksum_complete);
+
static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
{
net_warn_ratelimited(
@@ -2962,28 +3015,6 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
}
EXPORT_SYMBOL(skb_append);
-/**
- * skb_insert - insert a buffer
- * @old: buffer to insert before
- * @newsk: buffer to insert
- * @list: list to use
- *
- * Place a packet before a given packet in a list. The list locks are
- * taken and this function is atomic with respect to other list locked
- * calls.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
- __skb_insert(newsk, old->prev, old, list);
- spin_unlock_irqrestore(&list->lock, flags);
-}
-EXPORT_SYMBOL(skb_insert);
-
static inline void skb_split_inside_header(struct sk_buff *skb,
struct sk_buff* skb1,
const u32 len, const int pos)
@@ -3873,6 +3904,46 @@ done:
}
EXPORT_SYMBOL_GPL(skb_gro_receive);
+#ifdef CONFIG_SKB_EXTENSIONS
+#define SKB_EXT_ALIGN_VALUE 8
+#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
+
+static const u8 skb_ext_type_len[] = {
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
+#endif
+#ifdef CONFIG_XFRM
+ [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
+#endif
+};
+
+static __always_inline unsigned int skb_ext_total_length(void)
+{
+ return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
+#endif
+#ifdef CONFIG_XFRM
+ skb_ext_type_len[SKB_EXT_SEC_PATH] +
+#endif
+ 0;
+}
+
+static void skb_extensions_init(void)
+{
+ BUILD_BUG_ON(SKB_EXT_NUM >= 8);
+ BUILD_BUG_ON(skb_ext_total_length() > 255);
+
+ skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
+ SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
+ 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ NULL);
+}
+#else
+static void skb_extensions_init(void) {}
+#endif
+
void __init skb_init(void)
{
skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
@@ -3887,6 +3958,7 @@ void __init skb_init(void)
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
+ skb_extensions_init();
}
static int
@@ -4854,6 +4926,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
nf_reset(skb);
nf_reset_trace(skb);
+#ifdef CONFIG_NET_SWITCHDEV
+ skb->offload_fwd_mark = 0;
+ skb->offload_l3_fwd_mark = 0;
+#endif
+
if (!xnet)
return;
@@ -4944,6 +5021,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
*
* This is a helper to do that correctly considering GSO_BY_FRAGS.
*
+ * @skb: GSO skb
+ *
* @seg_len: The segmented length (from skb_gso_*_seglen). In the
* GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
*
@@ -5121,7 +5200,7 @@ int skb_vlan_pop(struct sk_buff *skb)
int err;
if (likely(skb_vlan_tag_present(skb))) {
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
} else {
if (unlikely(!eth_type_vlan(skb->protocol)))
return 0;
@@ -5195,7 +5274,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
unsigned long chunk;
struct sk_buff *skb;
struct page *page;
- gfp_t gfp_head;
int i;
*errcode = -EMSGSIZE;
@@ -5205,12 +5283,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
if (npages > MAX_SKB_FRAGS)
return NULL;
- gfp_head = gfp_mask;
- if (gfp_head & __GFP_DIRECT_RECLAIM)
- gfp_head |= __GFP_RETRY_MAYFAIL;
-
*errcode = -ENOBUFS;
- skb = alloc_skb(header_len, gfp_head);
+ skb = alloc_skb(header_len, gfp_mask);
if (!skb)
return NULL;
@@ -5518,3 +5592,148 @@ void skb_condense(struct sk_buff *skb)
*/
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
+
+#ifdef CONFIG_SKB_EXTENSIONS
+static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
+{
+ return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
+}
+
+static struct skb_ext *skb_ext_alloc(void)
+{
+ struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
+
+ if (new) {
+ memset(new->offset, 0, sizeof(new->offset));
+ refcount_set(&new->refcnt, 1);
+ }
+
+ return new;
+}
+
+static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
+ unsigned int old_active)
+{
+ struct skb_ext *new;
+
+ if (refcount_read(&old->refcnt) == 1)
+ return old;
+
+ new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
+ if (!new)
+ return NULL;
+
+ memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
+ refcount_set(&new->refcnt, 1);
+
+#ifdef CONFIG_XFRM
+ if (old_active & (1 << SKB_EXT_SEC_PATH)) {
+ struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
+ unsigned int i;
+
+ for (i = 0; i < sp->len; i++)
+ xfrm_state_hold(sp->xvec[i]);
+ }
+#endif
+ __skb_ext_put(old);
+ return new;
+}
+
+/**
+ * skb_ext_add - allocate space for given extension, COW if needed
+ * @skb: buffer
+ * @id: extension to allocate space for
+ *
+ * Allocates enough space for the given extension.
+ * If the extension is already present, a pointer to that extension
+ * is returned.
+ *
+ * If the skb was cloned, COW applies and the returned memory can be
+ * modified without changing the extension space of cloned buffers.
+ *
+ * Returns pointer to the extension or NULL on allocation failure.
+ */
+void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
+{
+ struct skb_ext *new, *old = NULL;
+ unsigned int newlen, newoff;
+
+ if (skb->active_extensions) {
+ old = skb->extensions;
+
+ new = skb_ext_maybe_cow(old, skb->active_extensions);
+ if (!new)
+ return NULL;
+
+ if (__skb_ext_exist(new, id))
+ goto set_active;
+
+ newoff = new->chunks;
+ } else {
+ newoff = SKB_EXT_CHUNKSIZEOF(*new);
+
+ new = skb_ext_alloc();
+ if (!new)
+ return NULL;
+ }
+
+ newlen = newoff + skb_ext_type_len[id];
+ new->chunks = newlen;
+ new->offset[id] = newoff;
+set_active:
+ skb->extensions = new;
+ skb->active_extensions |= 1 << id;
+ return skb_ext_get_ptr(new, id);
+}
+EXPORT_SYMBOL(skb_ext_add);
+
+#ifdef CONFIG_XFRM
+static void skb_ext_put_sp(struct sec_path *sp)
+{
+ unsigned int i;
+
+ for (i = 0; i < sp->len; i++)
+ xfrm_state_put(sp->xvec[i]);
+}
+#endif
+
+void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
+{
+ struct skb_ext *ext = skb->extensions;
+
+ skb->active_extensions &= ~(1 << id);
+ if (skb->active_extensions == 0) {
+ skb->extensions = NULL;
+ __skb_ext_put(ext);
+#ifdef CONFIG_XFRM
+ } else if (id == SKB_EXT_SEC_PATH &&
+ refcount_read(&ext->refcnt) == 1) {
+ struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
+
+ skb_ext_put_sp(sp);
+ sp->len = 0;
+#endif
+ }
+}
+EXPORT_SYMBOL(__skb_ext_del);
+
+void __skb_ext_put(struct skb_ext *ext)
+{
+ /* If this is last clone, nothing can increment
+ * it after check passes. Avoids one atomic op.
+ */
+ if (refcount_read(&ext->refcnt) == 1)
+ goto free_now;
+
+ if (!refcount_dec_and_test(&ext->refcnt))
+ return;
+free_now:
+#ifdef CONFIG_XFRM
+ if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
+ skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
+#endif
+
+ kmem_cache_free(skbuff_ext_cache, ext);
+}
+EXPORT_SYMBOL(__skb_ext_put);
+#endif /* CONFIG_SKB_EXTENSIONS */
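The offset/chunks bookkeeping is compact enough to model in isolation. A userspace toy of the same arithmetic (struct and macro names are invented; only the layout mirrors skb_ext_add()/skb_ext_get_ptr()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN8(x)	(((x) + 7u) & ~7u)
#define CHUNKS(x)	(ALIGN8(x) / 8u)	/* SKB_EXT_CHUNKSIZEOF analogue */

struct toy_ext {
	uint8_t refcnt;
	uint8_t offset[2];	/* per-extension-id offset, in 8-byte chunks */
	uint8_t chunks;		/* total used length, in 8-byte chunks */
	/* extension payloads follow, 8-byte aligned */
};

int main(void)
{
	unsigned char mem[256];
	struct toy_ext *ext = (struct toy_ext *)mem;
	char *payload;

	memset(mem, 0, sizeof(mem));
	ext->chunks = CHUNKS(sizeof(*ext));

	/* "skb_ext_add" for id 0 with a 20-byte payload */
	ext->offset[0] = ext->chunks;
	ext->chunks += CHUNKS(20);

	/* "skb_ext_get_ptr": one multiply, no per-id pointer fields */
	payload = (char *)ext + ext->offset[0] * 8;

	printf("payload at +%td bytes, %u bytes used in total\n",
	       payload - (char *)mem, ext->chunks * 8u);
	return 0;
}

One allocation holds every active extension, and a u8 offset can address the whole area because it counts 8-byte chunks rather than bytes.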
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 56a99d0c9aa0..ae6f06e45737 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -78,11 +78,9 @@ int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
{
int i = src->sg.start;
struct scatterlist *sge = sk_msg_elem(src, i);
+ struct scatterlist *sgd = NULL;
u32 sge_len, sge_off;
- if (sk_msg_full(dst))
- return -ENOSPC;
-
while (off) {
if (sge->length > off)
break;
@@ -95,12 +93,26 @@ int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
while (len) {
sge_len = sge->length - off;
- sge_off = sge->offset + off;
if (sge_len > len)
sge_len = len;
+
+ if (dst->sg.end)
+ sgd = sk_msg_elem(dst, dst->sg.end - 1);
+
+ if (sgd &&
+ (sg_page(sge) == sg_page(sgd)) &&
+ (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
+ sgd->length += sge_len;
+ dst->sg.size += sge_len;
+ } else if (!sk_msg_full(dst)) {
+ sge_off = sge->offset + off;
+ sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
+ } else {
+ return -ENOSPC;
+ }
+
off = 0;
len -= sge_len;
- sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
sk_mem_charge(sk, sge_len);
sk_msg_iter_var_next(i);
if (i == src->sg.end && len)
@@ -403,7 +415,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
msg->skb = skb;
sk_psock_queue_msg(psock, msg);
- sk->sk_data_ready(sk);
+ sk_psock_data_ready(sk, psock);
return copied;
}
@@ -542,8 +554,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
/* No sk_callback_lock since already detached. */
- if (psock->parser.enabled)
- strp_done(&psock->parser.strp);
+ strp_done(&psock->parser.strp);
cancel_work_sync(&psock->work);
@@ -572,6 +583,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
rcu_assign_sk_user_data(sk, NULL);
sk_psock_cork_free(psock);
+ sk_psock_zap_ingress(psock);
sk_psock_restore_proto(sk, psock);
write_lock_bh(&sk->sk_callback_lock);
@@ -580,7 +592,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
write_unlock_bh(&sk->sk_callback_lock);
sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
- call_rcu_sched(&psock->rcu, sk_psock_destroy);
+ call_rcu(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);
@@ -669,6 +681,22 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
bool ingress;
switch (verdict) {
+ case __SK_PASS:
+ sk_other = psock->sk;
+ if (sock_flag(sk_other, SOCK_DEAD) ||
+ !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+ goto out_free;
+ }
+ if (atomic_read(&sk_other->sk_rmem_alloc) <=
+ sk_other->sk_rcvbuf) {
+ struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);
+
+ tcp->bpf.flags |= BPF_F_INGRESS;
+ skb_queue_tail(&psock->ingress_skb, skb);
+ schedule_work(&psock->work);
+ break;
+ }
+ goto out_free;
case __SK_REDIRECT:
sk_other = tcp_skb_bpf_redirect_fetch(skb);
if (unlikely(!sk_other))
@@ -735,7 +763,7 @@ static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
}
/* Called with socket lock held. */
-static void sk_psock_data_ready(struct sock *sk)
+static void sk_psock_strp_data_ready(struct sock *sk)
{
struct sk_psock *psock;
@@ -783,7 +811,7 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
return;
parser->saved_data_ready = sk->sk_data_ready;
- sk->sk_data_ready = sk_psock_data_ready;
+ sk->sk_data_ready = sk_psock_strp_data_ready;
sk->sk_write_space = sk_psock_write_space;
parser->enabled = true;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 6fcc4bc07d19..782343bb925b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -335,14 +335,68 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(__sk_backlog_rcv);
-static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
+static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
- struct timeval tv;
+ struct __kernel_sock_timeval tv;
+ int size;
- if (optlen < sizeof(tv))
- return -EINVAL;
- if (copy_from_user(&tv, optval, sizeof(tv)))
- return -EFAULT;
+ if (timeo == MAX_SCHEDULE_TIMEOUT) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ } else {
+ tv.tv_sec = timeo / HZ;
+ tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
+ }
+
+ if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+ struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
+ *(struct old_timeval32 *)optval = tv32;
+ return sizeof(tv32);
+ }
+
+ if (old_timeval) {
+ struct __kernel_old_timeval old_tv;
+ old_tv.tv_sec = tv.tv_sec;
+ old_tv.tv_usec = tv.tv_usec;
+ *(struct __kernel_old_timeval *)optval = old_tv;
+ size = sizeof(old_tv);
+ } else {
+ *(struct __kernel_sock_timeval *)optval = tv;
+ size = sizeof(tv);
+ }
+
+ return size;
+}
+
+static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool old_timeval)
+{
+ struct __kernel_sock_timeval tv;
+
+ if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+ struct old_timeval32 tv32;
+
+ if (optlen < sizeof(tv32))
+ return -EINVAL;
+
+ if (copy_from_user(&tv32, optval, sizeof(tv32)))
+ return -EFAULT;
+ tv.tv_sec = tv32.tv_sec;
+ tv.tv_usec = tv32.tv_usec;
+ } else if (old_timeval) {
+ struct __kernel_old_timeval old_tv;
+
+ if (optlen < sizeof(old_tv))
+ return -EINVAL;
+ if (copy_from_user(&old_tv, optval, sizeof(old_tv)))
+ return -EFAULT;
+ tv.tv_sec = old_tv.tv_sec;
+ tv.tv_usec = old_tv.tv_usec;
+ } else {
+ if (optlen < sizeof(tv))
+ return -EINVAL;
+ if (copy_from_user(&tv, optval, sizeof(tv)))
+ return -EFAULT;
+ }
if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
return -EDOM;
@@ -360,8 +414,8 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
*timeo_p = MAX_SCHEDULE_TIMEOUT;
if (tv.tv_sec == 0 && tv.tv_usec == 0)
return 0;
- if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
- *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC / HZ);
+ if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
+ *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
return 0;
}
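Userspace is unaffected by this refactor: SO_RCVTIMEO and SO_SNDTIMEO (mapped to the _OLD variants for existing binaries) still take libc's struct timeval, and the kernel converts the 32-bit, 64-bit and compat layouts internally. A minimal sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	char buf[16];

	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
	/* no data can arrive on this unbound socket, so recv()
	 * returns after roughly 2.5s with EAGAIN
	 */
	if (recv(fd, buf, sizeof(buf), 0) < 0)
		perror("recv");
	close(fd);
	return 0;
}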
@@ -520,14 +574,11 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
}
EXPORT_SYMBOL(sk_dst_check);
-static int sock_setbindtodevice(struct sock *sk, char __user *optval,
- int optlen)
+static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
{
int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
struct net *net = sock_net(sk);
- char devname[IFNAMSIZ];
- int index;
/* Sorry... */
ret = -EPERM;
@@ -535,6 +586,32 @@ static int sock_setbindtodevice(struct sock *sk, char __user *optval,
goto out;
ret = -EINVAL;
+ if (ifindex < 0)
+ goto out;
+
+ sk->sk_bound_dev_if = ifindex;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
+ sk_dst_reset(sk);
+
+ ret = 0;
+
+out:
+#endif
+
+ return ret;
+}
+
+static int sock_setbindtodevice(struct sock *sk, char __user *optval,
+ int optlen)
+{
+ int ret = -ENOPROTOOPT;
+#ifdef CONFIG_NETDEVICES
+ struct net *net = sock_net(sk);
+ char devname[IFNAMSIZ];
+ int index;
+
+ ret = -EINVAL;
if (optlen < 0)
goto out;
@@ -566,12 +643,9 @@ static int sock_setbindtodevice(struct sock *sk, char __user *optval,
}
lock_sock(sk);
- sk->sk_bound_dev_if = index;
- sk_dst_reset(sk);
+ ret = sock_setbindtodevice_locked(sk, index);
release_sock(sk);
- ret = 0;
-
out:
#endif
@@ -698,6 +772,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
+ sk_dst_reset(sk);
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
@@ -710,6 +785,10 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
*/
val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
+ /* Ensure val * 2 fits into an int, to prevent max_t()
+ * from treating it as a negative value.
+ */
+ val = min_t(int, val, INT_MAX / 2);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
/* Wake up sending tasks if we upped the value. */
@@ -721,6 +800,12 @@ set_sndbuf:
ret = -EPERM;
break;
}
+
+ /* No negative values (to prevent underflow, as val will be
+ * multiplied by 2).
+ */
+ if (val < 0)
+ val = 0;
goto set_sndbuf;
case SO_RCVBUF:
@@ -731,6 +816,10 @@ set_sndbuf:
*/
val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
+ /* Ensure val * 2 fits into an int, to prevent max_t()
+ * from treating it as a negative value.
+ */
+ val = min_t(int, val, INT_MAX / 2);
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/*
* We double it on the way in to account for
@@ -755,6 +844,12 @@ set_rcvbuf:
ret = -EPERM;
break;
}
+
+ /* No negative values (to prevent underflow, as val will be
+ * multiplied by 2).
+ */
+ if (val < 0)
+ val = 0;
goto set_rcvbuf;
case SO_KEEPALIVE:
@@ -812,10 +907,17 @@ set_rcvbuf:
clear_bit(SOCK_PASSCRED, &sock->flags);
break;
- case SO_TIMESTAMP:
- case SO_TIMESTAMPNS:
+ case SO_TIMESTAMP_OLD:
+ case SO_TIMESTAMP_NEW:
+ case SO_TIMESTAMPNS_OLD:
+ case SO_TIMESTAMPNS_NEW:
if (valbool) {
- if (optname == SO_TIMESTAMP)
+ if (optname == SO_TIMESTAMP_NEW || optname == SO_TIMESTAMPNS_NEW)
+ sock_set_flag(sk, SOCK_TSTAMP_NEW);
+ else
+ sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+
+ if (optname == SO_TIMESTAMP_OLD || optname == SO_TIMESTAMP_NEW)
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
else
sock_set_flag(sk, SOCK_RCVTSTAMPNS);
@@ -824,10 +926,14 @@ set_rcvbuf:
} else {
sock_reset_flag(sk, SOCK_RCVTSTAMP);
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
+ sock_reset_flag(sk, SOCK_TSTAMP_NEW);
}
break;
- case SO_TIMESTAMPING:
+ case SO_TIMESTAMPING_NEW:
+ sock_set_flag(sk, SOCK_TSTAMP_NEW);
+ /* fall through */
+ case SO_TIMESTAMPING_OLD:
if (val & ~SOF_TIMESTAMPING_MASK) {
ret = -EINVAL;
break;
@@ -858,9 +964,13 @@ set_rcvbuf:
if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
sock_enable_timestamp(sk,
SOCK_TIMESTAMPING_RX_SOFTWARE);
- else
+ else {
+ if (optname == SO_TIMESTAMPING_NEW)
+ sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+
sock_disable_timestamp(sk,
(1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
+ }
break;
case SO_RCVLOWAT:
@@ -872,12 +982,14 @@ set_rcvbuf:
sk->sk_rcvlowat = val ? : 1;
break;
- case SO_RCVTIMEO:
- ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
+ case SO_RCVTIMEO_OLD:
+ case SO_RCVTIMEO_NEW:
+ ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD);
break;
- case SO_SNDTIMEO:
- ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
+ case SO_SNDTIMEO_OLD:
+ case SO_SNDTIMEO_NEW:
+ ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD);
break;
case SO_ATTACH_FILTER:
@@ -950,10 +1062,12 @@ set_rcvbuf:
clear_bit(SOCK_PASSSEC, &sock->flags);
break;
case SO_MARK:
- if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
ret = -EPERM;
- else
+ } else if (val != sk->sk_mark) {
sk->sk_mark = val;
+ sk_dst_reset(sk);
+ }
break;
case SO_RXQ_OVFL:
@@ -994,15 +1108,23 @@ set_rcvbuf:
#endif
case SO_MAX_PACING_RATE:
- if (val != ~0U)
+ {
+ unsigned long ulval = (val == ~0U) ? ~0UL : val;
+
+ if (sizeof(ulval) != sizeof(val) &&
+ optlen >= sizeof(ulval) &&
+ get_user(ulval, (unsigned long __user *)optval)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (ulval != ~0UL)
cmpxchg(&sk->sk_pacing_status,
SK_PACING_NONE,
SK_PACING_NEEDED);
- sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
- sk->sk_pacing_rate = min(sk->sk_pacing_rate,
- sk->sk_max_pacing_rate);
+ sk->sk_max_pacing_rate = ulval;
+ sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
break;
-
+ }
case SO_INCOMING_CPU:
sk->sk_incoming_cpu = val;
break;
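After this change a 64-bit optval is honoured when userspace passes one; a 4-byte int still behaves as before. An illustrative sketch (the rate value is arbitrary, and the fallback define covers older headers):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47
#endif

int main(void)
{
	/* ~3 GB/s; rates above 2^32-2 bytes/s need the wide form */
	unsigned long rate = 3000000000UL;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
		       &rate, sizeof(rate)) < 0)
		perror("SO_MAX_PACING_RATE");
	close(fd);
	return 0;
}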
@@ -1014,7 +1136,10 @@ set_rcvbuf:
case SO_ZEROCOPY:
if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
- if (sk->sk_protocol != IPPROTO_TCP)
+ if (!((sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP) ||
+ (sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP)))
ret = -ENOTSUPP;
} else if (sk->sk_family != PF_RDS) {
ret = -ENOTSUPP;
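With this relaxation, SOCK_DGRAM/IPPROTO_UDP sockets may enable SO_ZEROCOPY as well. A minimal sketch of a zerocopy UDP send; completion notifications still arrive on the error queue, which is not shown:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

int main(void)
{
	int one = 1;
	char buf[4096] = { 0 };
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9),	/* discard; nothing needs to listen */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
	if (sendto(fd, buf, sizeof(buf), MSG_ZEROCOPY,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");	/* pages stay pinned until notified */
	close(fd);
	return 0;
}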
@@ -1047,6 +1172,10 @@ set_rcvbuf:
}
break;
+ case SO_BINDTOIFINDEX:
+ ret = sock_setbindtodevice_locked(sk, val);
+ break;
+
default:
ret = -ENOPROTOOPT;
break;
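SO_BINDTOIFINDEX is the by-index counterpart of SO_BINDTODEVICE. A usage sketch, assuming the constant from newer uapi headers; the readback uses the matching getsockopt case added later in this patch:

#include <net/if.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_BINDTOIFINDEX
#define SO_BINDTOIFINDEX 62	/* from newer uapi headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ifindex = if_nametoindex("lo");
	socklen_t len = sizeof(ifindex);

	/* like SO_BINDTODEVICE, but by index; needs CAP_NET_RAW */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTOIFINDEX,
		       &ifindex, sizeof(ifindex)) < 0)
		perror("SO_BINDTOIFINDEX");

	ifindex = 0;
	getsockopt(fd, SOL_SOCKET, SO_BINDTOIFINDEX, &ifindex, &len);
	printf("bound to ifindex %d\n", ifindex);
	close(fd);
	return 0;
}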
@@ -1090,8 +1219,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
union {
int val;
u64 val64;
+ unsigned long ulval;
struct linger ling;
- struct timeval tm;
+ struct old_timeval32 tm32;
+ struct __kernel_old_timeval tm;
+ struct __kernel_sock_timeval stm;
struct sock_txtime txtime;
} v;
@@ -1178,39 +1310,36 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
sock_warn_obsolete_bsdism("getsockopt");
break;
- case SO_TIMESTAMP:
+ case SO_TIMESTAMP_OLD:
v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
+ !sock_flag(sk, SOCK_TSTAMP_NEW) &&
!sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
- case SO_TIMESTAMPNS:
- v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
+ case SO_TIMESTAMPNS_OLD:
+ v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
+ break;
+
+ case SO_TIMESTAMP_NEW:
+ v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
break;
- case SO_TIMESTAMPING:
+ case SO_TIMESTAMPNS_NEW:
+ v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
+ break;
+
+ case SO_TIMESTAMPING_OLD:
v.val = sk->sk_tsflags;
break;
- case SO_RCVTIMEO:
- lv = sizeof(struct timeval);
- if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
- v.tm.tv_sec = 0;
- v.tm.tv_usec = 0;
- } else {
- v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
- v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * USEC_PER_SEC) / HZ;
- }
+ case SO_RCVTIMEO_OLD:
+ case SO_RCVTIMEO_NEW:
+ lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
break;
- case SO_SNDTIMEO:
- lv = sizeof(struct timeval);
- if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
- v.tm.tv_sec = 0;
- v.tm.tv_usec = 0;
- } else {
- v.tm.tv_sec = sk->sk_sndtimeo / HZ;
- v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * USEC_PER_SEC) / HZ;
- }
+ case SO_SNDTIMEO_OLD:
+ case SO_SNDTIMEO_NEW:
+ lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
break;
case SO_RCVLOWAT:
@@ -1336,8 +1465,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
#endif
case SO_MAX_PACING_RATE:
- /* 32bit version */
- v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
+ if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
+ lv = sizeof(v.ulval);
+ v.ulval = sk->sk_max_pacing_rate;
+ } else {
+ /* 32bit version */
+ v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
+ }
break;
case SO_INCOMING_CPU:
@@ -1391,6 +1525,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
SOF_TXTIME_REPORT_ERRORS : 0;
break;
+ case SO_BINDTOIFINDEX:
+ v.val = sk->sk_bound_dev_if;
+ break;
+
default:
/* We implement the SO_SNDLOWAT etc to not be settable
* (1003.1g 7).
@@ -1718,7 +1856,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_err_soft = 0;
newsk->sk_priority = 0;
newsk->sk_incoming_cpu = raw_smp_processor_id();
- atomic64_set(&newsk->sk_cookie, 0);
if (likely(newsk->sk_net_refcnt))
sock_inuse_add(sock_net(newsk), 1);
@@ -1742,7 +1879,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
- newsk->sk_wq = NULL;
+ RCU_INIT_POINTER(newsk->sk_wq, NULL);
if (newsk->sk_prot->sockets_allocated)
sk_sockets_allocated_inc(newsk);
@@ -2114,7 +2251,7 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
return -EINVAL;
sockc->mark = *(u32 *)CMSG_DATA(cmsg);
break;
- case SO_TIMESTAMPING:
+ case SO_TIMESTAMPING_OLD:
if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
return -EINVAL;
@@ -2372,7 +2509,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
}
if (sk_has_memory_pressure(sk)) {
- int alloc;
+ u64 alloc;
if (!sk_under_memory_pressure(sk))
return 1;
@@ -2705,11 +2842,11 @@ void sock_init_data(struct socket *sock, struct sock *sk)
if (sock) {
sk->sk_type = sock->type;
- sk->sk_wq = sock->wq;
+ RCU_INIT_POINTER(sk->sk_wq, sock->wq);
sock->sk = sk;
sk->sk_uid = SOCK_INODE(sock)->i_uid;
} else {
- sk->sk_wq = NULL;
+ RCU_INIT_POINTER(sk->sk_wq, NULL);
sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
}
@@ -2743,6 +2880,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_stamp = SK_DEFAULT_STAMP;
+#if BITS_PER_LONG==32
+ seqlock_init(&sk->sk_stamp_seq);
+#endif
atomic_set(&sk->sk_zckey, 0);
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -2842,12 +2982,13 @@ int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
struct timeval tv;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- tv = ktime_to_timeval(sk->sk_stamp);
+ tv = ktime_to_timeval(sock_read_timestamp(sk));
if (tv.tv_sec == -1)
return -ENOENT;
if (tv.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- tv = ktime_to_timeval(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ tv = ktime_to_timeval(kt);
}
return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
@@ -2858,11 +2999,12 @@ int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
struct timespec ts;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- ts = ktime_to_timespec(sk->sk_stamp);
+ ts = ktime_to_timespec(sock_read_timestamp(sk));
if (ts.tv_sec == -1)
return -ENOENT;
if (ts.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
ts = ktime_to_timespec(sk->sk_stamp);
}
return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
@@ -3279,6 +3421,7 @@ int sock_load_diag_module(int family, int protocol)
#ifdef CONFIG_INET
if (family == AF_INET &&
+ protocol != IPPROTO_RAW &&
!rcu_access_pointer(inet_protos[protocol]))
return -ENOENT;
#endif
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index ba5cba56f574..d8fe3e549373 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -187,6 +187,7 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
call_rcu(&old_reuse->rcu, reuseport_free_rcu);
return 0;
}
+EXPORT_SYMBOL(reuseport_add_sock);
void reuseport_detach_sock(struct sock *sk)
{
diff --git a/net/core/stream.c b/net/core/stream.c
index 7d329fb1f553..e94bb02a5629 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -32,7 +32,7 @@ void sk_stream_write_space(struct sock *sk)
struct socket *sock = sk->sk_socket;
struct socket_wq *wq;
- if (sk_stream_is_writeable(sk) && sock) {
+ if (__sk_stream_is_writeable(sk, 1) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
rcu_read_lock();
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index b1a2c5e38530..84bf2861f45f 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -28,12 +28,23 @@ static int two __maybe_unused = 2;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
+static long long_one __maybe_unused = 1;
+static long long_max __maybe_unused = LONG_MAX;
static int net_msg_warn; /* Unused, but still a sysctl */
int sysctl_fb_tunnels_only_for_init_net __read_mostly = 0;
EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);
+/* 0 - Keep current behavior:
+ * IPv4: inherit all current settings from init_net
+ * IPv6: reset all settings to default
+ * 1 - Both inherit all current settings from init_net
+ * 2 - Both reset all settings to default
+ */
+int sysctl_devconf_inherit_init_net __read_mostly;
+EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
+
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -279,7 +290,6 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
return ret;
}
-# ifdef CONFIG_HAVE_EBPF_JIT
static int
proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -290,7 +300,17 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
-# endif
+
+static int
+proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+}
#endif
static struct ctl_table net_core_table[] = {
@@ -397,6 +417,15 @@ static struct ctl_table net_core_table[] = {
.extra2 = &one,
},
# endif
+ {
+ .procname = "bpf_jit_limit",
+ .data = &bpf_jit_limit,
+ .maxlen = sizeof(long),
+ .mode = 0600,
+ .proc_handler = proc_dolongvec_minmax_bpf_restricted,
+ .extra1 = &long_one,
+ .extra2 = &long_max,
+ },
#endif
{
.procname = "netdev_tstamp_prequeue",
@@ -524,6 +553,15 @@ static struct ctl_table net_core_table[] = {
.extra1 = &zero,
.extra2 = &one,
},
+ {
+ .procname = "devconf_inherit_init_net",
+ .data = &sysctl_devconf_inherit_init_net,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &two,
+ },
{ }
};
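Both new entries are ordinary proc files. A sketch of flipping devconf_inherit_init_net from C (needs root; bpf_jit_limit is mode 0600 and additionally gated on CAP_SYS_ADMIN by proc_dolongvec_minmax_bpf_restricted()):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/core/devconf_inherit_init_net", "w");

	if (!f) {
		perror("devconf_inherit_init_net");
		return 1;
	}
	/* 1: both IPv4 and IPv6 inherit the current init_net settings */
	fputs("1\n", f);
	fclose(f);
	return 0;
}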