Diffstat (limited to 'include')
-rw-r--r-- include/acpi/processor.h | 2
-rw-r--r-- include/asm-generic/audit_change_attr.h | 6
-rw-r--r-- include/asm-generic/delay.h | 96
-rw-r--r-- include/asm-generic/div64.h | 121
-rw-r--r-- include/asm-generic/io.h | 82
-rw-r--r-- include/asm-generic/memory_model.h | 13
-rw-r--r-- include/asm-generic/vdso/vsyscall.h | 3
-rw-r--r-- include/asm-generic/vga.h | 23
-rw-r--r-- include/crypto/akcipher.h | 69
-rw-r--r-- include/crypto/internal/akcipher.h | 4
-rw-r--r-- include/crypto/internal/ecc.h | 14
-rw-r--r-- include/crypto/internal/rsa.h | 29
-rw-r--r-- include/crypto/internal/sig.h | 80
-rw-r--r-- include/crypto/public_key.h | 3
-rw-r--r-- include/crypto/sig.h | 152
-rw-r--r-- include/drm/intel/i915_pciids.h | 19
-rw-r--r-- include/dt-bindings/arm/qcom,ids.h | 7
-rw-r--r-- include/dt-bindings/clock/qcom,sa8775p-camcc.h | 108
-rw-r--r-- include/dt-bindings/clock/qcom,sa8775p-dispcc.h | 87
-rw-r--r-- include/dt-bindings/clock/qcom,sa8775p-videocc.h | 47
-rw-r--r-- include/dt-bindings/clock/r9a08g045-cpg.h | 1
-rw-r--r-- include/dt-bindings/clock/renesas,r9a08g045-vbattb.h | 13
-rw-r--r-- include/dt-bindings/clock/samsung,exynos8895.h | 453
-rw-r--r-- include/dt-bindings/clock/samsung,exynosautov920.h | 47
-rw-r--r-- include/dt-bindings/power/mediatek,mt6735-power-controller.h | 14
-rw-r--r-- include/dt-bindings/power/qcom-rpmpd.h | 2
-rw-r--r-- include/linux/acpi.h | 8
-rw-r--r-- include/linux/alarmtimer.h | 10
-rw-r--r-- include/linux/arch_topology.h | 4
-rw-r--r-- include/linux/arm-smccc.h | 32
-rw-r--r-- include/linux/asn1_decoder.h | 1
-rw-r--r-- include/linux/asn1_encoder.h | 1
-rw-r--r-- include/linux/ath9k_platform.h | 51
-rw-r--r-- include/linux/avf/virtchnl.h | 120
-rw-r--r-- include/linux/bio-integrity.h | 4
-rw-r--r-- include/linux/bio.h | 19
-rw-r--r-- include/linux/blk-integrity.h | 5
-rw-r--r-- include/linux/blk-mq.h | 115
-rw-r--r-- include/linux/blkdev.h | 111
-rw-r--r-- include/linux/bpf-cgroup.h | 2
-rw-r--r-- include/linux/bpf.h | 63
-rw-r--r-- include/linux/bpf_local_storage.h | 12
-rw-r--r-- include/linux/bpf_verifier.h | 67
-rw-r--r-- include/linux/btf.h | 22
-rw-r--r-- include/linux/btf_ids.h | 1
-rw-r--r-- include/linux/cfag12864b.h | 17
-rw-r--r-- include/linux/cgroup-defs.h | 3
-rw-r--r-- include/linux/cleanup.h | 71
-rw-r--r-- include/linux/clocksource.h | 1
-rw-r--r-- include/linux/clocksource_ids.h | 1
-rw-r--r-- include/linux/compiler_types.h | 6
-rw-r--r-- include/linux/cpuhotplug.h | 3
-rw-r--r-- include/linux/debugfs.h | 62
-rw-r--r-- include/linux/debugobjects.h | 12
-rw-r--r-- include/linux/delay.h | 79
-rw-r--r-- include/linux/dev_printk.h | 1
-rw-r--r-- include/linux/dim.h | 5
-rw-r--r-- include/linux/dpll.h | 4
-rw-r--r-- include/linux/dw_apb_timer.h | 3
-rw-r--r-- include/linux/dynamic_queue_limits.h | 2
-rw-r--r-- include/linux/efi.h | 17
-rw-r--r-- include/linux/energy_model.h | 29
-rw-r--r-- include/linux/entry-common.h | 3
-rw-r--r-- include/linux/entry-kvm.h | 5
-rw-r--r-- include/linux/ethtool.h | 4
-rw-r--r-- include/linux/eventpoll.h | 2
-rw-r--r-- include/linux/exportfs.h | 13
-rw-r--r-- include/linux/fdtable.h | 5
-rw-r--r-- include/linux/file.h | 8
-rw-r--r-- include/linux/file_ref.h | 177
-rw-r--r-- include/linux/filelock.h | 5
-rw-r--r-- include/linux/filter.h | 1
-rw-r--r-- include/linux/firmware/qcom/qcom_scm.h | 2
-rw-r--r-- include/linux/firmware/xlnx-zynqmp.h | 39
-rw-r--r-- include/linux/fs.h | 110
-rw-r--r-- include/linux/fs_parser.h | 5
-rw-r--r-- include/linux/fsl/netc_global.h | 19
-rw-r--r-- include/linux/ftrace.h | 85
-rw-r--r-- include/linux/ftrace_regs.h | 36
-rw-r--r-- include/linux/gfp.h | 22
-rw-r--r-- include/linux/gpio.h | 3
-rw-r--r-- include/linux/hid.h | 21
-rw-r--r-- include/linux/hid_bpf.h | 11
-rw-r--r-- include/linux/hisi_acc_qm.h | 56
-rw-r--r-- include/linux/hrtimer.h | 51
-rw-r--r-- include/linux/hwmon.h | 5
-rw-r--r-- include/linux/ieee80211.h | 2
-rw-r--r-- include/linux/if_ltalk.h | 8
-rw-r--r-- include/linux/inetdevice.h | 11
-rw-r--r-- include/linux/intel_vsec.h | 3
-rw-r--r-- include/linux/interrupt.h | 47
-rw-r--r-- include/linux/io_uring/cmd.h | 2
-rw-r--r-- include/linux/io_uring_types.h | 89
-rw-r--r-- include/linux/iomap.h | 1
-rw-r--r-- include/linux/iopoll.h | 52
-rw-r--r-- include/linux/irqflags.h | 6
-rw-r--r-- include/linux/irqnr.h | 36
-rw-r--r-- include/linux/jbd2.h | 15
-rw-r--r-- include/linux/jiffies.h | 15
-rw-r--r-- include/linux/libata.h | 4
-rw-r--r-- include/linux/lockdep.h | 2
-rw-r--r-- include/linux/logic_pio.h | 6
-rw-r--r-- include/linux/lsm/apparmor.h | 17
-rw-r--r-- include/linux/lsm/bpf.h | 16
-rw-r--r-- include/linux/lsm/selinux.h | 16
-rw-r--r-- include/linux/lsm/smack.h | 17
-rw-r--r-- include/linux/lsm_hook_defs.h | 20
-rw-r--r-- include/linux/mdio.h | 19
-rw-r--r-- include/linux/memcontrol.h | 12
-rw-r--r-- include/linux/memstick.h | 2
-rw-r--r-- include/linux/mfd/cgbc.h | 44
-rw-r--r-- include/linux/mfd/max5970.h | 12
-rw-r--r-- include/linux/mlx5/driver.h | 33
-rw-r--r-- include/linux/mlx5/fs.h | 3
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 67
-rw-r--r-- include/linux/mm.h | 18
-rw-r--r-- include/linux/mm_types.h | 90
-rw-r--r-- include/linux/mm_types_task.h | 21
-rw-r--r-- include/linux/mman.h | 28
-rw-r--r-- include/linux/mmc/card.h | 39
-rw-r--r-- include/linux/mmc/core.h | 21
-rw-r--r-- include/linux/mmc/host.h | 80
-rw-r--r-- include/linux/mmc/sd.h | 4
-rw-r--r-- include/linux/mmc/sd_uhs2.h | 240
-rw-r--r-- include/linux/mmzone.h | 1
-rw-r--r-- include/linux/netdevice.h | 100
-rw-r--r-- include/linux/netlink.h | 7
-rw-r--r-- include/linux/netpoll.h | 3
-rw-r--r-- include/linux/nfslocalio.h | 3
-rw-r--r-- include/linux/nvme.h | 135
-rw-r--r-- include/linux/of.h | 28
-rw-r--r-- include/linux/of_address.h | 6
-rw-r--r-- include/linux/of_fdt.h | 5
-rw-r--r-- include/linux/of_graph.h | 49
-rw-r--r-- include/linux/of_irq.h | 4
-rw-r--r-- include/linux/packing.h | 32
-rw-r--r-- include/linux/page-flags.h | 4
-rw-r--r-- include/linux/page_frag_cache.h | 61
-rw-r--r-- include/linux/pci.h | 4
-rw-r--r-- include/linux/pcs/pcs-xpcs.h | 31
-rw-r--r-- include/linux/perf/arm_pmuv3.h | 1
-rw-r--r-- include/linux/perf_event.h | 54
-rw-r--r-- include/linux/phy.h | 38
-rw-r--r-- include/linux/platform_data/asoc-s3c.h | 2
-rw-r--r-- include/linux/platform_data/hwmon-s3c.h | 10
-rw-r--r-- include/linux/platform_data/max6639.h | 15
-rw-r--r-- include/linux/platform_data/media/omap4iss.h | 66
-rw-r--r-- include/linux/platform_data/microchip-ksz.h | 1
-rw-r--r-- include/linux/platform_data/x86/intel_scu_ipc.h | 4
-rw-r--r-- include/linux/pm_domain.h | 15
-rw-r--r-- include/linux/pm_opp.h | 42
-rw-r--r-- include/linux/posix-timers.h | 72
-rw-r--r-- include/linux/posix_acl.h | 6
-rw-r--r-- include/linux/prandom.h | 1
-rw-r--r-- include/linux/preempt.h | 8
-rw-r--r-- include/linux/printk.h | 11
-rw-r--r-- include/linux/pwm.h | 66
-rw-r--r-- include/linux/random.h | 7
-rw-r--r-- include/linux/rbtree_latch.h | 20
-rw-r--r-- include/linux/rcutiny.h | 1
-rw-r--r-- include/linux/rcutree.h | 1
-rw-r--r-- include/linux/regmap.h | 63
-rw-r--r-- include/linux/regulator/consumer.h | 37
-rw-r--r-- include/linux/regulator/driver.h | 7
-rw-r--r-- include/linux/regulator/machine.h | 5
-rw-r--r-- include/linux/reset.h | 274
-rw-r--r-- include/linux/rtnetlink.h | 66
-rw-r--r-- include/linux/rwlock_rt.h | 10
-rw-r--r-- include/linux/sched.h | 8
-rw-r--r-- include/linux/sched/ext.h | 3
-rw-r--r-- include/linux/sched/signal.h | 4
-rw-r--r-- include/linux/sched/task_stack.h | 4
-rw-r--r-- include/linux/seccomp.h | 5
-rw-r--r-- include/linux/security.h | 98
-rw-r--r-- include/linux/sed-opal.h | 1
-rw-r--r-- include/linux/seqlock.h | 98
-rw-r--r-- include/linux/serial_core.h | 4
-rw-r--r-- include/linux/shmem_fs.h | 6
-rw-r--r-- include/linux/skbuff.h | 65
-rw-r--r-- include/linux/slab.h | 1
-rw-r--r-- include/linux/soc/mediatek/dvfsrc.h | 36
-rw-r--r-- include/linux/soc/mediatek/infracfg.h | 5
-rw-r--r-- include/linux/soc/mediatek/mtk_sip_svc.h | 3
-rw-r--r-- include/linux/soc/qcom/llcc-qcom.h | 14
-rw-r--r-- include/linux/soc/ti/ti_sci_protocol.h | 30
-rw-r--r-- include/linux/sockptr.h | 4
-rw-r--r-- include/linux/spi/spi.h | 30
-rw-r--r-- include/linux/spinlock_rt.h | 28
-rw-r--r-- include/linux/srcu.h | 92
-rw-r--r-- include/linux/srcutiny.h | 3
-rw-r--r-- include/linux/srcutree.h | 67
-rw-r--r-- include/linux/syscalls.h | 13
-rw-r--r-- include/linux/sysfb.h | 7
-rw-r--r-- include/linux/tcp.h | 3
-rw-r--r-- include/linux/thermal.h | 6
-rw-r--r-- include/linux/thread_info.h | 21
-rw-r--r-- include/linux/tick.h | 2
-rw-r--r-- include/linux/timekeeper_internal.h | 114
-rw-r--r-- include/linux/timekeeping.h | 7
-rw-r--r-- include/linux/timex.h | 8
-rw-r--r-- include/linux/tpm.h | 3
-rw-r--r-- include/linux/tpm_eventlog.h | 2
-rw-r--r-- include/linux/uaccess.h | 97
-rw-r--r-- include/linux/udp.h | 11
-rw-r--r-- include/linux/unicode.h | 4
-rw-r--r-- include/linux/uprobes.h | 83
-rw-r--r-- include/linux/usb/uvc.h | 6
-rw-r--r-- include/linux/user_namespace.h | 3
-rw-r--r-- include/linux/virtio.h | 13
-rw-r--r-- include/linux/vm_event_item.h | 2
-rw-r--r-- include/linux/vt_buffer.h | 24
-rw-r--r-- include/linux/wait.h | 5
-rw-r--r-- include/linux/wait_bit.h | 444
-rw-r--r-- include/linux/wireless.h | 5
-rw-r--r-- include/linux/wmi.h | 12
-rw-r--r-- include/linux/workqueue.h | 2
-rw-r--r-- include/linux/writeback.h | 32
-rw-r--r-- include/linux/ww_mutex.h | 14
-rw-r--r-- include/linux/wwan.h | 4
-rw-r--r-- include/linux/xattr.h | 4
-rw-r--r-- include/media/i2c/mt9p031.h | 18
-rw-r--r-- include/media/i2c/ths7303.h | 2
-rw-r--r-- include/media/media-entity.h | 10
-rw-r--r-- include/media/media-request.h | 2
-rw-r--r-- include/media/v4l2-dev.h | 15
-rw-r--r-- include/media/v4l2-dv-timings.h | 66
-rw-r--r-- include/media/v4l2-subdev.h | 17
-rw-r--r-- include/net/act_api.h | 1
-rw-r--r-- include/net/bluetooth/hci.h | 19
-rw-r--r-- include/net/bluetooth/hci_core.h | 85
-rw-r--r-- include/net/bluetooth/mgmt.h | 10
-rw-r--r-- include/net/bond_options.h | 2
-rw-r--r-- include/net/busy_poll.h | 3
-rw-r--r-- include/net/caif/cfsrvl.h | 1
-rw-r--r-- include/net/cfg80211.h | 23
-rw-r--r-- include/net/checksum.h | 6
-rw-r--r-- include/net/devlink.h | 13
-rw-r--r-- include/net/dropreason-core.h | 66
-rw-r--r-- include/net/dsa.h | 15
-rw-r--r-- include/net/eee.h | 5
-rw-r--r-- include/net/fib_notifier.h | 2
-rw-r--r-- include/net/fib_rules.h | 2
-rw-r--r-- include/net/flow_offload.h | 1
-rw-r--r-- include/net/genetlink.h | 8
-rw-r--r-- include/net/inet_connection_sock.h | 9
-rw-r--r-- include/net/inet_sock.h | 12
-rw-r--r-- include/net/ip.h | 13
-rw-r--r-- include/net/ip6_fib.h | 8
-rw-r--r-- include/net/ip_fib.h | 19
-rw-r--r-- include/net/ip_tunnels.h | 23
-rw-r--r-- include/net/iw_handler.h | 41
-rw-r--r-- include/net/l3mdev.h | 2
-rw-r--r-- include/net/lib80211.h | 122
-rw-r--r-- include/net/mac80211.h | 80
-rw-r--r-- include/net/mana/gdma.h | 6
-rw-r--r-- include/net/mana/mana.h | 10
-rw-r--r-- include/net/mctp.h | 18
-rw-r--r-- include/net/mctpdevice.h | 4
-rw-r--r-- include/net/neighbour.h | 27
-rw-r--r-- include/net/neighbour_tables.h | 12
-rw-r--r-- include/net/net_debug.h | 4
-rw-r--r-- include/net/net_namespace.h | 4
-rw-r--r-- include/net/net_shaper.h | 120
-rw-r--r-- include/net/netfilter/nf_tables.h | 32
-rw-r--r-- include/net/netlabel.h | 3
-rw-r--r-- include/net/netlink.h | 263
-rw-r--r-- include/net/netns/core.h | 1
-rw-r--r-- include/net/netns/ipv4.h | 9
-rw-r--r-- include/net/netns/xfrm.h | 1
-rw-r--r-- include/net/nfc/nci.h | 2
-rw-r--r-- include/net/nfc/nci_core.h | 4
-rw-r--r-- include/net/nfc/nfc.h | 4
-rw-r--r-- include/net/phonet/pn_dev.h | 8
-rw-r--r-- include/net/pkt_cls.h | 1
-rw-r--r-- include/net/route.h | 43
-rw-r--r-- include/net/rtnetlink.h | 34
-rw-r--r-- include/net/sock.h | 55
-rw-r--r-- include/net/tcp.h | 26
-rw-r--r-- include/net/tcp_ao.h | 3
-rw-r--r-- include/net/tls.h | 12
-rw-r--r-- include/net/udp.h | 137
-rw-r--r-- include/net/xdp_sock_drv.h | 14
-rw-r--r-- include/net/xfrm.h | 17
-rw-r--r-- include/net/xsk_buff_pool.h | 23
-rw-r--r-- include/scsi/libfcoe.h | 2
-rw-r--r-- include/soc/amlogic/reset-meson-aux.h | 23
-rw-r--r-- include/soc/fsl/qman.h | 2
-rw-r--r-- include/trace/events/block.h | 6
-rw-r--r-- include/trace/events/btrfs.h | 39
-rw-r--r-- include/trace/events/hugetlbfs.h | 156
-rw-r--r-- include/trace/events/io_uring.h | 24
-rw-r--r-- include/trace/events/mce.h | 49
-rw-r--r-- include/trace/events/netfs.h | 2
-rw-r--r-- include/trace/events/pwm.h | 134
-rw-r--r-- include/trace/events/rxrpc.h | 26
-rw-r--r-- include/trace/events/timestamp.h | 124
-rw-r--r-- include/trace/stages/stage3_trace_output.h | 8
-rw-r--r-- include/trace/stages/stage7_class_define.h | 1
-rw-r--r-- include/uapi/asm-generic/ioctl.h | 14
-rw-r--r-- include/uapi/asm-generic/mman.h | 4
-rw-r--r-- include/uapi/asm-generic/siginfo.h | 2
-rw-r--r-- include/uapi/asm-generic/socket.h | 2
-rw-r--r-- include/uapi/asm-generic/unistd.h | 11
-rw-r--r-- include/uapi/linux/batadv_packet.h | 29
-rw-r--r-- include/uapi/linux/bpf.h | 9
-rw-r--r-- include/uapi/linux/btrfs.h | 25
-rw-r--r-- include/uapi/linux/cryptouser.h | 5
-rw-r--r-- include/uapi/linux/dpll.h | 24
-rw-r--r-- include/uapi/linux/elf.h | 1
-rw-r--r-- include/uapi/linux/ethtool.h | 7
-rw-r--r-- include/uapi/linux/fcntl.h | 4
-rw-r--r-- include/uapi/linux/if_link.h | 17
-rw-r--r-- include/uapi/linux/io_uring.h | 119
-rw-r--r-- include/uapi/linux/media/raspberrypi/pisp_fe_config.h | 273
-rw-r--r-- include/uapi/linux/media/raspberrypi/pisp_fe_statistics.h | 64
-rw-r--r-- include/uapi/linux/mount.h | 14
-rw-r--r-- include/uapi/linux/net_shaper.h | 95
-rw-r--r-- include/uapi/linux/netdev.h | 4
-rw-r--r-- include/uapi/linux/netfilter/nf_tables.h | 18
-rw-r--r-- include/uapi/linux/nfc.h | 3
-rw-r--r-- include/uapi/linux/nl80211.h | 10
-rw-r--r-- include/uapi/linux/perf_event.h | 11
-rw-r--r-- include/uapi/linux/pidfd.h | 50
-rw-r--r-- include/uapi/linux/pkt_sched.h | 2
-rw-r--r-- include/uapi/linux/prctl.h | 22
-rw-r--r-- include/uapi/linux/rtnetlink.h | 2
-rw-r--r-- include/uapi/linux/sed-opal.h | 1
-rw-r--r-- include/uapi/linux/thermal.h | 29
-rw-r--r-- include/uapi/linux/ublk_cmd.h | 18
-rw-r--r-- include/uapi/linux/udp.h | 2
-rw-r--r-- include/uapi/linux/v4l2-dv-timings.h | 2
-rw-r--r-- include/uapi/linux/videodev2.h | 6
-rw-r--r-- include/uapi/linux/virtio_crypto.h | 1
-rw-r--r-- include/uapi/linux/vmclock-abi.h | 182
-rw-r--r-- include/uapi/linux/xattr.h | 7
-rw-r--r-- include/uapi/linux/xfrm.h | 2
-rw-r--r-- include/vdso/datapage.h | 8
-rw-r--r-- include/vdso/page.h | 31
-rw-r--r-- include/video/omapfb_dss.h | 8
339 files changed, 8296 insertions, 2257 deletions
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index e6f6074eadbf..a17e97e634a6 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -465,4 +465,6 @@ extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
#endif
+void acpi_processor_init_invariance_cppc(void);
+
#endif
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index 331670807cf0..cc840537885f 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -11,9 +11,15 @@ __NR_lchown,
__NR_fchown,
#endif
__NR_setxattr,
+#ifdef __NR_setxattrat
+__NR_setxattrat,
+#endif
__NR_lsetxattr,
__NR_fsetxattr,
__NR_removexattr,
+#ifdef __NR_removexattrat
+__NR_removexattrat,
+#endif
__NR_lremovexattr,
__NR_fremovexattr,
#ifdef __NR_fchownat
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
index e448ac61430c..76cf237b6e4c 100644
--- a/include/asm-generic/delay.h
+++ b/include/asm-generic/delay.h
@@ -2,6 +2,9 @@
#ifndef __ASM_GENERIC_DELAY_H
#define __ASM_GENERIC_DELAY_H
+#include <linux/math.h>
+#include <vdso/time64.h>
+
/* Undefined functions to get compile-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
@@ -12,34 +15,73 @@ extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
/*
- * The weird n/20000 thing suppresses a "comparison is always false due to
- * limited range of data type" warning with non-const 8-bit arguments.
+ * The microsecond/nanosecond delay multipliers are used to convert a
+ * constant microsecond/nanosecond value into a value which the
+ * architecture-specific implementation can transform into loops.
+ */
+#define UDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, USEC_PER_SEC))
+#define NDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, NSEC_PER_SEC))
+
+/*
+ * The maximum constant udelay/ndelay value picked out of thin air to prevent
+ * too long constant udelays/ndelays.
*/
+#define DELAY_CONST_MAX 20000
-/* 0x10c7 is 2**32 / 1000000 (rounded up) */
-#define udelay(n) \
- ({ \
- if (__builtin_constant_p(n)) { \
- if ((n) / 20000 >= 1) \
- __bad_udelay(); \
- else \
- __const_udelay((n) * 0x10c7ul); \
- } else { \
- __udelay(n); \
- } \
- })
-
-/* 0x5 is 2**32 / 1000000000 (rounded up) */
-#define ndelay(n) \
- ({ \
- if (__builtin_constant_p(n)) { \
- if ((n) / 20000 >= 1) \
- __bad_ndelay(); \
- else \
- __const_udelay((n) * 5ul); \
- } else { \
- __ndelay(n); \
- } \
- })
+/**
+ * udelay - Inserting a delay based on microseconds with busy waiting
+ * @usec: requested delay in microseconds
+ *
+ * When delaying in an atomic context, ndelay(), udelay() and mdelay() are the
+ * only valid variants of delaying/sleeping to use.
+ *
+ * When inserting delays in a non-atomic context which are shorter than the
+ * time required to queue e.g. an hrtimer and then enter the scheduler, it is
+ * also worthwhile to use udelay(). But it is not simple to specify a generic
+ * threshold for this which fits all systems. An approximation is a threshold
+ * for all delays up to 10 microseconds.
+ *
+ * When a delay is larger than the architecture-specific %MAX_UDELAY_MS value,
+ * please make sure mdelay() is used. Otherwise there is a risk of overflow.
+ *
+ * Please note that ndelay(), udelay() and mdelay() may return early for several
+ * reasons (https://lists.openwall.net/linux-kernel/2011/01/09/56):
+ *
+ * #. computed loops_per_jiffy too low (due to the time taken to execute the
+ * timer interrupt.)
+ * #. cache behaviour affecting the time it takes to execute the loop function.
+ * #. CPU clock rate changes.
+ */
+static __always_inline void udelay(unsigned long usec)
+{
+ if (__builtin_constant_p(usec)) {
+ if (usec >= DELAY_CONST_MAX)
+ __bad_udelay();
+ else
+ __const_udelay(usec * UDELAY_CONST_MULT);
+ } else {
+ __udelay(usec);
+ }
+}
+
+/**
+ * ndelay - Inserting a delay based on nanoseconds with busy waiting
+ * @nsec: requested delay in nanoseconds
+ *
+ * See udelay() for basic information about ndelay() and its variants.
+ */
+static __always_inline void ndelay(unsigned long nsec)
+{
+ if (__builtin_constant_p(nsec)) {
+ if (nsec >= DELAY_CONST_MAX)
+ __bad_ndelay();
+ else
+ __const_udelay(nsec * NDELAY_CONST_MULT);
+ } else {
+ __ndelay(nsec);
+ }
+}
+#define ndelay(x) ndelay(x)
#endif /* __ASM_GENERIC_DELAY_H */
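As a hedged illustration of the fixed-point math above: UDELAY_CONST_MULT is 2^32 / USEC_PER_SEC rounded up, so a constant microsecond count is scaled by 2^32/10^6 before __const_udelay() folds in the calibrated loop rate. The standalone sketch below is not kernel code; it mimics an x86-style __const_udelay() that computes (xloops * lpj * HZ) >> 32, and the lpj and HZ values are made up:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL
/* ceil(2^32 / 10^6), i.e. UDELAY_CONST_MULT from the patch (0x10c7) */
#define UDELAY_CONST_MULT (((1ULL << 32) + USEC_PER_SEC - 1) / USEC_PER_SEC)

int main(void)
{
	uint64_t lpj = 4000000;		/* made-up loops_per_jiffy */
	uint64_t hz = 250;		/* made-up CONFIG_HZ */
	uint64_t usec = 100;

	uint64_t xloops = usec * UDELAY_CONST_MULT;
	/* top 32 bits of xloops * lpj * hz ~= usec * lpj * hz / 10^6 */
	uint64_t loops = (xloops * lpj * hz) >> 32;

	printf("udelay(%llu) -> ~%llu loops (exact %llu)\n",
	       (unsigned long long)usec, (unsigned long long)loops,
	       (unsigned long long)(usec * lpj * hz / USEC_PER_SEC));
	return 0;
}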
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index 13f5aa68a455..25e7b4b58dcf 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -74,7 +74,8 @@
* do the trick here). \
*/ \
uint64_t ___res, ___x, ___t, ___m, ___n = (n); \
- uint32_t ___p, ___bias; \
+ uint32_t ___p; \
+ bool ___bias = false; \
\
/* determine MSB of b */ \
___p = 1 << ilog2(___b); \
@@ -87,22 +88,14 @@
___x = ~0ULL / ___b * ___b - 1; \
\
/* test our ___m with res = m * x / (p << 64) */ \
- ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \
- ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \
- ___res += (___x & 0xffffffff) * (___m >> 32); \
- ___t = (___res < ___t) ? (1ULL << 32) : 0; \
- ___res = (___res >> 32) + ___t; \
- ___res += (___m >> 32) * (___x >> 32); \
- ___res /= ___p; \
+ ___res = (___m & 0xffffffff) * (___x & 0xffffffff); \
+ ___t = (___m & 0xffffffff) * (___x >> 32) + (___res >> 32); \
+ ___res = (___m >> 32) * (___x >> 32) + (___t >> 32); \
+ ___t = (___m >> 32) * (___x & 0xffffffff) + (___t & 0xffffffff);\
+ ___res = (___res + (___t >> 32)) / ___p; \
\
- /* Now sanitize and optimize what we've got. */ \
- if (~0ULL % (___b / (___b & -___b)) == 0) { \
- /* special case, can be simplified to ... */ \
- ___n /= (___b & -___b); \
- ___m = ~0ULL / (___b / (___b & -___b)); \
- ___p = 1; \
- ___bias = 1; \
- } else if (___res != ___x / ___b) { \
+ /* Now validate what we've got. */ \
+ if (___res != ___x / ___b) { \
/* \
* We can't get away without a bias to compensate \
* for bit truncation errors. To avoid it we'd need an \
@@ -111,45 +104,18 @@
* \
* Instead we do m = p / b and n / b = (n * m + m) / p. \
*/ \
- ___bias = 1; \
+ ___bias = true; \
/* Compute m = (p << 64) / b */ \
___m = (~0ULL / ___b) * ___p; \
___m += ((~0ULL % ___b + 1) * ___p) / ___b; \
- } else { \
- /* \
- * Reduce m / p, and try to clear bit 31 of m when \
- * possible, otherwise that'll need extra overflow \
- * handling later. \
- */ \
- uint32_t ___bits = -(___m & -___m); \
- ___bits |= ___m >> 32; \
- ___bits = (~___bits) << 1; \
- /* \
- * If ___bits == 0 then setting bit 31 is unavoidable. \
- * Simply apply the maximum possible reduction in that \
- * case. Otherwise the MSB of ___bits indicates the \
- * best reduction we should apply. \
- */ \
- if (!___bits) { \
- ___p /= (___m & -___m); \
- ___m /= (___m & -___m); \
- } else { \
- ___p >>= ilog2(___bits); \
- ___m >>= ilog2(___bits); \
- } \
- /* No bias needed. */ \
- ___bias = 0; \
} \
\
+ /* Reduce m / p to help avoid overflow handling later. */ \
+ ___p /= (___m & -___m); \
+ ___m /= (___m & -___m); \
+ \
/* \
- * Now we have a combination of 2 conditions: \
- * \
- * 1) whether or not we need to apply a bias, and \
- * \
- * 2) whether or not there might be an overflow in the cross \
- * product determined by (___m & ((1 << 63) | (1 << 31))). \
- * \
- * Select the best way to do (m_bias + m * n) / (1 << 64). \
+ * Perform (m_bias + m * n) / (1 << 64). \
* From now on there will be actual runtime code generated. \
*/ \
___res = __arch_xprod_64(___m, ___n, ___bias); \
@@ -165,47 +131,42 @@
* Semantic: retval = ((bias ? m : 0) + m * n) >> 64
*
* The product is a 128-bit value, scaled down to 64 bits.
- * Assuming constant propagation to optimize away unused conditional code.
+ * Hoping for compile-time optimization of conditional code.
* Architectures may provide their own optimized assembly implementation.
*/
-static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+static __always_inline
+#else
+static inline
+#endif
+uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
{
uint32_t m_lo = m;
uint32_t m_hi = m >> 32;
uint32_t n_lo = n;
uint32_t n_hi = n >> 32;
- uint64_t res;
- uint32_t res_lo, res_hi, tmp;
-
- if (!bias) {
- res = ((uint64_t)m_lo * n_lo) >> 32;
- } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
- /* there can't be any overflow here */
- res = (m + (uint64_t)m_lo * n_lo) >> 32;
+ uint64_t x, y;
+
+ /* Determine if overflow handling can be dispensed with. */
+ bool no_ovf = __builtin_constant_p(m) &&
+ ((m >> 32) + (m & 0xffffffff) < 0x100000000);
+
+ if (no_ovf) {
+ x = (uint64_t)m_lo * n_lo + (bias ? m : 0);
+ x >>= 32;
+ x += (uint64_t)m_lo * n_hi;
+ x += (uint64_t)m_hi * n_lo;
+ x >>= 32;
+ x += (uint64_t)m_hi * n_hi;
} else {
- res = m + (uint64_t)m_lo * n_lo;
- res_lo = res >> 32;
- res_hi = (res_lo < m_hi);
- res = res_lo | ((uint64_t)res_hi << 32);
+ x = (uint64_t)m_lo * n_lo + (bias ? m_lo : 0);
+ y = (uint64_t)m_lo * n_hi + (uint32_t)(x >> 32) + (bias ? m_hi : 0);
+ x = (uint64_t)m_hi * n_hi + (uint32_t)(y >> 32);
+ y = (uint64_t)m_hi * n_lo + (uint32_t)y;
+ x += (uint32_t)(y >> 32);
}
- if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
- /* there can't be any overflow here */
- res += (uint64_t)m_lo * n_hi;
- res += (uint64_t)m_hi * n_lo;
- res >>= 32;
- } else {
- res += (uint64_t)m_lo * n_hi;
- tmp = res >> 32;
- res += (uint64_t)m_hi * n_lo;
- res_lo = res >> 32;
- res_hi = (res_lo < tmp);
- res = res_lo | ((uint64_t)res_hi << 32);
- }
-
- res += (uint64_t)m_hi * n_hi;
-
- return res;
+ return x;
}
#endif
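The rewritten __arch_xprod_64() builds the high 64 bits of the 128-bit product m * n from four 32x32->64 partial products with explicit carry propagation. Below is a standalone sketch of the same scheme (the bias-free overflow-handling path), cross-checked against a compiler-provided 128-bit type; it is illustrative, not the kernel implementation:

/* Requires a compiler with unsigned __int128 (GCC/Clang, 64-bit). */
#include <assert.h>
#include <stdint.h>

static uint64_t xprod_hi64(uint64_t m, uint64_t n)
{
	uint32_t m_lo = m, m_hi = m >> 32;
	uint32_t n_lo = n, n_hi = n >> 32;
	uint64_t x, y;

	x = (uint64_t)m_lo * n_lo;			/* low x low */
	y = (uint64_t)m_lo * n_hi + (uint32_t)(x >> 32);/* cross + carry */
	x = (uint64_t)m_hi * n_hi + (uint32_t)(y >> 32);/* high x high */
	y = (uint64_t)m_hi * n_lo + (uint32_t)y;	/* other cross */
	return x + (uint32_t)(y >> 32);
}

int main(void)
{
	uint64_t m = 0x8000000080000001ULL, n = 0xfedcba9876543210ULL;

	assert(xprod_hi64(m, n) ==
	       (uint64_t)(((unsigned __int128)m * n) >> 64));
	return 0;
}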
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 80de699bf6af..a5cbbf3e26ec 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -540,6 +540,7 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#if !defined(inb) && !defined(_inb)
#define _inb _inb
+#ifdef CONFIG_HAS_IOPORT
static inline u8 _inb(unsigned long addr)
{
u8 val;
@@ -549,10 +550,15 @@ static inline u8 _inb(unsigned long addr)
__io_par(val);
return val;
}
+#else
+u8 _inb(unsigned long addr)
+ __compiletime_error("inb()) requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(inw) && !defined(_inw)
#define _inw _inw
+#ifdef CONFIG_HAS_IOPORT
static inline u16 _inw(unsigned long addr)
{
u16 val;
@@ -562,10 +568,15 @@ static inline u16 _inw(unsigned long addr)
__io_par(val);
return val;
}
+#else
+u16 _inw(unsigned long addr)
+ __compiletime_error("inw() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(inl) && !defined(_inl)
#define _inl _inl
+#ifdef CONFIG_HAS_IOPORT
static inline u32 _inl(unsigned long addr)
{
u32 val;
@@ -575,36 +586,55 @@ static inline u32 _inl(unsigned long addr)
__io_par(val);
return val;
}
+#else
+u32 _inl(unsigned long addr)
+ __compiletime_error("inl() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(outb) && !defined(_outb)
#define _outb _outb
+#ifdef CONFIG_HAS_IOPORT
static inline void _outb(u8 value, unsigned long addr)
{
__io_pbw();
__raw_writeb(value, PCI_IOBASE + addr);
__io_paw();
}
+#else
+void _outb(u8 value, unsigned long addr)
+ __compiletime_error("outb() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(outw) && !defined(_outw)
#define _outw _outw
+#ifdef CONFIG_HAS_IOPORT
static inline void _outw(u16 value, unsigned long addr)
{
__io_pbw();
__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
__io_paw();
}
+#else
+void _outw(u16 value, unsigned long addr)
+ __compiletime_error("outw() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(outl) && !defined(_outl)
#define _outl _outl
+#ifdef CONFIG_HAS_IOPORT
static inline void _outl(u32 value, unsigned long addr)
{
__io_pbw();
__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
__io_paw();
}
+#else
+void _outl(u32 value, unsigned long addr)
+ __compiletime_error("outl() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#include <linux/logic_pio.h>
@@ -688,53 +718,83 @@ static inline void outl_p(u32 value, unsigned long addr)
#ifndef insb
#define insb insb
+#ifdef CONFIG_HAS_IOPORT
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
readsb(PCI_IOBASE + addr, buffer, count);
}
+#else
+void insb(unsigned long addr, void *buffer, unsigned int count)
+ __compiletime_error("insb() requires HAS_IOPORT");
+#endif
#endif
#ifndef insw
#define insw insw
+#ifdef CONFIG_HAS_IOPORT
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
readsw(PCI_IOBASE + addr, buffer, count);
}
+#else
+void insw(unsigned long addr, void *buffer, unsigned int count)
+ __compiletime_error("insw() requires HAS_IOPORT");
+#endif
#endif
#ifndef insl
#define insl insl
+#ifdef CONFIG_HAS_IOPORT
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
readsl(PCI_IOBASE + addr, buffer, count);
}
+#else
+void insl(unsigned long addr, void *buffer, unsigned int count)
+ __compiletime_error("insl() requires HAS_IOPORT");
+#endif
#endif
#ifndef outsb
#define outsb outsb
+#ifdef CONFIG_HAS_IOPORT
static inline void outsb(unsigned long addr, const void *buffer,
unsigned int count)
{
writesb(PCI_IOBASE + addr, buffer, count);
}
+#else
+void outsb(unsigned long addr, const void *buffer, unsigned int count)
+ __compiletime_error("outsb() requires HAS_IOPORT");
+#endif
#endif
#ifndef outsw
#define outsw outsw
+#ifdef CONFIG_HAS_IOPORT
static inline void outsw(unsigned long addr, const void *buffer,
unsigned int count)
{
writesw(PCI_IOBASE + addr, buffer, count);
}
+#else
+void outsw(unsigned long addr, const void *buffer, unsigned int count)
+ __compiletime_error("outsw() requires HAS_IOPORT");
+#endif
#endif
#ifndef outsl
#define outsl outsl
+#ifdef CONFIG_HAS_IOPORT
static inline void outsl(unsigned long addr, const void *buffer,
unsigned int count)
{
writesl(PCI_IOBASE + addr, buffer, count);
}
+#else
+void outsl(unsigned long addr, const void *buffer, unsigned int count)
+ __compiletime_error("outsl() requires HAS_IOPORT");
+#endif
#endif
#ifndef insb_p
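All of the guards above follow one idiom: when CONFIG_HAS_IOPORT is unset, the accessor is only declared, never defined, and __compiletime_error() turns any call that survives dead-code elimination into a build failure instead of a link error. A minimal standalone sketch of the idiom, using the underlying GCC attribute directly; FEATURE_ENABLED is a made-up stand-in for the Kconfig symbol:

#define FEATURE_ENABLED 0	/* stand-in for CONFIG_HAS_IOPORT */

#if FEATURE_ENABLED
static inline unsigned char read_port(unsigned long addr)
{
	return 0;	/* a real implementation would live here */
}
#else
/* Declared but never defined; the attribute rejects any surviving call. */
unsigned char read_port(unsigned long addr)
	__attribute__((error("read_port() requires FEATURE_ENABLED")));
#endif

int main(void)
{
	/* With FEATURE_ENABLED == 0, uncommenting the call below fails
	 * at compile time with the attached message. */
	/* return read_port(0x60); */
	return 0;
}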
@@ -1151,7 +1211,6 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
#endif
#ifndef memset_io
-#define memset_io memset_io
/**
* memset_io Set a range of I/O memory to a constant value
* @addr: The beginning of the I/O-memory range to set
@@ -1160,15 +1219,10 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
*
* Set a range of I/O memory to a given value.
*/
-static inline void memset_io(volatile void __iomem *addr, int value,
- size_t size)
-{
- memset(__io_virt(addr), value, size);
-}
+void memset_io(volatile void __iomem *addr, int val, size_t count);
#endif
#ifndef memcpy_fromio
-#define memcpy_fromio memcpy_fromio
/**
* memcpy_fromio Copy a block of data from I/O memory
* @dst: The (RAM) destination for the copy
@@ -1177,16 +1231,10 @@ static inline void memset_io(volatile void __iomem *addr, int value,
*
* Copy a block of data from I/O memory.
*/
-static inline void memcpy_fromio(void *buffer,
- const volatile void __iomem *addr,
- size_t size)
-{
- memcpy(buffer, __io_virt(addr), size);
-}
+void memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count);
#endif
#ifndef memcpy_toio
-#define memcpy_toio memcpy_toio
/**
* memcpy_toio Copy a block of data into I/O memory
* @dst: The (I/O memory) destination for the copy
@@ -1195,11 +1243,7 @@ static inline void memcpy_fromio(void *buffer,
*
* Copy a block of data to I/O memory.
*/
-static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
- size_t size)
-{
- memcpy(__io_virt(addr), buffer, size);
-}
+void memcpy_toio(volatile void __iomem *dst, const void *src, size_t count);
#endif
extern int devmem_is_allowed(unsigned long pfn);
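Moving memset_io(), memcpy_fromio() and memcpy_toio() out of line does not change how drivers call them; a short hypothetical user of the declarations above (function and parameter names are illustrative):

#include <linux/io.h>
#include <linux/errno.h>

/* Hypothetical driver snippet: copy a firmware blob into device memory
 * through the __iomem helpers declared above. */
static int load_fw(void __iomem *dev_mem, const void *fw, size_t len)
{
	if (!dev_mem)
		return -EINVAL;

	memset_io(dev_mem, 0, len);	/* clear the window first */
	memcpy_toio(dev_mem, fw, len);	/* then copy the image in */
	return 0;
}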
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 6796abe1900e..6d1fb6162ac1 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -64,6 +64,19 @@ static inline int pfn_valid(unsigned long pfn)
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
+#ifdef CONFIG_DEBUG_VIRTUAL
+#define page_to_phys(page) \
+({ \
+ unsigned long __pfn = page_to_pfn(page); \
+ \
+ WARN_ON_ONCE(!pfn_valid(__pfn)); \
+ PFN_PHYS(__pfn); \
+})
+#else
+#define page_to_phys(page) PFN_PHYS(page_to_pfn(page))
+#endif /* CONFIG_DEBUG_VIRTUAL */
+#define phys_to_page(phys) pfn_to_page(PHYS_PFN(phys))
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index c835607f78ae..01dafd604188 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -12,8 +12,7 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
#endif /* __arch_get_k_vdso_data */
#ifndef __arch_update_vsyscall
-static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
- struct timekeeper *tk)
+static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata)
{
}
#endif /* __arch_update_vsyscall */
diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h
index adf91a783b5c..5dcaf4ae904a 100644
--- a/include/asm-generic/vga.h
+++ b/include/asm-generic/vga.h
@@ -1,25 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Access to VGA videoram
- *
- * (c) 1998 Martin Mares <mj@ucw.cz>
- */
#ifndef __ASM_GENERIC_VGA_H
#define __ASM_GENERIC_VGA_H
-
-/*
- * On most architectures that support VGA, we can just
- * recalculate addresses and then access the videoram
- * directly without any black magic.
- *
- * Everyone else needs to ioremap the address and use
- * proper I/O accesses.
- */
-#ifndef VGA_MAP_MEM
-#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
-#endif
-
-#define vga_readb(x) (*(x))
-#define vga_writeb(x, y) (*(y) = (x))
-
-#endif /* _ASM_GENERIC_VGA_H */
+#endif /* __ASM_GENERIC_VGA_H */
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 18a10cad07aa..cdf7da74bf2f 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -12,24 +12,19 @@
#include <linux/crypto.h>
/**
- * struct akcipher_request - public key request
+ * struct akcipher_request - public key cipher request
*
* @base: Common attributes for async crypto requests
* @src: Source data
- * For verify op this is signature + digest, in that case
- * total size of @src is @src_len + @dst_len.
- * @dst: Destination data (Should be NULL for verify op)
+ * @dst: Destination data
* @src_len: Size of the input buffer
- * For verify op it's size of signature part of @src, this part
- * is supposed to be operated by cipher.
- * @dst_len: Size of @dst buffer (for all ops except verify).
+ * @dst_len: Size of @dst buffer
* It needs to be at least as big as the expected result
* depending on the operation.
* After operation it will be updated with the actual size of the
* result.
* In case of error where the dst sgl size was insufficient,
* it will be updated to the size required for the operation.
- * For verify op this is size of digest part in @src.
* @__ctx: Start of private context data
*/
struct akcipher_request {
@@ -55,15 +50,8 @@ struct crypto_akcipher {
};
/**
- * struct akcipher_alg - generic public key algorithm
+ * struct akcipher_alg - generic public key cipher algorithm
*
- * @sign: Function performs a sign operation as defined by public key
- * algorithm. In case of error, where the dst_len was insufficient,
- * the req->dst_len will be updated to the size required for the
- * operation
- * @verify: Function performs a complete verify operation as defined by
- * public key algorithm, returning verification status. Requires
- * digest value as input parameter.
* @encrypt: Function performs an encrypt operation as defined by public key
* algorithm. In case of error, where the dst_len was insufficient,
* the req->dst_len will be updated to the size required for the
@@ -94,8 +82,6 @@ struct crypto_akcipher {
* @base: Common crypto API algorithm data structure
*/
struct akcipher_alg {
- int (*sign)(struct akcipher_request *req);
- int (*verify)(struct akcipher_request *req);
int (*encrypt)(struct akcipher_request *req);
int (*decrypt)(struct akcipher_request *req);
int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key,
@@ -110,9 +96,9 @@ struct akcipher_alg {
};
/**
- * DOC: Generic Public Key API
+ * DOC: Generic Public Key Cipher API
*
- * The Public Key API is used with the algorithms of type
+ * The Public Key Cipher API is used with the algorithms of type
* CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto)
*/
@@ -243,10 +229,9 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req,
*
* @req: public key request
* @src: ptr to input scatter list
- * @dst: ptr to output scatter list or NULL for verify op
+ * @dst: ptr to output scatter list
* @src_len: size of the src input scatter list to be processed
- * @dst_len: size of the dst output scatter list or size of signature
- * portion in @src for verify op
+ * @dst_len: size of the dst output scatter list
*/
static inline void akcipher_request_set_crypt(struct akcipher_request *req,
struct scatterlist *src,
@@ -348,44 +333,6 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
void *dst, unsigned int dlen);
/**
- * crypto_akcipher_sign() - Invoke public key sign operation
- *
- * Function invokes the specific public key sign operation for a given
- * public key algorithm
- *
- * @req: asymmetric key request
- *
- * Return: zero on success; error code in case of error
- */
-static inline int crypto_akcipher_sign(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- return crypto_akcipher_alg(tfm)->sign(req);
-}
-
-/**
- * crypto_akcipher_verify() - Invoke public key signature verification
- *
- * Function invokes the specific public key signature verification operation
- * for a given public key algorithm.
- *
- * @req: asymmetric key request
- *
- * Note: req->dst should be NULL, req->src should point to SG of size
- * (req->src_size + req->dst_size), containing signature (of req->src_size
- * length) with appended digest (of req->dst_size length).
- *
- * Return: zero on verification success; error code in case of error.
- */
-static inline int crypto_akcipher_verify(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- return crypto_akcipher_alg(tfm)->verify(req);
-}
-
-/**
* crypto_akcipher_set_pub_key() - Invoke set public key operation
*
* Function invokes the algorithm specific set key function, which knows
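With sign() and verify() removed, akcipher is purely an encrypt/decrypt transform. A hedged sketch of a caller built on the synchronous helper declared above (error handling abbreviated; the helper's exact success return convention is assumed to be non-negative):

#include <crypto/akcipher.h>
#include <linux/err.h>

/* Sketch: one-shot RSA decryption through the sync helper. */
static int decrypt_blob_example(const void *key, unsigned int keylen,
				const void *src, unsigned int slen,
				void *dst, unsigned int dlen)
{
	struct crypto_akcipher *tfm;
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_priv_key(tfm, key, keylen);
	if (ret >= 0)
		ret = crypto_akcipher_sync_decrypt(tfm, src, slen, dst, dlen);

	crypto_free_akcipher(tfm);
	return ret;
}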
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index a0fba4b2eccf..14ee62bc52b6 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -124,7 +124,7 @@ static inline struct akcipher_alg *crypto_spawn_akcipher_alg(
/**
* crypto_register_akcipher() -- Register public key algorithm
*
- * Function registers an implementation of a public key verify algorithm
+ * Function registers an implementation of a public key cipher algorithm
*
* @alg: algorithm definition
*
@@ -135,7 +135,7 @@ int crypto_register_akcipher(struct akcipher_alg *alg);
/**
* crypto_unregister_akcipher() -- Unregister public key algorithm
*
- * Function unregisters an implementation of a public key verify algorithm
+ * Function unregisters an implementation of a public key cipher algorithm
*
* @alg: algorithm definition
*/
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
index 065f00e4bf40..57cd75242141 100644
--- a/include/crypto/internal/ecc.h
+++ b/include/crypto/internal/ecc.h
@@ -42,6 +42,18 @@
#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
+/*
+ * The integers r and s making up the signature are expected to be
+ * formatted as two consecutive u64 arrays of size ECC_MAX_BYTES.
+ * The bytes within each u64 digit are in native endianness,
+ * but the order of the u64 digits themselves is little endian.
+ * This format allows direct use by internal vli_*() functions.
+ */
+struct ecdsa_raw_sig {
+ u64 r[ECC_MAX_DIGITS];
+ u64 s[ECC_MAX_DIGITS];
+};
+
/**
* ecc_swap_digits() - Copy ndigits from big endian array to native array
* @in: Input array
@@ -293,4 +305,6 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
const u64 *y, const struct ecc_point *q,
const struct ecc_curve *curve);
+extern struct crypto_template ecdsa_x962_tmpl;
+extern struct crypto_template ecdsa_p1363_tmpl;
#endif
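A standalone sketch of the digit layout the ecdsa_raw_sig comment describes, packing a big-endian 256-bit value into u64 digits with the least significant digit first (plain C, illustrative only, not the kernel's helper):

#include <stdint.h>

#define NDIGITS 4	/* 256 bits, e.g. NIST P-256 */

static void be_bytes_to_digits(const uint8_t be[32], uint64_t out[NDIGITS])
{
	for (int i = 0; i < NDIGITS; i++) {
		uint64_t d = 0;

		/* digit i comes from bytes [32 - 8*(i+1), 32 - 8*i) */
		for (int j = 0; j < 8; j++)
			d = (d << 8) | be[32 - 8 * (i + 1) + j];
		out[i] = d;	/* out[0] is the least significant digit */
	}
}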
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
index e870133f4b77..071a1951b992 100644
--- a/include/crypto/internal/rsa.h
+++ b/include/crypto/internal/rsa.h
@@ -8,6 +8,7 @@
#ifndef _RSA_HELPER_
#define _RSA_HELPER_
#include <linux/types.h>
+#include <crypto/akcipher.h>
/**
* rsa_key - RSA key structure
@@ -53,5 +54,33 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
unsigned int key_len);
+#define RSA_PUB (true)
+#define RSA_PRIV (false)
+
+static inline int rsa_set_key(struct crypto_akcipher *child,
+ unsigned int *key_size, bool is_pubkey,
+ const void *key, unsigned int keylen)
+{
+ int err;
+
+ *key_size = 0;
+
+ if (is_pubkey)
+ err = crypto_akcipher_set_pub_key(child, key, keylen);
+ else
+ err = crypto_akcipher_set_priv_key(child, key, keylen);
+ if (err)
+ return err;
+
+ /* Find out new modulus size from rsa implementation */
+ err = crypto_akcipher_maxsize(child);
+ if (err > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ *key_size = err;
+ return 0;
+}
+
extern struct crypto_template rsa_pkcs1pad_tmpl;
+extern struct crypto_template rsassa_pkcs1_tmpl;
#endif
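A hedged sketch of how a wrapping template's setkey path might use rsa_set_key(); the context structure and function names are hypothetical:

#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>

/* Hypothetical padding-template context: the wrapper forwards the key to
 * the inner "rsa" akcipher and caches the modulus size for padding. */
struct rsapad_tfm_ctx {
	struct crypto_akcipher *child;	/* inner "rsa" transform */
	unsigned int key_size;		/* modulus size in bytes */
};

static int rsapad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct rsapad_tfm_ctx *ctx = akcipher_tfm_ctx(tfm);

	return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen);
}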
diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h
index 97cb26ef8115..b16648c1a986 100644
--- a/include/crypto/internal/sig.h
+++ b/include/crypto/internal/sig.h
@@ -10,8 +10,88 @@
#include <crypto/algapi.h>
#include <crypto/sig.h>
+struct sig_instance {
+ void (*free)(struct sig_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct sig_alg, base)];
+ struct crypto_instance base;
+ };
+ struct sig_alg alg;
+ };
+};
+
+struct crypto_sig_spawn {
+ struct crypto_spawn base;
+};
+
static inline void *crypto_sig_ctx(struct crypto_sig *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
+
+/**
+ * crypto_register_sig() -- Register public key signature algorithm
+ *
+ * Function registers an implementation of a public key signature algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_sig(struct sig_alg *alg);
+
+/**
+ * crypto_unregister_sig() -- Unregister public key signature algorithm
+ *
+ * Function unregisters an implementation of a public key signature algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_sig(struct sig_alg *alg);
+
+int sig_register_instance(struct crypto_template *tmpl,
+ struct sig_instance *inst);
+
+static inline struct sig_instance *sig_instance(struct crypto_instance *inst)
+{
+ return container_of(&inst->alg, struct sig_instance, alg.base);
+}
+
+static inline struct sig_instance *sig_alg_instance(struct crypto_sig *tfm)
+{
+ return sig_instance(crypto_tfm_alg_instance(&tfm->base));
+}
+
+static inline struct crypto_instance *sig_crypto_instance(struct sig_instance
+ *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline void *sig_instance_ctx(struct sig_instance *inst)
+{
+ return crypto_instance_ctx(sig_crypto_instance(inst));
+}
+
+int crypto_grab_sig(struct crypto_sig_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+static inline struct crypto_sig *crypto_spawn_sig(struct crypto_sig_spawn
+ *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_drop_sig(struct crypto_sig_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct sig_alg *crypto_spawn_sig_alg(struct crypto_sig_spawn
+ *spawn)
+{
+ return container_of(spawn->base.alg, struct sig_alg, base);
+}
#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index b7f308977c84..81098e00c08f 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -104,9 +104,6 @@ static inline int restrict_link_by_digsig(struct key *dest_keyring,
extern int query_asymmetric_key(const struct kernel_pkey_params *,
struct kernel_pkey_query *);
-extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *);
-extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *);
-extern int create_signature(struct kernel_pkey_params *, const void *, void *);
extern int verify_signature(const struct key *,
const struct public_key_signature *);
diff --git a/include/crypto/sig.h b/include/crypto/sig.h
index d25186bb2be3..cff41ad93824 100644
--- a/include/crypto/sig.h
+++ b/include/crypto/sig.h
@@ -20,6 +20,56 @@ struct crypto_sig {
};
/**
+ * struct sig_alg - generic public key signature algorithm
+ *
+ * @sign: Function performs a sign operation as defined by public key
+ * algorithm. Optional.
+ * @verify: Function performs a complete verify operation as defined by
+ * public key algorithm, returning verification status. Optional.
+ * @set_pub_key: Function invokes the algorithm specific set public key
+ * function, which knows how to decode and interpret
+ * the BER encoded public key and parameters. Mandatory.
+ * @set_priv_key: Function invokes the algorithm specific set private key
+ * function, which knows how to decode and interpret
+ * the BER encoded private key and parameters. Optional.
+ * @key_size: Function returns key size. Mandatory.
+ * @digest_size: Function returns maximum digest size. Optional.
+ * @max_size: Function returns maximum signature size. Optional.
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct sig_alg {
+ int (*sign)(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+ int (*verify)(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen);
+ int (*set_pub_key)(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+ int (*set_priv_key)(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+ unsigned int (*key_size)(struct crypto_sig *tfm);
+ unsigned int (*digest_size)(struct crypto_sig *tfm);
+ unsigned int (*max_size)(struct crypto_sig *tfm);
+ int (*init)(struct crypto_sig *tfm);
+ void (*exit)(struct crypto_sig *tfm);
+
+ struct crypto_alg base;
+};
+
+/**
* DOC: Generic Public Key Signature API
*
* The Public Key Signature API is used with the algorithms of type
@@ -47,6 +97,21 @@ static inline struct crypto_tfm *crypto_sig_tfm(struct crypto_sig *tfm)
return &tfm->base;
}
+static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_sig, base);
+}
+
+static inline struct sig_alg *__crypto_sig_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct sig_alg, base);
+}
+
+static inline struct sig_alg *crypto_sig_alg(struct crypto_sig *tfm)
+{
+ return __crypto_sig_alg(crypto_sig_tfm(tfm)->__crt_alg);
+}
+
/**
* crypto_free_sig() - free signature tfm handle
*
@@ -60,16 +125,55 @@ static inline void crypto_free_sig(struct crypto_sig *tfm)
}
/**
- * crypto_sig_maxsize() - Get len for output buffer
+ * crypto_sig_keysize() - Get key size
+ *
+ * Function returns the key size in bytes.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a setkey or with a failed setkey, you may end up
+ * in a NULL dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+static inline unsigned int crypto_sig_keysize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->key_size(tfm);
+}
+
+/**
+ * crypto_sig_digestsize() - Get maximum digest size
+ *
+ * Function returns the maximum digest size in bytes.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a setkey or with a failed setkey, you may end up
+ * in a NULL dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+static inline unsigned int crypto_sig_digestsize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->digest_size(tfm);
+}
+
+/**
+ * crypto_sig_maxsize() - Get maximum signature size
*
- * Function returns the dest buffer size required for a given key.
+ * Function returns the maximum signature size in bytes.
* Function assumes that the key is already set in the transformation. If this
- * function is called without a setkey or with a failed setkey, you will end up
+ * function is called without a setkey or with a failed setkey, you may end up
* in a NULL dereference.
*
* @tfm: signature tfm handle allocated with crypto_alloc_sig()
*/
-int crypto_sig_maxsize(struct crypto_sig *tfm);
+static inline unsigned int crypto_sig_maxsize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->max_size(tfm);
+}
/**
* crypto_sig_sign() - Invoke signing operation
@@ -84,9 +188,14 @@ int crypto_sig_maxsize(struct crypto_sig *tfm);
*
* Return: zero on success; error code in case of error
*/
-int crypto_sig_sign(struct crypto_sig *tfm,
- const void *src, unsigned int slen,
- void *dst, unsigned int dlen);
+static inline int crypto_sig_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->sign(tfm, src, slen, dst, dlen);
+}
/**
* crypto_sig_verify() - Invoke signature verification
@@ -102,9 +211,14 @@ int crypto_sig_sign(struct crypto_sig *tfm,
*
* Return: zero on verification success; error code in case of error.
*/
-int crypto_sig_verify(struct crypto_sig *tfm,
- const void *src, unsigned int slen,
- const void *digest, unsigned int dlen);
+static inline int crypto_sig_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->verify(tfm, src, slen, digest, dlen);
+}
/**
* crypto_sig_set_pubkey() - Invoke set public key operation
@@ -119,8 +233,13 @@ int crypto_sig_verify(struct crypto_sig *tfm,
*
* Return: zero on success; error code in case of error
*/
-int crypto_sig_set_pubkey(struct crypto_sig *tfm,
- const void *key, unsigned int keylen);
+static inline int crypto_sig_set_pubkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->set_pub_key(tfm, key, keylen);
+}
/**
* crypto_sig_set_privkey() - Invoke set private key operation
@@ -135,6 +254,11 @@ int crypto_sig_set_pubkey(struct crypto_sig *tfm,
*
* Return: zero on success; error code in case of error
*/
-int crypto_sig_set_privkey(struct crypto_sig *tfm,
- const void *key, unsigned int keylen);
+static inline int crypto_sig_set_privkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->set_priv_key(tfm, key, keylen);
+}
#endif
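A hedged usage sketch of the now-inlined sig API (the algorithm name is illustrative and error handling abbreviated):

#include <crypto/sig.h>
#include <linux/err.h>

/* Sketch: verify a detached signature over a precomputed digest. */
static int verify_example(const void *pubkey, unsigned int keylen,
			  const void *sig, unsigned int siglen,
			  const void *digest, unsigned int dlen)
{
	struct crypto_sig *tfm;
	int ret;

	tfm = crypto_alloc_sig("ecdsa-nist-p256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_sig_set_pubkey(tfm, pubkey, keylen);
	if (!ret)
		ret = crypto_sig_verify(tfm, sig, siglen, digest, dlen);

	crypto_free_sig(tfm);
	return ret;
}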
diff --git a/include/drm/intel/i915_pciids.h b/include/drm/intel/i915_pciids.h
index 2bf03ebfcf73..f35534522d33 100644
--- a/include/drm/intel/i915_pciids.h
+++ b/include/drm/intel/i915_pciids.h
@@ -771,13 +771,24 @@
INTEL_ATS_M150_IDS(MACRO__, ## __VA_ARGS__), \
INTEL_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
-/* MTL */
-#define INTEL_ARL_IDS(MACRO__, ...) \
- MACRO__(0x7D41, ## __VA_ARGS__), \
+/* ARL */
+#define INTEL_ARL_H_IDS(MACRO__, ...) \
MACRO__(0x7D51, ## __VA_ARGS__), \
- MACRO__(0x7D67, ## __VA_ARGS__), \
MACRO__(0x7DD1, ## __VA_ARGS__)
+#define INTEL_ARL_U_IDS(MACRO__, ...) \
+ MACRO__(0x7D41, ## __VA_ARGS__) \
+
+#define INTEL_ARL_S_IDS(MACRO__, ...) \
+ MACRO__(0x7D67, ## __VA_ARGS__), \
+ MACRO__(0xB640, ## __VA_ARGS__)
+
+#define INTEL_ARL_IDS(MACRO__, ...) \
+ INTEL_ARL_H_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ARL_U_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ARL_S_IDS(MACRO__, ## __VA_ARGS__)
+
+/* MTL */
#define INTEL_MTL_IDS(MACRO__, ...) \
INTEL_ARL_IDS(MACRO__, ## __VA_ARGS__), \
MACRO__(0x7D40, ## __VA_ARGS__), \
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index 8332f8d82f96..e850dc3a1ad3 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -255,8 +255,10 @@
#define QCOM_ID_IPQ9510 521
#define QCOM_ID_QRB4210 523
#define QCOM_ID_QRB2210 524
+#define QCOM_ID_SAR2130P 525
#define QCOM_ID_SM8475 530
#define QCOM_ID_SM8475P 531
+#define QCOM_ID_SA8255P 532
#define QCOM_ID_SA8775P 534
#define QCOM_ID_QRU1000 539
#define QCOM_ID_SM8475_2 540
@@ -264,6 +266,7 @@
#define QCOM_ID_X1E80100 555
#define QCOM_ID_SM8650 557
#define QCOM_ID_SM4450 568
+#define QCOM_ID_SAR1130P 579
#define QCOM_ID_QDU1010 587
#define QCOM_ID_QRU1032 588
#define QCOM_ID_QRU1052 589
@@ -276,8 +279,12 @@
#define QCOM_ID_QCM8550 604
#define QCOM_ID_IPQ5300 624
#define QCOM_ID_IPQ5321 650
+#define QCOM_ID_IPQ5424 651
+#define QCOM_ID_IPQ5404 671
+#define QCOM_ID_QCS9100 667
#define QCOM_ID_QCS8300 674
#define QCOM_ID_QCS8275 675
+#define QCOM_ID_QCS615 680
/*
* The board type and revision information, used by Qualcomm bootloaders and
diff --git a/include/dt-bindings/clock/qcom,sa8775p-camcc.h b/include/dt-bindings/clock/qcom,sa8775p-camcc.h
new file mode 100644
index 000000000000..38531acd699f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-camcc.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_CAM_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SA8775P_CAM_CC_H
+
+/* CAM_CC clocks */
+#define CAM_CC_CAMNOC_AXI_CLK 0
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 1
+#define CAM_CC_CAMNOC_DCD_XO_CLK 2
+#define CAM_CC_CAMNOC_XO_CLK 3
+#define CAM_CC_CCI_0_CLK 4
+#define CAM_CC_CCI_0_CLK_SRC 5
+#define CAM_CC_CCI_1_CLK 6
+#define CAM_CC_CCI_1_CLK_SRC 7
+#define CAM_CC_CCI_2_CLK 8
+#define CAM_CC_CCI_2_CLK_SRC 9
+#define CAM_CC_CCI_3_CLK 10
+#define CAM_CC_CCI_3_CLK_SRC 11
+#define CAM_CC_CORE_AHB_CLK 12
+#define CAM_CC_CPAS_AHB_CLK 13
+#define CAM_CC_CPAS_FAST_AHB_CLK 14
+#define CAM_CC_CPAS_IFE_0_CLK 15
+#define CAM_CC_CPAS_IFE_1_CLK 16
+#define CAM_CC_CPAS_IFE_LITE_CLK 17
+#define CAM_CC_CPAS_IPE_CLK 18
+#define CAM_CC_CPAS_SFE_LITE_0_CLK 19
+#define CAM_CC_CPAS_SFE_LITE_1_CLK 20
+#define CAM_CC_CPHY_RX_CLK_SRC 21
+#define CAM_CC_CSI0PHYTIMER_CLK 22
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 23
+#define CAM_CC_CSI1PHYTIMER_CLK 24
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 25
+#define CAM_CC_CSI2PHYTIMER_CLK 26
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 27
+#define CAM_CC_CSI3PHYTIMER_CLK 28
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 29
+#define CAM_CC_CSID_CLK 30
+#define CAM_CC_CSID_CLK_SRC 31
+#define CAM_CC_CSID_CSIPHY_RX_CLK 32
+#define CAM_CC_CSIPHY0_CLK 33
+#define CAM_CC_CSIPHY1_CLK 34
+#define CAM_CC_CSIPHY2_CLK 35
+#define CAM_CC_CSIPHY3_CLK 36
+#define CAM_CC_FAST_AHB_CLK_SRC 37
+#define CAM_CC_GDSC_CLK 38
+#define CAM_CC_ICP_AHB_CLK 39
+#define CAM_CC_ICP_CLK 40
+#define CAM_CC_ICP_CLK_SRC 41
+#define CAM_CC_IFE_0_CLK 42
+#define CAM_CC_IFE_0_CLK_SRC 43
+#define CAM_CC_IFE_0_FAST_AHB_CLK 44
+#define CAM_CC_IFE_1_CLK 45
+#define CAM_CC_IFE_1_CLK_SRC 46
+#define CAM_CC_IFE_1_FAST_AHB_CLK 47
+#define CAM_CC_IFE_LITE_AHB_CLK 48
+#define CAM_CC_IFE_LITE_CLK 49
+#define CAM_CC_IFE_LITE_CLK_SRC 50
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 51
+#define CAM_CC_IFE_LITE_CSID_CLK 52
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 53
+#define CAM_CC_IPE_AHB_CLK 54
+#define CAM_CC_IPE_CLK 55
+#define CAM_CC_IPE_CLK_SRC 56
+#define CAM_CC_IPE_FAST_AHB_CLK 57
+#define CAM_CC_MCLK0_CLK 58
+#define CAM_CC_MCLK0_CLK_SRC 59
+#define CAM_CC_MCLK1_CLK 60
+#define CAM_CC_MCLK1_CLK_SRC 61
+#define CAM_CC_MCLK2_CLK 62
+#define CAM_CC_MCLK2_CLK_SRC 63
+#define CAM_CC_MCLK3_CLK 64
+#define CAM_CC_MCLK3_CLK_SRC 65
+#define CAM_CC_PLL0 66
+#define CAM_CC_PLL0_OUT_EVEN 67
+#define CAM_CC_PLL0_OUT_ODD 68
+#define CAM_CC_PLL2 69
+#define CAM_CC_PLL3 70
+#define CAM_CC_PLL3_OUT_EVEN 71
+#define CAM_CC_PLL4 72
+#define CAM_CC_PLL4_OUT_EVEN 73
+#define CAM_CC_PLL5 74
+#define CAM_CC_PLL5_OUT_EVEN 75
+#define CAM_CC_SFE_LITE_0_CLK 76
+#define CAM_CC_SFE_LITE_0_FAST_AHB_CLK 77
+#define CAM_CC_SFE_LITE_1_CLK 78
+#define CAM_CC_SFE_LITE_1_FAST_AHB_CLK 79
+#define CAM_CC_SLEEP_CLK 80
+#define CAM_CC_SLEEP_CLK_SRC 81
+#define CAM_CC_SLOW_AHB_CLK_SRC 82
+#define CAM_CC_SM_OBS_CLK 83
+#define CAM_CC_XO_CLK_SRC 84
+#define CAM_CC_QDSS_DEBUG_XO_CLK 85
+
+/* CAM_CC power domains */
+#define CAM_CC_TITAN_TOP_GDSC 0
+
+/* CAM_CC resets */
+#define CAM_CC_ICP_BCR 0
+#define CAM_CC_IFE_0_BCR 1
+#define CAM_CC_IFE_1_BCR 2
+#define CAM_CC_IPE_0_BCR 3
+#define CAM_CC_SFE_LITE_0_BCR 4
+#define CAM_CC_SFE_LITE_1_BCR 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sa8775p-dispcc.h b/include/dt-bindings/clock/qcom,sa8775p-dispcc.h
new file mode 100644
index 000000000000..e2049e510658
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-dispcc.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SA8775P_DISP_CC_H
+
+/* DISP_CC_0/1 clocks */
+#define MDSS_DISP_CC_MDSS_AHB1_CLK 0
+#define MDSS_DISP_CC_MDSS_AHB_CLK 1
+#define MDSS_DISP_CC_MDSS_AHB_CLK_SRC 2
+#define MDSS_DISP_CC_MDSS_BYTE0_CLK 3
+#define MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define MDSS_DISP_CC_MDSS_BYTE1_CLK 7
+#define MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC 8
+#define MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 9
+#define MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK 10
+#define MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK 11
+#define MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 12
+#define MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK 13
+#define MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC 14
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK 15
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK 23
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC 24
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK 25
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC 26
+#define MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 27
+#define MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK 28
+#define MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 29
+#define MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK 30
+#define MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC 31
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK 32
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 33
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 34
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 35
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK 36
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 37
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK 38
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 39
+#define MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 40
+#define MDSS_DISP_CC_MDSS_ESC0_CLK 41
+#define MDSS_DISP_CC_MDSS_ESC0_CLK_SRC 42
+#define MDSS_DISP_CC_MDSS_ESC1_CLK 43
+#define MDSS_DISP_CC_MDSS_ESC1_CLK_SRC 44
+#define MDSS_DISP_CC_MDSS_MDP1_CLK 45
+#define MDSS_DISP_CC_MDSS_MDP_CLK 46
+#define MDSS_DISP_CC_MDSS_MDP_CLK_SRC 47
+#define MDSS_DISP_CC_MDSS_MDP_LUT1_CLK 48
+#define MDSS_DISP_CC_MDSS_MDP_LUT_CLK 49
+#define MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK 50
+#define MDSS_DISP_CC_MDSS_PCLK0_CLK 51
+#define MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC 52
+#define MDSS_DISP_CC_MDSS_PCLK1_CLK 53
+#define MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC 54
+#define MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK 55
+#define MDSS_DISP_CC_MDSS_RSCC_AHB_CLK 56
+#define MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK 57
+#define MDSS_DISP_CC_MDSS_VSYNC1_CLK 58
+#define MDSS_DISP_CC_MDSS_VSYNC_CLK 59
+#define MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC 60
+#define MDSS_DISP_CC_PLL0 61
+#define MDSS_DISP_CC_PLL1 62
+#define MDSS_DISP_CC_SLEEP_CLK 63
+#define MDSS_DISP_CC_SLEEP_CLK_SRC 64
+#define MDSS_DISP_CC_SM_OBS_CLK 65
+#define MDSS_DISP_CC_XO_CLK 66
+#define MDSS_DISP_CC_XO_CLK_SRC 67
+
+/* DISP_CC_0/1 power domains */
+#define MDSS_DISP_CC_MDSS_CORE_GDSC 0
+#define MDSS_DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+/* DISP_CC_0/1 resets */
+#define MDSS_DISP_CC_MDSS_CORE_BCR 0
+#define MDSS_DISP_CC_MDSS_RSCC_BCR 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sa8775p-videocc.h b/include/dt-bindings/clock/qcom,sa8775p-videocc.h
new file mode 100644
index 000000000000..e6325f68c317
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-videocc.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_VIDEO_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SA8775P_VIDEO_CC_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_AHB_CLK 0
+#define VIDEO_CC_AHB_CLK_SRC 1
+#define VIDEO_CC_MVS0_CLK 2
+#define VIDEO_CC_MVS0_CLK_SRC 3
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 4
+#define VIDEO_CC_MVS0C_CLK 5
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 6
+#define VIDEO_CC_MVS1_CLK 7
+#define VIDEO_CC_MVS1_CLK_SRC 8
+#define VIDEO_CC_MVS1_DIV_CLK_SRC 9
+#define VIDEO_CC_MVS1C_CLK 10
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 11
+#define VIDEO_CC_PLL_LOCK_MONITOR_CLK 12
+#define VIDEO_CC_SLEEP_CLK 13
+#define VIDEO_CC_SLEEP_CLK_SRC 14
+#define VIDEO_CC_SM_DIV_CLK_SRC 15
+#define VIDEO_CC_SM_OBS_CLK 16
+#define VIDEO_CC_XO_CLK 17
+#define VIDEO_CC_XO_CLK_SRC 18
+#define VIDEO_PLL0 19
+#define VIDEO_PLL1 20
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0C_GDSC 0
+#define VIDEO_CC_MVS0_GDSC 1
+#define VIDEO_CC_MVS1C_GDSC 2
+#define VIDEO_CC_MVS1_GDSC 3
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_MVS0C_BCR 3
+#define VIDEO_CC_MVS1_BCR 4
+#define VIDEO_CC_MVS1C_CLK_ARES 5
+#define VIDEO_CC_MVS1C_BCR 6
+
+#endif
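
The three new SA8775P headers above (camcc, dispcc, videocc) only pin down stable indices; the mapping from index to actual clock lives in the matching provider driver. As a rough illustration only, with hypothetical clock names following the usual layout of Qualcomm clock drivers, the provider indexes its regmap clocks by these constants, and a consumer's device tree phandle such as <&videocc VIDEO_CC_MVS0_CLK> resolves through that table:

/* Sketch, not part of this series: hypothetical provider table for
 * the videocc binding above. Each binding constant is an array
 * index; video_cc_ahb_clk and video_cc_mvs0_clk are made-up names.
 */
static struct clk_regmap *video_cc_sa8775p_clocks[] = {
	[VIDEO_CC_AHB_CLK] = &video_cc_ahb_clk.clkr,
	[VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
	/* ... one entry per constant in the header ... */
};
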
diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h
index 8281e9caf3a9..311521fe4b59 100644
--- a/include/dt-bindings/clock/r9a08g045-cpg.h
+++ b/include/dt-bindings/clock/r9a08g045-cpg.h
@@ -308,5 +308,6 @@
#define R9A08G045_PD_DDR 64
#define R9A08G045_PD_TZCDDR 65
#define R9A08G045_PD_OTFDE_DDR 66
+#define R9A08G045_PD_RTC 67
#endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h b/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h
new file mode 100644
index 000000000000..67774eafad06
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A08G045_VBATTB_H__
+#define __DT_BINDINGS_CLOCK_R9A08G045_VBATTB_H__
+
+#define VBATTB_XC 0
+#define VBATTB_XBYP 1
+#define VBATTB_MUX 2
+#define VBATTB_VBATTCLK 3
+
+#endif /* __DT_BINDINGS_CLOCK_R9A08G045_VBATTB_H__ */
diff --git a/include/dt-bindings/clock/samsung,exynos8895.h b/include/dt-bindings/clock/samsung,exynos8895.h
new file mode 100644
index 000000000000..27998c53f929
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynos8895.h
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ *
+ * Device Tree binding constants for Exynos8895 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS8895_H
+#define _DT_BINDINGS_CLOCK_EXYNOS8895_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SHARED4_PLL 5
+#define CLK_MOUT_PLL_SHARED0 6
+#define CLK_MOUT_PLL_SHARED1 7
+#define CLK_MOUT_PLL_SHARED2 8
+#define CLK_MOUT_PLL_SHARED3 9
+#define CLK_MOUT_PLL_SHARED4 10
+#define CLK_MOUT_CP2AP_MIF_CLK_USER 11
+#define CLK_MOUT_CMU_ABOX_CPUABOX 12
+#define CLK_MOUT_CMU_APM_BUS 13
+#define CLK_MOUT_CMU_BUS1_BUS 14
+#define CLK_MOUT_CMU_BUSC_BUS 15
+#define CLK_MOUT_CMU_BUSC_BUSPHSI2C 16
+#define CLK_MOUT_CMU_CAM_BUS 17
+#define CLK_MOUT_CMU_CAM_TPU0 18
+#define CLK_MOUT_CMU_CAM_TPU1 19
+#define CLK_MOUT_CMU_CAM_VRA 20
+#define CLK_MOUT_CMU_CIS_CLK0 21
+#define CLK_MOUT_CMU_CIS_CLK1 22
+#define CLK_MOUT_CMU_CIS_CLK2 23
+#define CLK_MOUT_CMU_CIS_CLK3 24
+#define CLK_MOUT_CMU_CORE_BUS 25
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 26
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 27
+#define CLK_MOUT_CMU_DBG_BUS 28
+#define CLK_MOUT_CMU_DCAM_BUS 29
+#define CLK_MOUT_CMU_DCAM_IMGD 30
+#define CLK_MOUT_CMU_DPU_BUS 31
+#define CLK_MOUT_CMU_DROOPDETECTOR 32
+#define CLK_MOUT_CMU_DSP_BUS 33
+#define CLK_MOUT_CMU_FSYS0_BUS 34
+#define CLK_MOUT_CMU_FSYS0_DPGTC 35
+#define CLK_MOUT_CMU_FSYS0_MMC_EMBD 36
+#define CLK_MOUT_CMU_FSYS0_UFS_EMBD 37
+#define CLK_MOUT_CMU_FSYS0_USBDRD30 38
+#define CLK_MOUT_CMU_FSYS1_BUS 39
+#define CLK_MOUT_CMU_FSYS1_MMC_CARD 40
+#define CLK_MOUT_CMU_FSYS1_PCIE 41
+#define CLK_MOUT_CMU_FSYS1_UFS_CARD 42
+#define CLK_MOUT_CMU_G2D_G2D 43
+#define CLK_MOUT_CMU_G2D_JPEG 44
+#define CLK_MOUT_CMU_HPM 45
+#define CLK_MOUT_CMU_IMEM_BUS 46
+#define CLK_MOUT_CMU_ISPHQ_BUS 47
+#define CLK_MOUT_CMU_ISPLP_BUS 48
+#define CLK_MOUT_CMU_IVA_BUS 49
+#define CLK_MOUT_CMU_MFC_BUS 50
+#define CLK_MOUT_CMU_MIF_SWITCH 51
+#define CLK_MOUT_CMU_PERIC0_BUS 52
+#define CLK_MOUT_CMU_PERIC0_UART_DBG 53
+#define CLK_MOUT_CMU_PERIC0_USI00 54
+#define CLK_MOUT_CMU_PERIC0_USI01 55
+#define CLK_MOUT_CMU_PERIC0_USI02 56
+#define CLK_MOUT_CMU_PERIC0_USI03 57
+#define CLK_MOUT_CMU_PERIC1_BUS 58
+#define CLK_MOUT_CMU_PERIC1_SPEEDY2 59
+#define CLK_MOUT_CMU_PERIC1_SPI_CAM0 60
+#define CLK_MOUT_CMU_PERIC1_SPI_CAM1 61
+#define CLK_MOUT_CMU_PERIC1_UART_BT 62
+#define CLK_MOUT_CMU_PERIC1_USI04 63
+#define CLK_MOUT_CMU_PERIC1_USI05 64
+#define CLK_MOUT_CMU_PERIC1_USI06 65
+#define CLK_MOUT_CMU_PERIC1_USI07 66
+#define CLK_MOUT_CMU_PERIC1_USI08 67
+#define CLK_MOUT_CMU_PERIC1_USI09 68
+#define CLK_MOUT_CMU_PERIC1_USI10 69
+#define CLK_MOUT_CMU_PERIC1_USI11 70
+#define CLK_MOUT_CMU_PERIC1_USI12 71
+#define CLK_MOUT_CMU_PERIC1_USI13 72
+#define CLK_MOUT_CMU_PERIS_BUS 73
+#define CLK_MOUT_CMU_SRDZ_BUS 74
+#define CLK_MOUT_CMU_SRDZ_IMGD 75
+#define CLK_MOUT_CMU_VPU_BUS 76
+#define CLK_DOUT_CMU_ABOX_CPUABOX 77
+#define CLK_DOUT_CMU_APM_BUS 78
+#define CLK_DOUT_CMU_BUS1_BUS 79
+#define CLK_DOUT_CMU_BUSC_BUS 80
+#define CLK_DOUT_CMU_BUSC_BUSPHSI2C 81
+#define CLK_DOUT_CMU_CAM_BUS 82
+#define CLK_DOUT_CMU_CAM_TPU0 83
+#define CLK_DOUT_CMU_CAM_TPU1 84
+#define CLK_DOUT_CMU_CAM_VRA 85
+#define CLK_DOUT_CMU_CIS_CLK0 86
+#define CLK_DOUT_CMU_CIS_CLK1 87
+#define CLK_DOUT_CMU_CIS_CLK2 88
+#define CLK_DOUT_CMU_CIS_CLK3 89
+#define CLK_DOUT_CMU_CORE_BUS 90
+#define CLK_DOUT_CMU_CPUCL0_SWITCH 91
+#define CLK_DOUT_CMU_CPUCL1_SWITCH 92
+#define CLK_DOUT_CMU_DBG_BUS 93
+#define CLK_DOUT_CMU_DCAM_BUS 94
+#define CLK_DOUT_CMU_DCAM_IMGD 95
+#define CLK_DOUT_CMU_DPU_BUS 96
+#define CLK_DOUT_CMU_DSP_BUS 97
+#define CLK_DOUT_CMU_FSYS0_BUS 98
+#define CLK_DOUT_CMU_FSYS0_DPGTC 99
+#define CLK_DOUT_CMU_FSYS0_MMC_EMBD 100
+#define CLK_DOUT_CMU_FSYS0_UFS_EMBD 101
+#define CLK_DOUT_CMU_FSYS0_USBDRD30 102
+#define CLK_DOUT_CMU_FSYS1_BUS 103
+#define CLK_DOUT_CMU_FSYS1_MMC_CARD 104
+#define CLK_DOUT_CMU_FSYS1_UFS_CARD 105
+#define CLK_DOUT_CMU_G2D_G2D 106
+#define CLK_DOUT_CMU_G2D_JPEG 107
+#define CLK_DOUT_CMU_G3D_SWITCH 108
+#define CLK_DOUT_CMU_HPM 109
+#define CLK_DOUT_CMU_IMEM_BUS 110
+#define CLK_DOUT_CMU_ISPHQ_BUS 111
+#define CLK_DOUT_CMU_ISPLP_BUS 112
+#define CLK_DOUT_CMU_IVA_BUS 113
+#define CLK_DOUT_CMU_MFC_BUS 114
+#define CLK_DOUT_CMU_MODEM_SHARED0 115
+#define CLK_DOUT_CMU_MODEM_SHARED1 116
+#define CLK_DOUT_CMU_PERIC0_BUS 117
+#define CLK_DOUT_CMU_PERIC0_UART_DBG 118
+#define CLK_DOUT_CMU_PERIC0_USI00 119
+#define CLK_DOUT_CMU_PERIC0_USI01 120
+#define CLK_DOUT_CMU_PERIC0_USI02 121
+#define CLK_DOUT_CMU_PERIC0_USI03 122
+#define CLK_DOUT_CMU_PERIC1_BUS 123
+#define CLK_DOUT_CMU_PERIC1_SPEEDY2 124
+#define CLK_DOUT_CMU_PERIC1_SPI_CAM0 125
+#define CLK_DOUT_CMU_PERIC1_SPI_CAM1 126
+#define CLK_DOUT_CMU_PERIC1_UART_BT 127
+#define CLK_DOUT_CMU_PERIC1_USI04 128
+#define CLK_DOUT_CMU_PERIC1_USI05 129
+#define CLK_DOUT_CMU_PERIC1_USI06 130
+#define CLK_DOUT_CMU_PERIC1_USI07 131
+#define CLK_DOUT_CMU_PERIC1_USI08 132
+#define CLK_DOUT_CMU_PERIC1_USI09 133
+#define CLK_DOUT_CMU_PERIC1_USI10 134
+#define CLK_DOUT_CMU_PERIC1_USI11 135
+#define CLK_DOUT_CMU_PERIC1_USI12 136
+#define CLK_DOUT_CMU_PERIC1_USI13 137
+#define CLK_DOUT_CMU_PERIS_BUS 138
+#define CLK_DOUT_CMU_SRDZ_BUS 139
+#define CLK_DOUT_CMU_SRDZ_IMGD 140
+#define CLK_DOUT_CMU_VPU_BUS 141
+#define CLK_DOUT_CMU_SHARED0_DIV2 142
+#define CLK_DOUT_CMU_SHARED0_DIV4 143
+#define CLK_DOUT_CMU_SHARED1_DIV2 144
+#define CLK_DOUT_CMU_SHARED1_DIV4 145
+#define CLK_DOUT_CMU_SHARED2_DIV2 146
+#define CLK_DOUT_CMU_SHARED3_DIV2 147
+#define CLK_DOUT_CMU_SHARED4_DIV2 148
+#define CLK_DOUT_CMU_FSYS1_PCIE 149
+#define CLK_DOUT_CMU_CP2AP_MIF_CLK_DIV2 150
+#define CLK_DOUT_CMU_CMU_OTP 151
+#define CLK_GOUT_CMU_DROOPDETECTOR 152
+#define CLK_GOUT_CMU_MIF_SWITCH 153
+#define CLK_GOUT_CMU_ABOX_CPUABOX 154
+#define CLK_GOUT_CMU_APM_BUS 155
+#define CLK_GOUT_CMU_BUS1_BUS 156
+#define CLK_GOUT_CMU_BUSC_BUS 157
+#define CLK_GOUT_CMU_BUSC_BUSPHSI2C 158
+#define CLK_GOUT_CMU_CAM_BUS 159
+#define CLK_GOUT_CMU_CAM_TPU0 160
+#define CLK_GOUT_CMU_CAM_TPU1 161
+#define CLK_GOUT_CMU_CAM_VRA 162
+#define CLK_GOUT_CMU_CIS_CLK0 163
+#define CLK_GOUT_CMU_CIS_CLK1 164
+#define CLK_GOUT_CMU_CIS_CLK2 165
+#define CLK_GOUT_CMU_CIS_CLK3 166
+#define CLK_GOUT_CMU_CORE_BUS 167
+#define CLK_GOUT_CMU_CPUCL0_SWITCH 168
+#define CLK_GOUT_CMU_CPUCL1_SWITCH 169
+#define CLK_GOUT_CMU_DBG_BUS 170
+#define CLK_GOUT_CMU_DCAM_BUS 171
+#define CLK_GOUT_CMU_DCAM_IMGD 172
+#define CLK_GOUT_CMU_DPU_BUS 173
+#define CLK_GOUT_CMU_DSP_BUS 174
+#define CLK_GOUT_CMU_FSYS0_BUS 175
+#define CLK_GOUT_CMU_FSYS0_DPGTC 176
+#define CLK_GOUT_CMU_FSYS0_MMC_EMBD 177
+#define CLK_GOUT_CMU_FSYS0_UFS_EMBD 178
+#define CLK_GOUT_CMU_FSYS0_USBDRD30 179
+#define CLK_GOUT_CMU_FSYS1_BUS 180
+#define CLK_GOUT_CMU_FSYS1_MMC_CARD 181
+#define CLK_GOUT_CMU_FSYS1_PCIE 182
+#define CLK_GOUT_CMU_FSYS1_UFS_CARD 183
+#define CLK_GOUT_CMU_G2D_G2D 184
+#define CLK_GOUT_CMU_G2D_JPEG 185
+#define CLK_GOUT_CMU_G3D_SWITCH 186
+#define CLK_GOUT_CMU_HPM 187
+#define CLK_GOUT_CMU_IMEM_BUS 188
+#define CLK_GOUT_CMU_ISPHQ_BUS 189
+#define CLK_GOUT_CMU_ISPLP_BUS 190
+#define CLK_GOUT_CMU_IVA_BUS 191
+#define CLK_GOUT_CMU_MFC_BUS 192
+#define CLK_GOUT_CMU_MODEM_SHARED0 193
+#define CLK_GOUT_CMU_MODEM_SHARED1 194
+#define CLK_GOUT_CMU_PERIC0_BUS 195
+#define CLK_GOUT_CMU_PERIC0_UART_DBG 196
+#define CLK_GOUT_CMU_PERIC0_USI00 197
+#define CLK_GOUT_CMU_PERIC0_USI01 198
+#define CLK_GOUT_CMU_PERIC0_USI02 199
+#define CLK_GOUT_CMU_PERIC0_USI03 200
+#define CLK_GOUT_CMU_PERIC1_BUS 201
+#define CLK_GOUT_CMU_PERIC1_SPEEDY2 202
+#define CLK_GOUT_CMU_PERIC1_SPI_CAM0 203
+#define CLK_GOUT_CMU_PERIC1_SPI_CAM1 204
+#define CLK_GOUT_CMU_PERIC1_UART_BT 205
+#define CLK_GOUT_CMU_PERIC1_USI04 206
+#define CLK_GOUT_CMU_PERIC1_USI05 207
+#define CLK_GOUT_CMU_PERIC1_USI06 208
+#define CLK_GOUT_CMU_PERIC1_USI07 209
+#define CLK_GOUT_CMU_PERIC1_USI08 210
+#define CLK_GOUT_CMU_PERIC1_USI09 211
+#define CLK_GOUT_CMU_PERIC1_USI10 212
+#define CLK_GOUT_CMU_PERIC1_USI11 213
+#define CLK_GOUT_CMU_PERIC1_USI12 214
+#define CLK_GOUT_CMU_PERIC1_USI13 215
+#define CLK_GOUT_CMU_PERIS_BUS 216
+#define CLK_GOUT_CMU_SRDZ_BUS 217
+#define CLK_GOUT_CMU_SRDZ_IMGD 218
+#define CLK_GOUT_CMU_VPU_BUS 219
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_BUS_USER 1
+#define CLK_MOUT_PERIS_GIC 2
+#define CLK_GOUT_PERIS_CMU_PERIS_PCLK 3
+#define CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM 4
+#define CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKS 5
+#define CLK_GOUT_PERIS_AXI2APB_PERISP0_ACLK 6
+#define CLK_GOUT_PERIS_AXI2APB_PERISP1_ACLK 7
+#define CLK_GOUT_PERIS_BUSIF_TMU_PCLK 8
+#define CLK_GOUT_PERIS_GIC_CLK 9
+#define CLK_GOUT_PERIS_LHM_AXI_P_PERIS_I_CLK 10
+#define CLK_GOUT_PERIS_MCT_PCLK 11
+#define CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK 12
+#define CLK_GOUT_PERIS_OTP_CON_TOP_PCLK 13
+#define CLK_GOUT_PERIS_PMU_PERIS_PCLK 14
+#define CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_BUSP_CLK 15
+#define CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_GIC_CLK 16
+#define CLK_GOUT_PERIS_SYSREG_PERIS_PCLK 17
+#define CLK_GOUT_PERIS_TZPC00_PCLK 18
+#define CLK_GOUT_PERIS_TZPC01_PCLK 19
+#define CLK_GOUT_PERIS_TZPC02_PCLK 20
+#define CLK_GOUT_PERIS_TZPC03_PCLK 21
+#define CLK_GOUT_PERIS_TZPC04_PCLK 22
+#define CLK_GOUT_PERIS_TZPC05_PCLK 23
+#define CLK_GOUT_PERIS_TZPC06_PCLK 24
+#define CLK_GOUT_PERIS_TZPC07_PCLK 25
+#define CLK_GOUT_PERIS_TZPC08_PCLK 26
+#define CLK_GOUT_PERIS_TZPC09_PCLK 27
+#define CLK_GOUT_PERIS_TZPC10_PCLK 28
+#define CLK_GOUT_PERIS_TZPC11_PCLK 29
+#define CLK_GOUT_PERIS_TZPC12_PCLK 30
+#define CLK_GOUT_PERIS_TZPC13_PCLK 31
+#define CLK_GOUT_PERIS_TZPC14_PCLK 32
+#define CLK_GOUT_PERIS_TZPC15_PCLK 33
+#define CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK 34
+#define CLK_GOUT_PERIS_WDT_CLUSTER1_PCLK 35
+#define CLK_GOUT_PERIS_XIU_P_PERIS_ACLK 36
+
+/* CMU_FSYS0 */
+#define CLK_MOUT_FSYS0_BUS_USER 1
+#define CLK_MOUT_FSYS0_DPGTC_USER 2
+#define CLK_MOUT_FSYS0_MMC_EMBD_USER 3
+#define CLK_MOUT_FSYS0_UFS_EMBD_USER 4
+#define CLK_MOUT_FSYS0_USBDRD30_USER 5
+#define CLK_GOUT_FSYS0_FSYS0_CMU_FSYS0_PCLK 6
+#define CLK_GOUT_FSYS0_AHBBR_FSYS0_HCLK 7
+#define CLK_GOUT_FSYS0_AXI2AHB_FSYS0_ACLK 8
+#define CLK_GOUT_FSYS0_AXI2AHB_USB_FSYS0_ACLK 9
+#define CLK_GOUT_FSYS0_AXI2APB_FSYS0_ACLK 10
+#define CLK_GOUT_FSYS0_BTM_FSYS0_I_ACLK 11
+#define CLK_GOUT_FSYS0_BTM_FSYS0_I_PCLK 12
+#define CLK_GOUT_FSYS0_DP_LINK_I_GTC_EXT_CLK 13
+#define CLK_GOUT_FSYS0_DP_LINK_I_PCLK 14
+#define CLK_GOUT_FSYS0_ETR_MIU_I_ACLK 15
+#define CLK_GOUT_FSYS0_ETR_MIU_I_PCLK 16
+#define CLK_GOUT_FSYS0_GPIO_FSYS0_PCLK 17
+#define CLK_GOUT_FSYS0_LHM_AXI_D_USBTV_I_CLK 18
+#define CLK_GOUT_FSYS0_LHM_AXI_G_ETR_I_CLK 19
+#define CLK_GOUT_FSYS0_LHM_AXI_P_FSYS0_I_CLK 20
+#define CLK_GOUT_FSYS0_LHS_ACEL_D_FSYS0_I_CLK 21
+#define CLK_GOUT_FSYS0_MMC_EMBD_I_ACLK 22
+#define CLK_GOUT_FSYS0_MMC_EMBD_SDCLKIN 23
+#define CLK_GOUT_FSYS0_PMU_FSYS0_PCLK 24
+#define CLK_GOUT_FSYS0_BCM_FSYS0_ACLK 25
+#define CLK_GOUT_FSYS0_BCM_FSYS0_PCLK 26
+#define CLK_GOUT_FSYS0_RSTNSYNC_CLK_FSYS0_BUS_CLK 27
+#define CLK_GOUT_FSYS0_SYSREG_FSYS0_PCLK 28
+#define CLK_GOUT_FSYS0_UFS_EMBD_I_ACLK 29
+#define CLK_GOUT_FSYS0_UFS_EMBD_I_CLK_UNIPRO 30
+#define CLK_GOUT_FSYS0_UFS_EMBD_I_FMP_CLK 31
+#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_ACLK 32
+#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_REF_CLK 33
+#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_SUSPEND_CLK 34
+#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_AHB_CLK 35
+#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_CORE_CLK 36
+#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_XIU_CLK 37
+#define CLK_GOUT_FSYS0_US_D_FSYS0_USB_ACLK 38
+#define CLK_GOUT_FSYS0_XIU_D_FSYS0_ACLK 39
+#define CLK_GOUT_FSYS0_XIU_D_FSYS0_USB_ACLK 40
+#define CLK_GOUT_FSYS0_XIU_P_FSYS0_ACLK 41
+
+/* CMU_FSYS1 */
+#define CLK_MOUT_FSYS1_BUS_USER 1
+#define CLK_MOUT_FSYS1_MMC_CARD_USER 2
+#define CLK_MOUT_FSYS1_PCIE_USER 3
+#define CLK_MOUT_FSYS1_UFS_CARD_USER 4
+#define CLK_GOUT_FSYS1_PCIE_PHY_REF_CLK_IN 5
+#define CLK_GOUT_FSYS1_ADM_AHB_SSS_HCLKM 6
+#define CLK_GOUT_FSYS1_AHBBR_FSYS1_HCLK 7
+#define CLK_GOUT_FSYS1_AXI2AHB_FSYS1_ACLK 8
+#define CLK_GOUT_FSYS1_AXI2APB_FSYS1P0_ACLK 9
+#define CLK_GOUT_FSYS1_AXI2APB_FSYS1P1_ACLK 10
+#define CLK_GOUT_FSYS1_BTM_FSYS1_I_ACLK 11
+#define CLK_GOUT_FSYS1_BTM_FSYS1_I_PCLK 12
+#define CLK_GOUT_FSYS1_FSYS1_CMU_FSYS1_PCLK 13
+#define CLK_GOUT_FSYS1_GPIO_FSYS1_PCLK 14
+#define CLK_GOUT_FSYS1_LHM_AXI_P_FSYS1_I_CLK 15
+#define CLK_GOUT_FSYS1_LHS_ACEL_D_FSYS1_I_CLK 16
+#define CLK_GOUT_FSYS1_MMC_CARD_I_ACLK 17
+#define CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN 18
+#define CLK_GOUT_FSYS1_PCIE_DBI_ACLK_0 19
+#define CLK_GOUT_FSYS1_PCIE_DBI_ACLK_1 20
+#define CLK_GOUT_FSYS1_PCIE_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK 21
+#define CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_0 22
+#define CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_1 23
+#define CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 24
+#define CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK 25
+#define CLK_GOUT_FSYS1_PCIE_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL 26
+#define CLK_GOUT_FSYS1_PCIE_SLV_ACLK_0 27
+#define CLK_GOUT_FSYS1_PCIE_SLV_ACLK_1 28
+#define CLK_GOUT_FSYS1_PMU_FSYS1_PCLK 29
+#define CLK_GOUT_FSYS1_BCM_FSYS1_ACLK 30
+#define CLK_GOUT_FSYS1_BCM_FSYS1_PCLK 31
+#define CLK_GOUT_FSYS1_RSTNSYNC_CLK_FSYS1_BUS_CLK 32
+#define CLK_GOUT_FSYS1_RTIC_I_ACLK 33
+#define CLK_GOUT_FSYS1_RTIC_I_PCLK 34
+#define CLK_GOUT_FSYS1_SSS_I_ACLK 35
+#define CLK_GOUT_FSYS1_SSS_I_PCLK 36
+#define CLK_GOUT_FSYS1_SYSREG_FSYS1_PCLK 37
+#define CLK_GOUT_FSYS1_TOE_WIFI0_I_CLK 38
+#define CLK_GOUT_FSYS1_TOE_WIFI1_I_CLK 39
+#define CLK_GOUT_FSYS1_UFS_CARD_I_ACLK 40
+#define CLK_GOUT_FSYS1_UFS_CARD_I_CLK_UNIPRO 41
+#define CLK_GOUT_FSYS1_UFS_CARD_I_FMP_CLK 42
+#define CLK_GOUT_FSYS1_XIU_D_FSYS1_ACLK 43
+#define CLK_GOUT_FSYS1_XIU_P_FSYS1_ACLK 44
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_UART_DBG_USER 2
+#define CLK_MOUT_PERIC0_USI00_USER 3
+#define CLK_MOUT_PERIC0_USI01_USER 4
+#define CLK_MOUT_PERIC0_USI02_USER 5
+#define CLK_MOUT_PERIC0_USI03_USER 6
+#define CLK_GOUT_PERIC0_PERIC0_CMU_PERIC0_PCLK 7
+#define CLK_GOUT_PERIC0_AXI2APB_PERIC0_ACLK 8
+#define CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK 9
+#define CLK_GOUT_PERIC0_LHM_AXI_P_PERIC0_I_CLK 10
+#define CLK_GOUT_PERIC0_PMU_PERIC0_PCLK 11
+#define CLK_GOUT_PERIC0_PWM_I_PCLK_S0 12
+#define CLK_GOUT_PERIC0_RSTNSYNC_CLK_PERIC0_BUSP_CLK 13
+#define CLK_GOUT_PERIC0_SPEEDY2_TSP_CLK 14
+#define CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK 15
+#define CLK_GOUT_PERIC0_UART_DBG_EXT_UCLK 16
+#define CLK_GOUT_PERIC0_UART_DBG_PCLK 17
+#define CLK_GOUT_PERIC0_USI00_I_PCLK 18
+#define CLK_GOUT_PERIC0_USI00_I_SCLK_USI 19
+#define CLK_GOUT_PERIC0_USI01_I_PCLK 20
+#define CLK_GOUT_PERIC0_USI01_I_SCLK_USI 21
+#define CLK_GOUT_PERIC0_USI02_I_PCLK 22
+#define CLK_GOUT_PERIC0_USI02_I_SCLK_USI 23
+#define CLK_GOUT_PERIC0_USI03_I_PCLK 24
+#define CLK_GOUT_PERIC0_USI03_I_SCLK_USI 25
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_SPEEDY2_USER 2
+#define CLK_MOUT_PERIC1_SPI_CAM0_USER 3
+#define CLK_MOUT_PERIC1_SPI_CAM1_USER 4
+#define CLK_MOUT_PERIC1_UART_BT_USER 5
+#define CLK_MOUT_PERIC1_USI04_USER 6
+#define CLK_MOUT_PERIC1_USI05_USER 7
+#define CLK_MOUT_PERIC1_USI06_USER 8
+#define CLK_MOUT_PERIC1_USI07_USER 9
+#define CLK_MOUT_PERIC1_USI08_USER 10
+#define CLK_MOUT_PERIC1_USI09_USER 11
+#define CLK_MOUT_PERIC1_USI10_USER 12
+#define CLK_MOUT_PERIC1_USI11_USER 13
+#define CLK_MOUT_PERIC1_USI12_USER 14
+#define CLK_MOUT_PERIC1_USI13_USER 15
+#define CLK_GOUT_PERIC1_PERIC1_CMU_PERIC1_PCLK 16
+#define CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_SPEEDY2_CLK 17
+#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P0_ACLK 18
+#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P1_ACLK 19
+#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P2_ACLK 20
+#define CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK 21
+#define CLK_GOUT_PERIC1_HSI2C_CAM0_IPCLK 22
+#define CLK_GOUT_PERIC1_HSI2C_CAM1_IPCLK 23
+#define CLK_GOUT_PERIC1_HSI2C_CAM2_IPCLK 24
+#define CLK_GOUT_PERIC1_HSI2C_CAM3_IPCLK 25
+#define CLK_GOUT_PERIC1_LHM_AXI_P_PERIC1_I_CLK 26
+#define CLK_GOUT_PERIC1_PMU_PERIC1_PCLK 27
+#define CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_BUSP_CLK 28
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI1_CLK 29
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI1_SCLK 30
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI2_CLK 31
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI2_SCLK 32
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI_CLK 33
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI_SCLK 34
+#define CLK_GOUT_PERIC1_SPEEDY2_TSP1_CLK 35
+#define CLK_GOUT_PERIC1_SPEEDY2_TSP2_CLK 36
+#define CLK_GOUT_PERIC1_SPI_CAM0_PCLK 37
+#define CLK_GOUT_PERIC1_SPI_CAM0_SPI_EXT_CLK 38
+#define CLK_GOUT_PERIC1_SPI_CAM1_PCLK 39
+#define CLK_GOUT_PERIC1_SPI_CAM1_SPI_EXT_CLK 40
+#define CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK 41
+#define CLK_GOUT_PERIC1_UART_BT_EXT_UCLK 42
+#define CLK_GOUT_PERIC1_UART_BT_PCLK 43
+#define CLK_GOUT_PERIC1_USI04_I_PCLK 44
+#define CLK_GOUT_PERIC1_USI04_I_SCLK_USI 45
+#define CLK_GOUT_PERIC1_USI05_I_PCLK 46
+#define CLK_GOUT_PERIC1_USI05_I_SCLK_USI 47
+#define CLK_GOUT_PERIC1_USI06_I_PCLK 48
+#define CLK_GOUT_PERIC1_USI06_I_SCLK_USI 49
+#define CLK_GOUT_PERIC1_USI07_I_PCLK 50
+#define CLK_GOUT_PERIC1_USI07_I_SCLK_USI 51
+#define CLK_GOUT_PERIC1_USI08_I_PCLK 52
+#define CLK_GOUT_PERIC1_USI08_I_SCLK_USI 53
+#define CLK_GOUT_PERIC1_USI09_I_PCLK 54
+#define CLK_GOUT_PERIC1_USI09_I_SCLK_USI 55
+#define CLK_GOUT_PERIC1_USI10_I_PCLK 56
+#define CLK_GOUT_PERIC1_USI10_I_SCLK_USI 57
+#define CLK_GOUT_PERIC1_USI11_I_PCLK 58
+#define CLK_GOUT_PERIC1_USI11_I_SCLK_USI 59
+#define CLK_GOUT_PERIC1_USI12_I_PCLK 60
+#define CLK_GOUT_PERIC1_USI12_I_SCLK_USI 61
+#define CLK_GOUT_PERIC1_USI13_I_PCLK 62
+#define CLK_GOUT_PERIC1_USI13_I_SCLK_USI 63
+#define CLK_GOUT_PERIC1_XIU_P_PERIC1_ACLK 64
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS8895_H */
diff --git a/include/dt-bindings/clock/samsung,exynosautov920.h b/include/dt-bindings/clock/samsung,exynosautov920.h
index c720f344b6bf..0c681f2ba3d0 100644
--- a/include/dt-bindings/clock/samsung,exynosautov920.h
+++ b/include/dt-bindings/clock/samsung,exynosautov920.h
@@ -160,6 +160,7 @@
#define DOUT_CLKCMU_SNW_NOC 144
#define DOUT_CLKCMU_SSP_NOC 145
#define DOUT_CLKCMU_TAA_NOC 146
+#define DOUT_TCXO_DIV2 147
/* CMU_PERIC0 */
#define CLK_MOUT_PERIC0_IP_USER 1
@@ -188,4 +189,50 @@
#define CLK_DOUT_PERIC0_USI_I2C 23
#define CLK_DOUT_PERIC0_I3C 24
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_IP_USER 1
+#define CLK_MOUT_PERIC1_NOC_USER 2
+#define CLK_MOUT_PERIC1_USI09_USI 3
+#define CLK_MOUT_PERIC1_USI10_USI 4
+#define CLK_MOUT_PERIC1_USI11_USI 5
+#define CLK_MOUT_PERIC1_USI12_USI 6
+#define CLK_MOUT_PERIC1_USI13_USI 7
+#define CLK_MOUT_PERIC1_USI14_USI 8
+#define CLK_MOUT_PERIC1_USI15_USI 9
+#define CLK_MOUT_PERIC1_USI16_USI 10
+#define CLK_MOUT_PERIC1_USI17_USI 11
+#define CLK_MOUT_PERIC1_USI_I2C 12
+#define CLK_MOUT_PERIC1_I3C 13
+
+#define CLK_DOUT_PERIC1_USI09_USI 14
+#define CLK_DOUT_PERIC1_USI10_USI 15
+#define CLK_DOUT_PERIC1_USI11_USI 16
+#define CLK_DOUT_PERIC1_USI12_USI 17
+#define CLK_DOUT_PERIC1_USI13_USI 18
+#define CLK_DOUT_PERIC1_USI14_USI 19
+#define CLK_DOUT_PERIC1_USI15_USI 20
+#define CLK_DOUT_PERIC1_USI16_USI 21
+#define CLK_DOUT_PERIC1_USI17_USI 22
+#define CLK_DOUT_PERIC1_USI_I2C 23
+#define CLK_DOUT_PERIC1_I3C 24
+
+/* CMU_MISC */
+#define CLK_MOUT_MISC_NOC_USER 1
+#define CLK_MOUT_MISC_GIC 2
+
+#define CLK_DOUT_MISC_OTP 3
+#define CLK_DOUT_MISC_NOCP 4
+#define CLK_DOUT_MISC_OSC_DIV2 5
+
+/* CMU_HSI0 */
+#define CLK_MOUT_HSI0_NOC_USER 1
+
+#define CLK_DOUT_HSI0_PCIE_APB 2
+
+/* CMU_HSI1 */
+#define CLK_MOUT_HSI1_MMC_CARD_USER 1
+#define CLK_MOUT_HSI1_NOC_USER 2
+#define CLK_MOUT_HSI1_USBDRD_USER 3
+#define CLK_MOUT_HSI1_USBDRD 4
+
#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H */
diff --git a/include/dt-bindings/power/mediatek,mt6735-power-controller.h b/include/dt-bindings/power/mediatek,mt6735-power-controller.h
new file mode 100644
index 000000000000..6957075fcb9e
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt6735-power-controller.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_MT6735_POWER_CONTROLLER_H
+#define _DT_BINDINGS_POWER_MT6735_POWER_CONTROLLER_H
+
+#define MT6735_POWER_DOMAIN_MD1 0
+#define MT6735_POWER_DOMAIN_CONN 1
+#define MT6735_POWER_DOMAIN_DIS 2
+#define MT6735_POWER_DOMAIN_MFG 3
+#define MT6735_POWER_DOMAIN_ISP 4
+#define MT6735_POWER_DOMAIN_VDE 5
+#define MT6735_POWER_DOMAIN_VEN 6
+
+#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 608087fb9a3d..df599bf46220 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -218,6 +218,7 @@
/* SDM845 Power Domain performance levels */
#define RPMH_REGULATOR_LEVEL_RETENTION 16
#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D3 50
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D2 52
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D0 60
@@ -238,6 +239,7 @@
#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
#define RPMH_REGULATOR_LEVEL_TURBO_L2 432
#define RPMH_REGULATOR_LEVEL_TURBO_L3 448
+#define RPMH_REGULATOR_LEVEL_TURBO_L4 452
#define RPMH_REGULATOR_LEVEL_SUPER_TURBO 464
#define RPMH_REGULATOR_LEVEL_SUPER_TURBO_NO_CPR 480
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4d5ee84c468b..7dd24acd9ffe 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1164,8 +1164,6 @@ int acpi_subsys_suspend_noirq(struct device *dev);
int acpi_subsys_suspend(struct device *dev);
int acpi_subsys_freeze(struct device *dev);
int acpi_subsys_poweroff(struct device *dev);
-void acpi_ec_mark_gpe_for_wake(void);
-void acpi_ec_set_gpe_wake_mask(u8 action);
int acpi_subsys_restore_early(struct device *dev);
#else
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
@@ -1176,6 +1174,12 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
static inline int acpi_subsys_restore_early(struct device *dev) { return 0; }
+#endif
+
+#if defined(CONFIG_ACPI_EC) && defined(CONFIG_PM_SLEEP)
+void acpi_ec_mark_gpe_for_wake(void);
+void acpi_ec_set_gpe_wake_mask(u8 action);
+#else
static inline void acpi_ec_mark_gpe_for_wake(void) {}
static inline void acpi_ec_set_gpe_wake_mask(u8 action) {}
#endif
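
Hoisting the two EC wake helpers out of the generic PM block and giving them their own CONFIG_ACPI_EC && CONFIG_PM_SLEEP guard means callers need no ifdefs of their own: when either option is off, the static inline stubs compile away. A minimal caller sketch, assuming a hypothetical suspend-prepare handler:

/* Sketch only: hypothetical PM path. Safe to call unconditionally,
 * since disabled configs now provide no-op stubs. */
static int my_suspend_prepare(struct device *dev)
{
	if (device_may_wakeup(dev)) {
		acpi_ec_mark_gpe_for_wake();
		acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
	}
	return 0;
}
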
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 05e758b8b894..3ffa5341dce2 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -20,12 +20,6 @@ enum alarmtimer_type {
ALARM_BOOTTIME_FREEZER,
};
-enum alarmtimer_restart {
- ALARMTIMER_NORESTART,
- ALARMTIMER_RESTART,
-};
-
-
#define ALARMTIMER_STATE_INACTIVE 0x00
#define ALARMTIMER_STATE_ENQUEUED 0x01
@@ -42,14 +36,14 @@ enum alarmtimer_restart {
struct alarm {
struct timerqueue_node node;
struct hrtimer timer;
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t now);
+ void (*function)(struct alarm *, ktime_t now);
enum alarmtimer_type type;
int state;
void *data;
};
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
+ void (*function)(struct alarm *, ktime_t));
void alarm_start(struct alarm *alarm, ktime_t start);
void alarm_start_relative(struct alarm *alarm, ktime_t start);
void alarm_restart(struct alarm *alarm);
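
With enum alarmtimer_restart gone, a handler that used to return ALARMTIMER_RESTART now re-arms itself explicitly before returning. A minimal sketch of the new void callback contract; the handler name and the 500 ms period are made up:

static struct alarm my_alarm;

static void my_alarm_fn(struct alarm *alarm, ktime_t now)
{
	/* periodic behaviour: re-arm 500 ms past this expiry instead
	 * of returning ALARMTIMER_RESTART */
	alarm_start(alarm, ktime_add(now, ms_to_ktime(500)));
}

static int __init my_init(void)
{
	alarm_init(&my_alarm, ALARM_BOOTTIME, my_alarm_fn);
	alarm_start_relative(&my_alarm, ms_to_ktime(500));
	return 0;
}
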
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index b721f360d759..4a952c4885ed 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -11,10 +11,6 @@
void topology_normalize_cpu_scale(void);
int topology_update_cpu_topology(void);
-#ifdef CONFIG_ACPI_CPPC_LIB
-void topology_init_cpu_capacity_cppc(void);
-#endif
-
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index f59099a213d0..67f6fdf2e7cd 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -315,8 +315,6 @@ u32 arm_smccc_get_version(void);
void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
-extern u64 smccc_has_sve_hint;
-
/**
* arm_smccc_get_soc_id_version()
*
@@ -415,15 +413,6 @@ struct arm_smccc_quirk {
};
/**
- * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls
- *
- * Sets the SMCCC hint bit to indicate if there is live state in the SVE
- * registers, this modifies x0 in place and should never be called from C
- * code.
- */
-asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
-
-/**
* __arm_smccc_smc() - make SMC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
@@ -490,20 +479,6 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#endif
-/* nVHE hypervisor doesn't have a current thread so needs separate checks */
-#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__)
-
-#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \
- ARM64_SVE)
-#define smccc_sve_clobbers "x16", "x30", "cc",
-
-#else
-
-#define SMCCC_SVE_CHECK
-#define smccc_sve_clobbers
-
-#endif
-
#define __constraint_read_2 "r" (arg0)
#define __constraint_read_3 __constraint_read_2, "r" (arg1)
#define __constraint_read_4 __constraint_read_3, "r" (arg2)
@@ -574,12 +549,11 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register unsigned long r3 asm("r3"); \
CONCATENATE(__declare_arg_, \
COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
- asm volatile(SMCCC_SVE_CHECK \
- inst "\n" : \
+ asm volatile(inst "\n" : \
"=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
: CONCATENATE(__constraint_read_, \
COUNT_ARGS(__VA_ARGS__)) \
- : smccc_sve_clobbers "memory"); \
+ : "memory"); \
if (___res) \
*___res = (typeof(*___res)){r0, r1, r2, r3}; \
} while (0)
@@ -628,7 +602,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
asm ("" : \
: CONCATENATE(__constraint_read_, \
COUNT_ARGS(__VA_ARGS__)) \
- : smccc_sve_clobbers "memory"); \
+ : "memory"); \
if (___res) \
___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
} while (0)
diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h
index 83f9c6e1e5e9..b41bce82a191 100644
--- a/include/linux/asn1_decoder.h
+++ b/include/linux/asn1_decoder.h
@@ -9,6 +9,7 @@
#define _LINUX_ASN1_DECODER_H
#include <linux/asn1.h>
+#include <linux/types.h>
struct asn1_decoder;
diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h
index 08cd0c2ad34f..d17484dffb74 100644
--- a/include/linux/asn1_encoder.h
+++ b/include/linux/asn1_encoder.h
@@ -6,7 +6,6 @@
#include <linux/types.h>
#include <linux/asn1.h>
#include <linux/asn1_ber_bytecode.h>
-#include <linux/bug.h>
#define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32))
unsigned char *
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
deleted file mode 100644
index 76860a461ed2..000000000000
--- a/include/linux/ath9k_platform.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2008 Atheros Communications Inc.
- * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _LINUX_ATH9K_PLATFORM_H
-#define _LINUX_ATH9K_PLATFORM_H
-
-#define ATH9K_PLAT_EEP_MAX_WORDS 2048
-
-struct ath9k_platform_data {
- const char *eeprom_name;
-
- u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
- u8 *macaddr;
-
- int led_pin;
- u32 gpio_mask;
- u32 gpio_val;
-
- u32 bt_active_pin;
- u32 bt_priority_pin;
- u32 wlan_active_pin;
-
- bool endian_check;
- bool is_clk_25mhz;
- bool tx_gain_buffalo;
- bool disable_2ghz;
- bool disable_5ghz;
- bool led_active_high;
-
- int (*get_mac_revision)(void);
- int (*external_reset)(void);
-
- bool use_eeprom;
-};
-
-#endif /* _LINUX_ATH9K_PLATFORM_H */
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index f41395264dca..13a11f3c09b8 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -89,6 +89,9 @@ enum virtchnl_rx_hsplit {
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
+enum virtchnl_bw_limit_type {
+ VIRTCHNL_BW_SHAPER = 0,
+};
/* END GENERIC DEFINES */
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
@@ -151,6 +154,11 @@ enum virtchnl_ops {
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
+	/* opcodes 58 - 65 are reserved */
+	VIRTCHNL_OP_GET_QOS_CAPS = 66,
+	/* opcodes 67 through 111 are reserved */
+ VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+ VIRTCHNL_OP_CONFIG_QUANTA = 113,
VIRTCHNL_OP_MAX,
};
@@ -261,6 +269,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
+#define VIRTCHNL_VF_OFFLOAD_QOS BIT(29)
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -1416,6 +1425,86 @@ struct virtchnl_fdir_del {
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
+struct virtchnl_shaper_bw {
+ /* Unit is Kbps */
+ u32 committed;
+ u32 peak;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
+
+/* VIRTCHNL_OP_GET_QOS_CAPS
+ * VF sends this message to get its QoS Caps, such as
+ * TC number, Arbiter and Bandwidth.
+ */
+struct virtchnl_qos_cap_elem {
+ u8 tc_num;
+ u8 tc_prio;
+#define VIRTCHNL_ABITER_STRICT 0
+#define VIRTCHNL_ABITER_ETS 2
+ u8 arbiter;
+#define VIRTCHNL_STRICT_WEIGHT 1
+ u8 weight;
+ enum virtchnl_bw_limit_type type;
+ union {
+ struct virtchnl_shaper_bw shaper;
+ u8 pad2[32];
+ };
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
+
+struct virtchnl_qos_cap_list {
+ u16 vsi_id;
+ u16 num_elem;
+ struct virtchnl_qos_cap_elem cap[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_qos_cap_list);
+#define virtchnl_qos_cap_list_LEGACY_SIZEOF 44
+
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+ u16 queue_id;
+ u8 tc;
+ u8 pad;
+ struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+ u16 vsi_id;
+ u16 num_queues;
+ struct virtchnl_queue_bw cfg[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_queues_bw_cfg);
+#define virtchnl_queues_bw_cfg_LEGACY_SIZEOF 16
+
+enum virtchnl_queue_type {
+ VIRTCHNL_QUEUE_TYPE_TX = 0,
+ VIRTCHNL_QUEUE_TYPE_RX = 1,
+};
+
+/* structure to specify a chunk of contiguous queues */
+struct virtchnl_queue_chunk {
+ /* see enum virtchnl_queue_type */
+ s32 type;
+ u16 start_queue_id;
+ u16 num_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);
+
+struct virtchnl_quanta_cfg {
+ u16 quanta_size;
+ u16 pad;
+ struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
+
#define __vss_byone(p, member, count, old) \
(struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
@@ -1438,6 +1527,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
__vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \
__vss(virtchnl_tc_info, __vss_byelem, p, m, c), \
__vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \
+ __vss(virtchnl_qos_cap_list, __vss_byelem, p, m, c), \
+ __vss(virtchnl_queues_bw_cfg, __vss_byelem, p, m, c), \
__vss(virtchnl_rss_key, __vss_byone, p, m, c), \
__vss(virtchnl_rss_lut, __vss_byone, p, m, c))
@@ -1637,6 +1728,35 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
valid_len = sizeof(struct virtchnl_vlan_setting);
break;
+ case VIRTCHNL_OP_GET_QOS_CAPS:
+ break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+ valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF;
+ if (msglen >= valid_len) {
+ struct virtchnl_queues_bw_cfg *q_bw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+
+ valid_len = virtchnl_struct_size(q_bw, cfg,
+ q_bw->num_queues);
+ if (q_bw->num_queues == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ valid_len = sizeof(struct virtchnl_quanta_cfg);
+ if (msglen >= valid_len) {
+ struct virtchnl_quanta_cfg *q_quanta =
+ (struct virtchnl_quanta_cfg *)msg;
+
+ if (q_quanta->quanta_size == 0 ||
+ q_quanta->queue_select.num_queues == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
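
The two new variable-length messages follow the existing virtchnl pattern: a flexible array, a *_LEGACY_SIZEOF constant for the old fixed-size wire format, and a virtchnl_struct_size() entry so the validator above can recompute the true length from num_queues. A hedged sender-side sketch; send_msg_to_pf() and the surrounding variables are hypothetical:

/* Sketch: VF building a VIRTCHNL_OP_CONFIG_QUEUE_BW message for
 * n queues. send_msg_to_pf() is a made-up helper. */
static int my_cfg_queue_bw(u16 vsi_id, u16 n, u32 peak_kbps)
{
	struct virtchnl_queues_bw_cfg *q_bw;
	size_t len = virtchnl_struct_size(q_bw, cfg, n);
	int i, err;

	q_bw = kzalloc(len, GFP_KERNEL);
	if (!q_bw)
		return -ENOMEM;
	q_bw->vsi_id = vsi_id;
	q_bw->num_queues = n;
	for (i = 0; i < n; i++) {
		q_bw->cfg[i].queue_id = i;
		q_bw->cfg[i].shaper.peak = peak_kbps;	/* Kbps */
	}
	err = send_msg_to_pf(VIRTCHNL_OP_CONFIG_QUEUE_BW, (u8 *)q_bw, len);
	kfree(q_bw);
	return err;
}
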
diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h
index dd831c269e99..dbf0f74c1529 100644
--- a/include/linux/bio-integrity.h
+++ b/include/linux/bio-integrity.h
@@ -72,7 +72,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
unsigned int nr);
int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
unsigned int offset);
-int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
+int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len);
void bio_integrity_unmap_user(struct bio *bio);
bool bio_integrity_prep(struct bio *bio);
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
@@ -99,7 +99,7 @@ static inline void bioset_integrity_free(struct bio_set *bs)
}
static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
- ssize_t len, u32 seed)
+ ssize_t len)
{
return -EINVAL;
}
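
The seed parameter drops out of the user-mapping path in both the real and stub versions, so callers now pass just the buffer and length. A before/after sketch with illustrative variable names:

static int my_map_meta(struct bio *bio, void __user *meta_ubuf,
		       ssize_t meta_len)
{
	/* before this series the call carried a trailing seed:
	 *	bio_integrity_map_user(bio, meta_ubuf, meta_len, seed);
	 */
	return bio_integrity_map_user(bio, meta_ubuf, meta_len);
}
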
diff --git a/include/linux/bio.h b/include/linux/bio.h
index faceadb040f9..60830a6a5939 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -418,8 +418,6 @@ bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
-int bio_add_zone_append_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
@@ -677,6 +675,23 @@ static inline void bio_clear_polled(struct bio *bio)
bio->bi_opf &= ~REQ_POLLED;
}
+/**
+ * bio_is_zone_append - is this a zone append bio?
+ * @bio: bio to check
+ *
+ * Check if @bio is a zone append operation. Core block layer code and end_io
+ * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check
+ * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if
+ * it is not natively supported.
+ */
+static inline bool bio_is_zone_append(struct bio *bio)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return false;
+ return bio_op(bio) == REQ_OP_ZONE_APPEND ||
+ bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
+}
+
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);
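
bio_is_zone_append() exists because the opcode alone is no longer trustworthy once zone-append emulation rewrites REQ_OP_ZONE_APPEND to REQ_OP_WRITE; completion paths check the helper, which also catches BIO_EMULATES_ZONE_APPEND. A hedged end_io sketch; note_written_sector() is hypothetical:

static void my_end_io(struct bio *bio)
{
	/* on successful (possibly emulated) zone append, bi_sector
	 * holds the sector the data actually landed on */
	if (bio_is_zone_append(bio) && !bio->bi_status)
		note_written_sector(bio->bi_iter.bi_sector);	/* made up */
	bio_put(bio);
}
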
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index 676f8f860c47..c7eae0bfb013 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -28,7 +28,7 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
- ssize_t bytes, u32 seed);
+ ssize_t bytes);
static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
@@ -104,8 +104,7 @@ static inline int blk_rq_map_integrity_sg(struct request *q,
}
static inline int blk_rq_integrity_map_user(struct request *rq,
void __user *ubuf,
- ssize_t bytes,
- u32 seed)
+ ssize_t bytes)
{
return -EINVAL;
}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 4fecf46ef681..c596e0e4cb75 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -156,9 +156,6 @@ struct request {
struct blk_crypto_keyslot *crypt_keyslot;
#endif
- enum rw_hint write_hint;
- unsigned short ioprio;
-
enum mq_rq_state state;
atomic_t ref;
@@ -222,7 +219,9 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
static inline unsigned short req_get_ioprio(struct request *req)
{
- return req->ioprio;
+ if (req->bio)
+ return req->bio->bi_ioprio;
+ return 0;
}
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
@@ -230,62 +229,61 @@ static inline unsigned short req_get_ioprio(struct request *req)
#define rq_dma_dir(rq) \
(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
-#define rq_list_add(listptr, rq) do { \
- (rq)->rq_next = *(listptr); \
- *(listptr) = rq; \
-} while (0)
-
-#define rq_list_add_tail(lastpptr, rq) do { \
- (rq)->rq_next = NULL; \
- **(lastpptr) = rq; \
- *(lastpptr) = &rq->rq_next; \
-} while (0)
-
-#define rq_list_pop(listptr) \
-({ \
- struct request *__req = NULL; \
- if ((listptr) && *(listptr)) { \
- __req = *(listptr); \
- *(listptr) = __req->rq_next; \
- } \
- __req; \
-})
+static inline int rq_list_empty(const struct rq_list *rl)
+{
+ return rl->head == NULL;
+}
-#define rq_list_peek(listptr) \
-({ \
- struct request *__req = NULL; \
- if ((listptr) && *(listptr)) \
- __req = *(listptr); \
- __req; \
-})
+static inline void rq_list_init(struct rq_list *rl)
+{
+ rl->head = NULL;
+ rl->tail = NULL;
+}
-#define rq_list_for_each(listptr, pos) \
- for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
+static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
+{
+ rq->rq_next = NULL;
+ if (rl->tail)
+ rl->tail->rq_next = rq;
+ else
+ rl->head = rq;
+ rl->tail = rq;
+}
-#define rq_list_for_each_safe(listptr, pos, nxt) \
- for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \
- pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
+static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
+{
+ rq->rq_next = rl->head;
+ rl->head = rq;
+ if (!rl->tail)
+ rl->tail = rq;
+}
-#define rq_list_next(rq) (rq)->rq_next
-#define rq_list_empty(list) ((list) == (struct request *) NULL)
+static inline struct request *rq_list_pop(struct rq_list *rl)
+{
+ struct request *rq = rl->head;
-/**
- * rq_list_move() - move a struct request from one list to another
- * @src: The source list @rq is currently in
- * @dst: The destination list that @rq will be appended to
- * @rq: The request to move
- * @prev: The request preceding @rq in @src (NULL if @rq is the head)
- */
-static inline void rq_list_move(struct request **src, struct request **dst,
- struct request *rq, struct request *prev)
+ if (rq) {
+ rl->head = rl->head->rq_next;
+ if (!rl->head)
+ rl->tail = NULL;
+ rq->rq_next = NULL;
+ }
+
+ return rq;
+}
+
+static inline struct request *rq_list_peek(struct rq_list *rl)
{
- if (prev)
- prev->rq_next = rq->rq_next;
- else
- *src = rq->rq_next;
- rq_list_add(dst, rq);
+ return rl->head;
}
+#define rq_list_for_each(rl, pos) \
+ for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next)
+
+#define rq_list_for_each_safe(rl, pos, nxt) \
+ for (pos = rq_list_peek((rl)), nxt = pos->rq_next; \
+ pos; pos = nxt, nxt = pos ? pos->rq_next : NULL)
+
/**
* enum blk_eh_timer_return - How the timeout handler should proceed
* @BLK_EH_DONE: The block driver completed the command or will complete it at
@@ -577,7 +575,7 @@ struct blk_mq_ops {
* empty the @rqlist completely, then the rest will be queued
* individually by the block layer upon return.
*/
- void (*queue_rqs)(struct request **rqlist);
+ void (*queue_rqs)(struct rq_list *rqlist);
/**
* @get_budget: Reserve budget before queue request, once .queue_rq is
@@ -857,12 +855,6 @@ void blk_mq_end_request_batch(struct io_comp_batch *ib);
*/
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
- /*
- * passthrough io doesn't use iostat accounting, cgroup stats
- * and io scheduler functionalities.
- */
- if (blk_rq_is_passthrough(rq))
- return false;
return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}
@@ -892,7 +884,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
else if (iob->complete != complete)
return false;
iob->need_ts |= blk_mq_need_time_stamp(req);
- rq_list_add(&iob->req_list, req);
+ rq_list_add_tail(&iob->req_list, req);
return true;
}
@@ -925,6 +917,8 @@ void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
+void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
+void blk_freeze_queue_start_non_owner(struct request_queue *q);
void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
@@ -989,7 +983,6 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
rq->nr_phys_segments = nr_segs;
rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
- rq->ioprio = bio_prio(bio);
}
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
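
Replacing the open-coded request list with struct rq_list (defined in blkdev.h below) keeps a tail pointer, so adding to the tail is O(1) without the old double-indirection of rq_list_add_tail(lastpptr, rq), and the macros become type-checked inline helpers. A ->queue_rqs() sketch under the new API; mydev_submit() is hypothetical:

/* Sketch of a driver ->queue_rqs() against the new struct rq_list.
 * Requests left on the list when we return are queued individually
 * by the block layer, per the kerneldoc above. */
static void mydrv_queue_rqs(struct rq_list *rqlist)
{
	struct rq_list requeue = {};
	struct request *rq;

	while ((rq = rq_list_pop(rqlist))) {
		if (mydev_submit(rq))		/* made-up HW submit */
			rq_list_add_tail(&requeue, rq);
	}
	*rqlist = requeue;
}
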
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 50c3b959da28..a1fd0ddce5cf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -25,6 +25,7 @@
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
+#include <linux/lockdep.h>
struct module;
struct request_queue;
@@ -194,7 +195,7 @@ struct gendisk {
unsigned int nr_zones;
unsigned int zone_capacity;
unsigned int last_zone_capacity;
- unsigned long *conv_zones_bitmap;
+ unsigned long __rcu *conv_zones_bitmap;
unsigned int zone_wplugs_hash_bits;
spinlock_t zone_wplugs_lock;
struct mempool_s *zone_wplugs_pool;
@@ -349,6 +350,9 @@ typedef unsigned int __bitwise blk_flags_t;
/* I/O topology is misaligned */
#define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1))
+/* passthrough command IO accounting */
+#define BLK_FLAG_IOSTATS_PASSTHROUGH ((__force blk_flags_t)(1u << 2))
+
struct queue_limits {
blk_features_t features;
blk_flags_t flags;
@@ -371,6 +375,7 @@ struct queue_limits {
unsigned int max_user_discard_sectors;
unsigned int max_secure_erase_sectors;
unsigned int max_write_zeroes_sectors;
+ unsigned int max_hw_zone_append_sectors;
unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
@@ -471,6 +476,11 @@ struct request_queue {
struct xarray hctx_table;
struct percpu_ref q_usage_counter;
+ struct lock_class_key io_lock_cls_key;
+ struct lockdep_map io_lockdep_map;
+
+ struct lock_class_key q_lock_cls_key;
+ struct lockdep_map q_lockdep_map;
struct request *last_merge;
@@ -566,6 +576,10 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
+#ifdef CONFIG_LOCKDEP
+ struct task_struct *mq_freeze_owner;
+ int mq_freeze_owner_depth;
+#endif
wait_queue_head_t mq_freeze_wq;
/*
* Protect concurrent access to q_usage_counter by
@@ -617,6 +631,8 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
+#define blk_queue_passthrough_stat(q) \
+ ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
@@ -725,6 +741,9 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
#define for_each_bio(_bio) \
for (; _bio; _bio = _bio->bi_next)
+int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
+ struct fwnode_handle *fwnode);
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
@@ -929,6 +948,7 @@ queue_limits_start_update(struct request_queue *q)
int queue_limits_commit_update(struct request_queue *q,
struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
+int blk_validate_limits(struct queue_limits *lim);
/**
* queue_limits_cancel_update - cancel an atomic update of queue limits
@@ -986,6 +1006,11 @@ extern void blk_put_queue(struct request_queue *);
void blk_mark_disk_dead(struct gendisk *disk);
+struct rq_list {
+ struct request *head;
+ struct request *tail;
+};
+
#ifdef CONFIG_BLOCK
/*
* blk_plug permits building a queue of related requests by holding the I/O
@@ -999,10 +1024,10 @@ void blk_mark_disk_dead(struct gendisk *disk);
* blk_flush_plug() is called.
*/
struct blk_plug {
- struct request *mq_list; /* blk-mq requests */
+ struct rq_list mq_list; /* blk-mq requests */
/* if ios_left is > 1, we can batch tag/rq allocations */
- struct request *cached_rq;
+ struct rq_list cached_rqs;
u64 cur_ktime;
unsigned short nr_ios;
@@ -1145,6 +1170,11 @@ enum blk_default_limits {
*/
#define BLK_DEF_MAX_SECTORS_CAP 2560u
+static inline struct queue_limits *bdev_limits(struct block_device *bdev)
+{
+ return &bdev_get_queue(bdev)->limits;
+}
+
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
return q->limits.seg_boundary_mask;
@@ -1185,25 +1215,9 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned int
-queue_limits_max_zone_append_sectors(const struct queue_limits *l)
-{
- unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);
-
- return min_not_zero(l->max_zone_append_sectors, max_sectors);
-}
-
-static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
-{
- if (!blk_queue_is_zoned(q))
- return 0;
-
- return queue_limits_max_zone_append_sectors(&q->limits);
-}
-
static inline bool queue_emulates_zone_append(struct request_queue *q)
{
- return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors;
+ return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
}
static inline bool bdev_emulates_zone_append(struct block_device *bdev)
@@ -1214,7 +1228,7 @@ static inline bool bdev_emulates_zone_append(struct block_device *bdev)
static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
- return queue_max_zone_append_sectors(bdev_get_queue(bdev));
+ return bdev_limits(bdev)->max_zone_append_sectors;
}
static inline unsigned int bdev_max_segments(struct block_device *bdev)
@@ -1279,23 +1293,23 @@ unsigned int bdev_discard_alignment(struct block_device *bdev);
static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
- return bdev_get_queue(bdev)->limits.max_discard_sectors;
+ return bdev_limits(bdev)->max_discard_sectors;
}
static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
- return bdev_get_queue(bdev)->limits.discard_granularity;
+ return bdev_limits(bdev)->discard_granularity;
}
static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
- return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
+ return bdev_limits(bdev)->max_secure_erase_sectors;
}
static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
- return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors;
+ return bdev_limits(bdev)->max_write_zeroes_sectors;
}
static inline bool bdev_nonrot(struct block_device *bdev)
@@ -1331,7 +1345,7 @@ static inline bool bdev_write_cache(struct block_device *bdev)
static inline bool bdev_fua(struct block_device *bdev)
{
- return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA;
+ return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}
static inline bool bdev_nowait(struct block_device *bdev)
@@ -1376,6 +1390,33 @@ static inline bool bdev_is_zone_start(struct block_device *bdev,
return bdev_offset_from_zone_start(bdev, sector) == 0;
}
+/**
+ * bdev_zone_is_seq - check if a sector belongs to a sequential write zone
+ * @bdev: block device to check
+ * @sector: sector number
+ *
+ * Check if @sector on @bdev is contained in a sequential write required zone.
+ */
+static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
+{
+ bool is_seq = false;
+
+#if IS_ENABLED(CONFIG_BLK_DEV_ZONED)
+ if (bdev_is_zoned(bdev)) {
+ struct gendisk *disk = bdev->bd_disk;
+ unsigned long *bitmap;
+
+ rcu_read_lock();
+ bitmap = rcu_dereference(disk->conv_zones_bitmap);
+ is_seq = !bitmap ||
+ !test_bit(disk_zone_no(disk, sector), bitmap);
+ rcu_read_unlock();
+ }
+#endif
+
+ return is_seq;
+}
+
static inline int queue_dma_alignment(const struct request_queue *q)
{
return q->limits.dma_alignment;
@@ -1648,7 +1689,7 @@ int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);
struct io_comp_batch {
- struct request *req_list;
+ struct rq_list req_list;
bool need_ts;
void (*complete)(struct io_comp_batch *);
};
@@ -1674,6 +1715,22 @@ static inline bool bdev_can_atomic_write(struct block_device *bdev)
return true;
}
+static inline unsigned int
+bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
+{
+ if (!bdev_can_atomic_write(bdev))
+ return 0;
+ return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
+}
+
+static inline unsigned int
+bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
+{
+ if (!bdev_can_atomic_write(bdev))
+ return 0;
+ return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
+}
+
#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
#endif /* _LINUX_BLKDEV_H */
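
The pair of bdev_atomic_write_unit_*_bytes() helpers fold the bdev_can_atomic_write() gate into the query, so a single call answers both whether untorn writes are supported and what sizes are allowed. A validity-check sketch; the power-of-two and alignment rules here are assumptions borrowed from the kernel's generic atomic-write constraints, not something this hunk states:

/* Sketch: validate an untorn-write candidate. is_power_of_2() and
 * IS_ALIGNED() come from linux/log2.h and linux/align.h. */
static bool my_atomic_write_ok(struct block_device *bdev,
			       loff_t pos, unsigned int len)
{
	unsigned int min_b = bdev_atomic_write_unit_min_bytes(bdev);
	unsigned int max_b = bdev_atomic_write_unit_max_bytes(bdev);

	if (!min_b)	/* zero: no atomic-write support */
		return false;
	return len >= min_b && len <= max_b &&
	       is_power_of_2(len) && IS_ALIGNED(pos, len);
}
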
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index ce91d9b2acb9..f0f219271daf 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -209,7 +209,7 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
int __ret = 0; \
if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
- if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) && \
+ if (__sk && __sk == skb_to_full_sk(skb) && \
cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
CGROUP_INET_EGRESS); \
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bdadb0bb6cec..3ace0d6227e3 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -203,6 +203,7 @@ enum btf_field_type {
BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
BPF_REFCOUNT = (1 << 9),
BPF_WORKQUEUE = (1 << 10),
+ BPF_UPTR = (1 << 11),
};
typedef void (*btf_dtor_kfunc_t)(void *);
@@ -322,6 +323,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
return "kptr";
case BPF_KPTR_PERCPU:
return "percpu_kptr";
+ case BPF_UPTR:
+ return "uptr";
case BPF_LIST_HEAD:
return "bpf_list_head";
case BPF_LIST_NODE:
@@ -350,6 +353,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
return sizeof(u64);
case BPF_LIST_HEAD:
return sizeof(struct bpf_list_head);
@@ -379,6 +383,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
return __alignof__(u64);
case BPF_LIST_HEAD:
return __alignof__(struct bpf_list_head);
@@ -419,6 +424,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
break;
default:
WARN_ON_ONCE(1);
@@ -507,6 +513,25 @@ static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src
bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
}
+static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
+{
+ unsigned long *src_uptr, *dst_uptr;
+ const struct btf_field *field;
+ int i;
+
+ if (!btf_record_has_field(rec, BPF_UPTR))
+ return;
+
+ for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
+ if (field->type != BPF_UPTR)
+ continue;
+
+ src_uptr = src + field->offset;
+ dst_uptr = dst + field->offset;
+ swap(*src_uptr, *dst_uptr);
+ }
+}
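
Mechanically, bpf_obj_swap_uptrs() exchanges the 8-byte pointer word at each
BPF_UPTR offset recorded in the btf_record. A minimal mental model (the field
layout below is hypothetical):

	struct value {
		long a;
		void __user *uptr;	/* offset recorded as BPF_UPTR in rec */
	} *dst, *src;

	/* After bpf_obj_swap_uptrs(rec, dst, src), dst->uptr and src->uptr
	 * are exchanged, so exactly one of the two copies owns the user
	 * pointer. */
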
+
static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
{
u32 curr_off = 0;
@@ -907,10 +932,6 @@ enum bpf_reg_type {
* additional context, assume the value is non-null.
*/
PTR_TO_BTF_ID,
- /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
- * been checked for null. Used primarily to inform the verifier
- * an explicit null check is required for this struct.
- */
PTR_TO_MEM, /* reg points to valid memory region */
PTR_TO_ARENA,
PTR_TO_BUF, /* reg points to a read/write buffer */
@@ -923,6 +944,10 @@ enum bpf_reg_type {
PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
+ /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
+ * been checked for null. Used primarily to inform the verifier
+ * an explicit null check is required for this struct.
+ */
PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
/* This must be the last entry. Its purpose is to ensure the enum is
@@ -1300,8 +1325,12 @@ void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
#ifdef CONFIG_BPF_JIT
-int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
-int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog);
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
@@ -1373,7 +1402,8 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
-void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
+void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym);
+void bpf_image_ksym_add(struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
@@ -1382,12 +1412,14 @@ void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
- struct bpf_trampoline *tr)
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog)
{
return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
- struct bpf_trampoline *tr)
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog)
{
return -ENOTSUPP;
}
@@ -1476,6 +1508,7 @@ struct bpf_prog_aux {
u32 max_rdwr_access;
struct btf *attach_btf;
const struct bpf_ctx_arg_aux *ctx_arg_info;
+ void __percpu *priv_stack_ptr;
struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
struct bpf_prog *dst_prog;
struct bpf_trampoline *dst_trampoline;
@@ -1491,7 +1524,13 @@ struct bpf_prog_aux {
bool xdp_has_frags;
bool exception_cb;
bool exception_boundary;
+ bool is_extended; /* true if extended by freplace program */
+ bool jits_use_priv_stack;
+ bool priv_stack_requested;
+ u64 prog_array_member_cnt; /* counts how many times the prog is a member of a prog_array */
+ struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
struct bpf_arena *arena;
+ void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */
@@ -3461,4 +3500,10 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog)
return prog->aux->func_idx != 0;
}
+static inline bool bpf_prog_is_raw_tp(const struct bpf_prog *prog)
+{
+ return prog->type == BPF_PROG_TYPE_TRACING &&
+ prog->expected_attach_type == BPF_TRACE_RAW_TP;
+}
+
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index dcddb0aef7d8..ab7244d8108f 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -77,7 +77,13 @@ struct bpf_local_storage_elem {
struct hlist_node map_node; /* Linked to bpf_local_storage_map */
struct hlist_node snode; /* Linked to bpf_local_storage */
struct bpf_local_storage __rcu *local_storage;
- struct rcu_head rcu;
+ union {
+ struct rcu_head rcu;
+ struct hlist_node free_node; /* used to postpone
+ * bpf_selem_free
+ * after raw_spin_unlock
+ */
+ };
/* 8 bytes hole */
/* The data is stored in another cacheline to minimize
* the number of cachelines access during a cache hit.
@@ -181,7 +187,7 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
- bool charge_mem, gfp_t gfp_flags);
+ bool charge_mem, bool swap_uptrs, gfp_t gfp_flags);
void bpf_selem_free(struct bpf_local_storage_elem *selem,
struct bpf_local_storage_map *smap,
@@ -195,7 +201,7 @@ bpf_local_storage_alloc(void *owner,
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
- void *value, u64 map_flags, gfp_t gfp_flags);
+ void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 4513372c5bc8..f4290c179bee 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -48,22 +48,6 @@ enum bpf_reg_liveness {
REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
-/* For every reg representing a map value or allocated object pointer,
- * we consider the tuple of (ptr, id) for them to be unique in verifier
- * context and conside them to not alias each other for the purposes of
- * tracking lock state.
- */
-struct bpf_active_lock {
- /* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
- * there's no active lock held, and other fields have no
- * meaning. If non-NULL, it indicates that a lock is held and
- * id member has the reg->id of the register which can be >= 0.
- */
- void *ptr;
- /* This will be reg->id */
- u32 id;
-};
-
#define ITER_PREFIX "bpf_iter_"
enum bpf_iter_state {
@@ -266,6 +250,13 @@ struct bpf_stack_state {
};
struct bpf_reference_state {
+ /* Each reference object has a type. Ensure REF_TYPE_PTR is zero to
+ * default to pointer reference on zero initialization of a state.
+ */
+ enum ref_state_type {
+ REF_TYPE_PTR = 0,
+ REF_TYPE_LOCK,
+ } type;
/* Track each reference created with a unique id, even if the same
* instruction creates the reference multiple times (eg, via CALL).
*/
@@ -274,17 +265,10 @@ struct bpf_reference_state {
* is used purely to inform the user of a reference leak.
*/
int insn_idx;
- /* There can be a case like:
- * main (frame 0)
- * cb (frame 1)
- * func (frame 3)
- * cb (frame 4)
- * Hence for frame 4, if callback_ref just stored boolean, it would be
- * impossible to distinguish nested callback refs. Hence store the
- * frameno and compare that to callback_ref in check_reference_leak when
- * exiting a callback function.
- */
- int callback_ref;
+ /* Used to keep track of the source object of a lock, to ensure
+ * it matches on unlock.
+ */
+ void *ptr;
};
struct bpf_retval_range {
@@ -332,6 +316,7 @@ struct bpf_func_state {
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
+ int active_locks;
struct bpf_reference_state *refs;
/* The state of the stack. Each element of the array describes BPF_REG_SIZE
* (i.e. 8) bytes worth of stack memory.
@@ -349,7 +334,7 @@ struct bpf_func_state {
#define MAX_CALL_FRAMES 8
-/* instruction history flags, used in bpf_jmp_history_entry.flags field */
+/* instruction history flags, used in bpf_insn_hist_entry.flags field */
enum {
/* instruction references stack slot through PTR_TO_STACK register;
* we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
@@ -367,7 +352,7 @@ enum {
static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
-struct bpf_jmp_history_entry {
+struct bpf_insn_hist_entry {
u32 idx;
/* insn idx can't be bigger than 1 million */
u32 prev_idx : 22;
@@ -434,7 +419,6 @@ struct bpf_verifier_state {
u32 insn_idx;
u32 curframe;
- struct bpf_active_lock active_lock;
bool speculative;
bool active_rcu_lock;
u32 active_preempt_lock;
@@ -458,13 +442,14 @@ struct bpf_verifier_state {
* See get_loop_entry() for more information.
*/
struct bpf_verifier_state *loop_entry;
- /* jmp history recorded from first to last.
- * backtracking is using it to go from last to first.
- * For most states jmp_history_cnt is [0-3].
+ /* Sub-range of env->insn_hist[] corresponding to this state's
+ * instruction history.
+ * Backtracking is using it to go from last to first.
+ * For most states instruction history is short, 0-3 instructions.
* For loops can go up to ~40.
*/
- struct bpf_jmp_history_entry *jmp_history;
- u32 jmp_history_cnt;
+ u32 insn_hist_start;
+ u32 insn_hist_end;
u32 dfs_depth;
u32 callback_unroll_depth;
u32 may_goto_depth;
@@ -649,6 +634,12 @@ struct bpf_subprog_arg_info {
};
};
+enum priv_stack_mode {
+ PRIV_STACK_UNKNOWN,
+ NO_PRIV_STACK,
+ PRIV_STACK_ADAPTIVE,
+};
+
struct bpf_subprog_info {
/* 'start' has to be the first field otherwise find_subprog() won't work */
u32 start; /* insn idx of function entry point */
@@ -669,6 +660,7 @@ struct bpf_subprog_info {
/* true if bpf_fastcall stack region is used by functions that can't be inlined */
bool keep_fastcall_stack: 1;
+ enum priv_stack_mode priv_stack_mode;
u8 arg_cnt;
struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};
@@ -747,7 +739,9 @@ struct bpf_verifier_env {
int cur_stack;
} cfg;
struct backtrack_state bt;
- struct bpf_jmp_history_entry *cur_hist_ent;
+ struct bpf_insn_hist_entry *insn_hist;
+ struct bpf_insn_hist_entry *cur_hist_ent;
+ u32 insn_hist_cap;
u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
@@ -888,6 +882,7 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
case BPF_PROG_TYPE_TRACING:
return prog->expected_attach_type != BPF_TRACE_ITER;
case BPF_PROG_TYPE_STRUCT_OPS:
+ return prog->aux->jits_use_priv_stack;
case BPF_PROG_TYPE_LSM:
return false;
default:
diff --git a/include/linux/btf.h b/include/linux/btf.h
index b8a583194c4a..4214e76c9168 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -75,6 +75,7 @@
#define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */
#define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
#define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when they are invoked */
+#define KF_FASTCALL (1 << 12) /* kfunc supports bpf_fastcall protocol */
/*
* Tag marking a kernel function as a kfunc. This is meant to minimize the
@@ -581,6 +582,16 @@ int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_ty
bool btf_types_are_same(const struct btf *btf1, u32 id1,
const struct btf *btf2, u32 id2);
int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx);
+
+static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
+{
+ if (!btf_type_is_ptr(t))
+ return false;
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+
+ return btf_type_is_struct(t);
+}
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
@@ -660,15 +671,4 @@ static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *fun
return -EOPNOTSUPP;
}
#endif
-
-static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
-{
- if (!btf_type_is_ptr(t))
- return false;
-
- t = btf_type_skip_modifiers(btf, t->type, NULL);
-
- return btf_type_is_struct(t);
-}
-
#endif
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index c0e3e1426a82..139bdececdcf 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -283,5 +283,6 @@ extern u32 btf_tracing_ids[];
extern u32 bpf_cgroup_btf_id[];
extern u32 bpf_local_storage_map_btf_id[];
extern u32 btf_bpf_map_id[];
+extern u32 bpf_kmem_cache_btf_id[];
#endif
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index 6617d9c68d86..83e6613d12ae 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -28,13 +28,6 @@
extern unsigned char * cfag12864b_buffer;
/*
- * Get the refresh rate of the LCD
- *
- * Returns the refresh rate (hertz).
- */
-extern unsigned int cfag12864b_getrate(void);
-
-/*
* Enable refreshing
*
* Returns 0 if successful (anyone was using it),
@@ -50,16 +43,6 @@ extern unsigned char cfag12864b_enable(void);
extern void cfag12864b_disable(void);
/*
- * Is enabled refreshing? (is anyone using the module?)
- *
- * Returns 0 if refreshing is not enabled (anyone is using it),
- * or != 0 if refreshing is enabled (someone is using it).
- *
- * Useful for buffer read-only modules.
- */
-extern unsigned char cfag12864b_isenabled(void);
-
-/*
* Is the module inited?
*/
extern unsigned char cfag12864b_isinited(void);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 47ae4c4d924c..1b20d2d8ef7c 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -327,6 +327,7 @@ struct cgroup_base_stat {
#ifdef CONFIG_SCHED_CORE
u64 forceidle_sum;
#endif
+ u64 ntime;
};
/*
@@ -397,7 +398,7 @@ struct cgroup_freezer_state {
bool freeze;
/* Should the cgroup actually be frozen? */
- int e_freeze;
+ bool e_freeze;
/* Fields below are protected by css_set_lock */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 038b2d523bf8..966fcc5ff8ef 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -234,7 +234,7 @@ const volatile void * __must_check_fn(const volatile void *val)
* DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
*
* CLASS(fdget, f)(fd);
- * if (!fd_file(f))
+ * if (fd_empty(f))
* return -EBADF;
*
* // use 'f' without concern
@@ -273,6 +273,12 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
* an anonymous instance of the (guard) class, not recommended for
* conditional locks.
*
+ * if_not_guard(name, args...) { <error handling> }:
+ * convenience macro for conditional guards that executes the statement
+ * that follows only if the lock was not acquired (typically an error return).
+ *
+ * Only for conditional locks.
+ *
* scoped_guard (name, args...) { }:
* similar to CLASS(name, scope)(args), except the variable (with the
* explicit name 'scope') is declard in a for-loop such that its scope is
@@ -285,14 +291,20 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
* similar to scoped_guard(), except it does fail when the lock
* acquire fails.
*
+ * Only for conditional locks.
*/
+#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \
+static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
+
#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
- { return *_T; }
+ { return (void *)(__force unsigned long)*_T; }
#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
EXTEND_CLASS(_name, _ext, \
({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
class_##_name##_t _T) \
@@ -303,16 +315,48 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
CLASS(_name, __UNIQUE_ID(guard))
#define __guard_ptr(_name) class_##_name##_lock_ptr
+#define __is_cond_ptr(_name) class_##_name##_is_conditional
-#define scoped_guard(_name, args...) \
- for (CLASS(_name, scope)(args), \
- *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
-
-#define scoped_cond_guard(_name, _fail, args...) \
- for (CLASS(_name, scope)(args), \
- *done = NULL; !done; done = (void *)1) \
- if (!__guard_ptr(_name)(&scope)) _fail; \
- else
+/*
+ * Helper macro for scoped_guard().
+ *
+ * Note that the "!__is_cond_ptr(_name)" part of the condition lets the
+ * compiler prove that, for unconditional locks, the body of the loop
+ * (caller-provided code glued to the else clause) cannot be skipped.
+ * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is
+ * too hard to deduce (even though it could be proven true for
+ * unconditional locks).
+ */
+#define __scoped_guard(_name, _label, args...) \
+ for (CLASS(_name, scope)(args); \
+ __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \
+ ({ goto _label; })) \
+ if (0) { \
+_label: \
+ break; \
+ } else
+
+#define scoped_guard(_name, args...) \
+ __scoped_guard(_name, __UNIQUE_ID(label), args)
+
+#define __scoped_cond_guard(_name, _fail, _label, args...) \
+ for (CLASS(_name, scope)(args); true; ({ goto _label; })) \
+ if (!__guard_ptr(_name)(&scope)) { \
+ BUILD_BUG_ON(!__is_cond_ptr(_name)); \
+ _fail; \
+_label: \
+ break; \
+ } else
+
+#define scoped_cond_guard(_name, _fail, args...) \
+ __scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
+
+#define __if_not_guard(_name, _id, args...) \
+ BUILD_BUG_ON(!__is_cond_ptr(_name)); \
+ CLASS(_name, _id)(args); \
+ if (!__guard_ptr(_name)(&_id))
+
+#define if_not_guard(_name, args...) \
+ __if_not_guard(_name, __UNIQUE_ID(guard), args)
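
A usage sketch for the new macro (hypothetical driver code, assuming the
mutex_try conditional guard defined via DEFINE_GUARD_COND() in linux/mutex.h):

	static int update_state(struct my_dev *dev)
	{
		/* Take dev->lock, or bail out if it is contended. */
		if_not_guard(mutex_try, &dev->lock)
			return -EBUSY;

		dev->state++;		/* lock held, dropped on return */
		return 0;
	}
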
/*
* Additional helper macros for generating lock guards with types, either for
@@ -347,7 +391,7 @@ static inline void class_##_name##_destructor(class_##_name##_t *_T) \
\
static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
{ \
- return _T->lock; \
+ return (void *)(__force unsigned long)_T->lock; \
}
@@ -369,14 +413,17 @@ static inline class_##_name##_t class_##_name##_constructor(void) \
}
#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
+__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
+__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
__DEFINE_LOCK_GUARD_0(_name, _lock)
#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
EXTEND_CLASS(_name, _ext, \
({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
if (_T->lock && !(_condlock)) _T->lock = NULL; \
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d35b677b08fe..ef1b16da6ad5 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -215,7 +215,6 @@ static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
extern int clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
-extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h
index 2bb4d8c2f1b0..c4ef4ae2eded 100644
--- a/include/linux/clocksource_ids.h
+++ b/include/linux/clocksource_ids.h
@@ -6,6 +6,7 @@
enum clocksource_ids {
CSID_GENERIC = 0,
CSID_ARM_ARCH_COUNTER,
+ CSID_S390_TOD,
CSID_X86_TSC_EARLY,
CSID_X86_TSC,
CSID_X86_KVM_CLK,
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 1a957ea2f4fe..0c8b9601e603 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -266,6 +266,12 @@ struct ftrace_likely_data {
#define noinline_for_stack noinline
/*
+ * Use noinline_for_tracing for functions that should not be inlined for
+ * tracing reasons.
+ */
+#define noinline_for_tracing noinline
+
+/*
* Sanitizer helper attributes: Because using __always_inline and
* __no_sanitize_* conflict, provide helper attributes that will either expand
* to __no_sanitize_* in compilation units where instrumentation is enabled
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 2361ed4d2b15..a04b73c40173 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -147,6 +147,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_EIOINTC_STARTING,
CPUHP_AP_IRQ_AVECINTC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING,
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
@@ -208,7 +209,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
- CPUHP_AP_PERF_X86_RAPL_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
@@ -227,6 +227,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 0928a6c8ae1e..59444b495d49 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -71,9 +71,63 @@ typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
-struct dentry *debugfs_create_file(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops);
+struct debugfs_short_fops {
+ ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
+ loff_t (*llseek) (struct file *, loff_t, int);
+};
+
+struct dentry *debugfs_create_file_full(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+struct dentry *debugfs_create_file_short(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct debugfs_short_fops *fops);
+
+/**
+ * debugfs_create_file - create a file in the debugfs filesystem
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ * on. The inode.i_private pointer will point to this value on
+ * the open() call.
+ * @fops: a pointer to a struct file_operations or struct debugfs_short_fops that
+ * should be used for this file.
+ *
+ * This is the basic "create a file" function for debugfs. It allows for a
+ * wide range of flexibility in creating a file (if you want to create a
+ * directory, use debugfs_create_dir() instead).
+ *
+ * This function will return a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded;
+ * you are responsible here). If an error occurs, ERR_PTR(-ERROR) will be
+ * returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ *
+ * If fops points to a struct debugfs_short_fops, then simple_open() will be
+ * used for the open, and only read/write/llseek are supported and are proxied,
+ * so no module reference or release hook is needed.
+ *
+ * NOTE: it's expected that most callers should _ignore_ the errors returned
+ * by this function. Other debugfs functions handle the fact that the "dentry"
+ * passed to them could be an error and they don't crash in that case.
+ * Drivers should generally work fine even if debugfs fails to init anyway.
+ */
+#define debugfs_create_file(name, mode, parent, data, fops) \
+ _Generic(fops, \
+ const struct file_operations *: debugfs_create_file_full, \
+ const struct debugfs_short_fops *: debugfs_create_file_short, \
+ struct file_operations *: debugfs_create_file_full, \
+ struct debugfs_short_fops *: debugfs_create_file_short) \
+ (name, mode, parent, data, fops)
+
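
A usage sketch (hypothetical driver code, not part of this patch): thanks to
the _Generic() dispatch above, existing callers keep working unchanged, while
new callers can pass the trimmed-down ops and skip carrying a full
file_operations with an owner module:

	static ssize_t foo_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
	{
		return simple_read_from_buffer(buf, count, ppos, "ok\n", 3);
	}

	static const struct debugfs_short_fops foo_fops = {
		.read	= foo_read,
		.llseek	= default_llseek,
	};

	/* resolves to debugfs_create_file_short() at compile time;
	 * NULL parent means the debugfs root */
	debugfs_create_file("foo", 0444, NULL, NULL, &foo_fops);
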
struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
@@ -207,7 +261,7 @@ static inline struct dentry *debugfs_lookup(const char *name,
static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
- const struct file_operations *fops)
+ const void *fops)
{
return ERR_PTR(-ENODEV);
}
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 32444686b6ff..8b95545e7924 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -23,13 +23,17 @@ struct debug_obj_descr;
* @state: tracked object state
* @astate: current active state
* @object: pointer to the real object
+ * @batch_last: pointer to the last hlist node in a batch
* @descr: pointer to an object type specific debug description structure
*/
struct debug_obj {
- struct hlist_node node;
- enum debug_obj_state state;
- unsigned int astate;
- void *object;
+ struct hlist_node node;
+ enum debug_obj_state state;
+ unsigned int astate;
+ union {
+ void *object;
+ struct hlist_node *batch_last;
+ };
const struct debug_obj_descr *descr;
};
diff --git a/include/linux/delay.h b/include/linux/delay.h
index ff9cda975e30..89866bab100d 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -6,21 +6,12 @@
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines, using a pre-computed "loops_per_jiffy" value.
- *
- * Please note that ndelay(), udelay() and mdelay() may return early for
- * several reasons:
- * 1. computed loops_per_jiffy too low (due to the time taken to
- * execute the timer interrupt.)
- * 2. cache behaviour affecting the time it takes to execute the
- * loop function.
- * 3. CPU clock rate changes.
- *
- * Please see this thread:
- * https://lists.openwall.net/linux-kernel/2011/01/09/56
+ * Sleep routines using timer list timers or hrtimers.
*/
#include <linux/math.h>
#include <linux/sched.h>
+#include <linux/jiffies.h>
extern unsigned long loops_per_jiffy;
@@ -35,12 +26,21 @@ extern unsigned long loops_per_jiffy;
* The 2nd mdelay() definition ensures GCC will optimize away the
* while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G.
*/
-
#ifndef MAX_UDELAY_MS
#define MAX_UDELAY_MS 5
#endif
#ifndef mdelay
+/**
+ * mdelay - Insert a busy-wait delay, specified in milliseconds
+ * @n: requested delay in milliseconds
+ *
+ * See udelay() for basic information about mdelay() and its variants.
+ *
+ * Please double check whether mdelay() is the right way to go, or whether
+ * refactoring the code to allow the use of msleep() is the better option.
+ */
#define mdelay(n) (\
(__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \
({unsigned long __ms=(n); while (__ms--) udelay(1000);}))
@@ -63,30 +63,75 @@ unsigned long msleep_interruptible(unsigned int msecs);
void usleep_range_state(unsigned long min, unsigned long max,
unsigned int state);
+/**
+ * usleep_range - Sleep for an approximate time
+ * @min: Minimum time in microseconds to sleep
+ * @max: Maximum time in microseconds to sleep
+ *
+ * For basic information please refer to usleep_range_state().
+ *
+ * The task will be in the state TASK_UNINTERRUPTIBLE during the sleep.
+ */
static inline void usleep_range(unsigned long min, unsigned long max)
{
usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
}
-static inline void usleep_idle_range(unsigned long min, unsigned long max)
+/**
+ * usleep_range_idle - Sleep for an approximate time with idle time accounting
+ * @min: Minimum time in microseconds to sleep
+ * @max: Maximum time in microseconds to sleep
+ *
+ * For basic information please refer to usleep_range_state().
+ *
+ * The sleeping task has the state TASK_IDLE during the sleep to avoid
+ * contributing to the load average.
+ */
+static inline void usleep_range_idle(unsigned long min, unsigned long max)
{
usleep_range_state(min, max, TASK_IDLE);
}
+/**
+ * ssleep - wrapper around msleep for sleep durations in seconds
+ * @seconds: Requested sleep duration in seconds
+ *
+ * Please refer to msleep() for detailed information.
+ */
static inline void ssleep(unsigned int seconds)
{
msleep(seconds * 1000);
}
-/* see Documentation/timers/timers-howto.rst for the thresholds */
+static const unsigned int max_slack_shift = 2;
+#define USLEEP_RANGE_UPPER_BOUND ((TICK_NSEC << max_slack_shift) / NSEC_PER_USEC)
+
+/**
+ * fsleep - flexible sleep which autoselects the best mechanism
+ * @usecs: requested sleep duration in microseconds
+ *
+ * fsleep() selects the best mechanism that will provide at most 25% slack
+ * on top of the requested sleep duration. Therefore it uses:
+ *
+ * * udelay() loop for sleep durations <= 10 microseconds to avoid hrtimer
+ * overhead for really short sleep durations.
+ * * usleep_range() for sleep durations where the use of msleep() would lead
+ * to a slack larger than 25%. This depends on the granularity of jiffies.
+ * * msleep() for all other sleep durations.
+ *
+ * Note: When %CONFIG_HIGH_RES_TIMERS is not set, all sleeps are processed with
+ * the granularity of jiffies and the slack might exceed 25%, especially for
+ * short sleep durations.
+ */
static inline void fsleep(unsigned long usecs)
{
if (usecs <= 10)
udelay(usecs);
- else if (usecs <= 20000)
- usleep_range(usecs, 2 * usecs);
+ else if (usecs < USLEEP_RANGE_UPPER_BOUND)
+ usleep_range(usecs, usecs + (usecs >> max_slack_shift));
else
- msleep(DIV_ROUND_UP(usecs, 1000));
+ msleep(DIV_ROUND_UP(usecs, USEC_PER_MSEC));
}
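
To make the thresholds concrete, a worked example (assuming HZ=1000, i.e.
TICK_NSEC == 1000000, so USLEEP_RANGE_UPPER_BOUND == (1000000 << 2) / 1000
== 4000 microseconds):

	fsleep(8);	/* <= 10 us: busy-waits via udelay(8) */
	fsleep(100);	/* < 4000 us: usleep_range(100, 100 + (100 >> 2)), i.e. max 125 us */
	fsleep(4500);	/* >= 4000 us: msleep(DIV_ROUND_UP(4500, 1000)), i.e. msleep(5) */
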
#endif /* defined(_LINUX_DELAY_H) */
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index ca32b5bb28eb..eb2094e43050 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -276,6 +276,7 @@ do { \
dev_driver_string(dev), dev_name(dev), ## arg)
__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+__printf(3, 4) int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...);
/* Simple helper for dev_err_probe() when ERR_PTR() is to be returned. */
#define dev_err_ptr_probe(dev, ___err, fmt, ...) \
diff --git a/include/linux/dim.h b/include/linux/dim.h
index 1b581ff25a15..06543fd40fcc 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -351,7 +351,8 @@ void dim_park_tired(struct dim *dim);
* Takes into consideration counter wrap-around.
* Returned boolean indicates whether curr_stats are reliable.
*/
-bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+bool dim_calc_stats(const struct dim_sample *start,
+ const struct dim_sample *end,
struct dim_stats *curr_stats);
/**
@@ -424,7 +425,7 @@ struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode);
* This is the main logic of the algorithm, where data is processed in order
* to decide on next required action.
*/
-void net_dim(struct dim *dim, struct dim_sample end_sample);
+void net_dim(struct dim *dim, const struct dim_sample *end_sample);
/* RDMA DIM */
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index 81f7b623d0ba..5e4f9ab1cf75 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -26,6 +26,10 @@ struct dpll_device_ops {
struct netlink_ext_ack *extack);
int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv,
s32 *temp, struct netlink_ext_ack *extack);
+ int (*clock_quality_level_get)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ unsigned long *qls,
+ struct netlink_ext_ack *extack);
};
struct dpll_pin_ops {
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 82ebf9223948..f8811c46b89e 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -34,9 +34,6 @@ struct dw_apb_clocksource {
};
void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced);
struct dw_apb_clock_event_device *
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 281298e77a15..808b1a5102e7 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -127,7 +127,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
if (WARN_ON_ONCE(count > DQL_MAX_OBJECT))
return;
- dql->last_obj_cnt = count;
+ WRITE_ONCE(dql->last_obj_cnt, count);
/* We want to force a write first, so that cpu do not attempt
* to get cache line containing last_obj_cnt, num_queued, adj_limit
diff --git a/include/linux/efi.h b/include/linux/efi.h
index e28d88066033..e5815867aba9 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -379,7 +379,6 @@ void efi_native_runtime_setup(void);
#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
#define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
-#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61)
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
@@ -581,15 +580,6 @@ struct efi_mem_range {
};
typedef struct {
- u32 version;
- u32 length;
- u64 memory_protection_attribute;
-} efi_properties_table_t;
-
-#define EFI_PROPERTIES_TABLE_VERSION 0x00010000
-#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1
-
-typedef struct {
u16 version;
u16 length;
u32 runtime_services_supported;
@@ -871,10 +861,9 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len)
#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */
#define EFI_ARCH_1 7 /* First arch-specific bit */
#define EFI_DBG 8 /* Print additional debug info at runtime */
-#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
-#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
-#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */
-#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? */
+#define EFI_MEM_ATTR 9 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
+#define EFI_MEM_NO_SOFT_RESERVE 10 /* Is the kernel configured to ignore soft reservations? */
+#define EFI_PRESERVE_BS_REGIONS 11 /* Are EFI boot-services memory segments available? */
#ifdef CONFIG_EFI
/*
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 1ff52020cf75..752e0b297582 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -55,6 +55,8 @@ struct em_perf_table {
* struct em_perf_domain - Performance domain
* @em_table: Pointer to the runtime modifiable em_perf_table
* @nr_perf_states: Number of performance states
+ * @min_perf_state: Minimum allowed Performance State index
+ * @max_perf_state: Maximum allowed Performance State index
* @flags: See "em_perf_domain flags"
* @cpus: Cpumask covering the CPUs of the domain. It's here
* for performance reasons to avoid potential cache
@@ -70,6 +72,8 @@ struct em_perf_table {
struct em_perf_domain {
struct em_perf_table __rcu *em_table;
int nr_perf_states;
+ int min_perf_state;
+ int max_perf_state;
unsigned long flags;
unsigned long cpus[];
};
@@ -173,13 +177,14 @@ void em_table_free(struct em_perf_table __rcu *table);
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int nr_states);
int em_dev_update_chip_binning(struct device *dev);
+int em_update_performance_limits(struct em_perf_domain *pd,
+ unsigned long freq_min_khz, unsigned long freq_max_khz);
/**
* em_pd_get_efficient_state() - Get an efficient performance state from the EM
* @table: List of performance states, in ascending order
- * @nr_perf_states: Number of performance states
+ * @pd: performance domain to which @table belongs
* @max_util: Max utilization to map with the EM
- * @pd_flags: Performance Domain flags
*
* It is called from the scheduler code quite frequently and as a consequence
* doesn't implement any check.
@@ -188,13 +193,16 @@ int em_dev_update_chip_binning(struct device *dev);
* requirement.
*/
static inline int
-em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states,
- unsigned long max_util, unsigned long pd_flags)
+em_pd_get_efficient_state(struct em_perf_state *table,
+ struct em_perf_domain *pd, unsigned long max_util)
{
+ unsigned long pd_flags = pd->flags;
+ int min_ps = pd->min_perf_state;
+ int max_ps = pd->max_perf_state;
struct em_perf_state *ps;
int i;
- for (i = 0; i < nr_perf_states; i++) {
+ for (i = min_ps; i <= max_ps; i++) {
ps = &table[i];
if (ps->performance >= max_util) {
if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
@@ -204,7 +212,7 @@ em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states,
}
}
- return nr_perf_states - 1;
+ return max_ps;
}
/**
@@ -253,8 +261,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* requested performance.
*/
em_table = rcu_dereference(pd->em_table);
- i = em_pd_get_efficient_state(em_table->state, pd->nr_perf_states,
- max_util, pd->flags);
+ i = em_pd_get_efficient_state(em_table->state, pd, max_util);
ps = &em_table->state[i];
/*
@@ -391,6 +398,12 @@ static inline int em_dev_update_chip_binning(struct device *dev)
{
return -EINVAL;
}
+static inline
+int em_update_performance_limits(struct em_perf_domain *pd,
+ unsigned long freq_min_khz, unsigned long freq_max_khz)
+{
+ return -EINVAL;
+}
#endif
#endif
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 1e50cdb83ae5..fc61d0205c97 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -64,7 +64,8 @@
#define EXIT_TO_USER_MODE_WORK \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
+ _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
+ _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
ARCH_EXIT_TO_USER_MODE_WORK)
/**
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 6813171afccb..16149f6625e4 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -17,8 +17,9 @@
#endif
#define XFER_TO_GUEST_MODE_WORK \
- (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
- _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
+ (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME | \
+ ARCH_XFER_TO_GUEST_MODE_WORK)
struct kvm_vcpu;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 12f6dc567598..b8b935b52603 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -734,6 +734,9 @@ struct kernel_ethtool_ts_info {
* @rxfh_per_ctx_key: device supports setting different RSS key for each
* additional context. Netlink API should report hfunc, key, and input_xfrm
* for every context, not just context 0.
+ * @cap_rss_rxnfc_adds: device supports nonzero ring_cookie in filters with
+ * %FLOW_RSS flag; the queue ID from the filter is added to the value from
+ * the indirection table to determine the delivery queue.
* @rxfh_indir_space: max size of RSS indirection tables, if indirection table
* size as returned by @get_rxfh_indir_size may change during lifetime
* of the device. Leave as 0 if the table size is constant.
@@ -956,6 +959,7 @@ struct ethtool_ops {
u32 cap_rss_ctx_supported:1;
u32 cap_rss_sym_xor_supported:1;
u32 rxfh_per_ctx_key:1;
+ u32 cap_rss_rxnfc_adds:1;
u32 rxfh_indir_space;
u16 rxfh_key_space;
u16 rxfh_priv_size;
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 3337745d81bd..0c0d00fcd131 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -42,7 +42,7 @@ static inline void eventpoll_release(struct file *file)
* because the file in on the way to be removed and nobody ( but
* eventpoll ) has still a reference to this file.
*/
- if (likely(!file->f_ep))
+ if (likely(!READ_ONCE(file->f_ep)))
return;
/*
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 893a1d21dc1c..1ab165c2939f 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -250,19 +250,6 @@ struct export_operations {
unsigned long flags;
};
-/**
- * exportfs_lock_op_is_async() - export op supports async lock operation
- * @export_ops: the nfs export operations to check
- *
- * Returns true if the nfs export_operations structure has
- * EXPORT_OP_ASYNC_LOCK in their flags set
- */
-static inline bool
-exportfs_lock_op_is_async(const struct export_operations *export_ops)
-{
- return export_ops->flags & EXPORT_OP_ASYNC_LOCK;
-}
-
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
int *max_len, struct inode *parent,
int flags);
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index b1c5722f2b3c..c45306a9f007 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -92,10 +92,6 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un
return files_lookup_fd_raw(files, fd);
}
-struct file *lookup_fdget_rcu(unsigned int fd);
-struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd);
-struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd);
-
static inline bool close_on_exec(unsigned int fd, const struct files_struct *files)
{
return test_bit(fd, files_fdtable(files)->close_on_exec);
@@ -115,7 +111,6 @@ int iterate_fd(struct files_struct *, unsigned,
const void *);
extern int close_fd(unsigned int fd);
-extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
extern struct file *file_close_fd(unsigned int fd);
extern struct kmem_cache *files_cachep;
diff --git a/include/linux/file.h b/include/linux/file.h
index f98de143245a..302f11355b10 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -30,12 +30,6 @@ extern struct file *alloc_file_pseudo_noaccount(struct inode *, struct vfsmount
extern struct file *alloc_file_clone(struct file *, int flags,
const struct file_operations *);
-static inline void fput_light(struct file *file, int fput_needed)
-{
- if (fput_needed)
- fput(file);
-}
-
/* either a reference to struct file + flags
* (cloned vs. borrowed, pos locked), with
* flags stored in lower bits of value,
@@ -72,6 +66,7 @@ static inline void fdput(struct fd fd)
extern struct file *fget(unsigned int fd);
extern struct file *fget_raw(unsigned int fd);
extern struct file *fget_task(struct task_struct *task, unsigned int fd);
+extern struct file *fget_task_next(struct task_struct *task, unsigned int *fd);
extern void __f_unlock_pos(struct file *);
struct fd fdget(unsigned int fd);
@@ -87,6 +82,7 @@ static inline void fdput_pos(struct fd f)
DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
DEFINE_CLASS(fd_raw, struct fd, fdput(_T), fdget_raw(fd), int fd)
+DEFINE_CLASS(fd_pos, struct fd, fdput_pos(_T), fdget_pos(fd), int fd)
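
A usage sketch for the new class (hypothetical; it mirrors the CLASS(fd, ...)
pattern documented in linux/cleanup.h):

	CLASS(fd_pos, f)(fd);		/* fdget_pos(fd); fdput_pos() on scope exit */
	if (fd_empty(f))
		return -EBADF;
	/* use fd_file(f); the position lock is held where required while 'f' is in scope */
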
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
diff --git a/include/linux/file_ref.h b/include/linux/file_ref.h
new file mode 100644
index 000000000000..9b3a8d9b17ab
--- /dev/null
+++ b/include/linux/file_ref.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_FILE_REF_H
+#define _LINUX_FILE_REF_H
+
+#include <linux/atomic.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+/*
+ * file_ref is a reference count implementation specifically for use by
+ * files. It takes inspiration from rcuref but differs in key aspects
+ * such as support for SLAB_TYPESAFE_BY_RCU type caches.
+ *
+ * FILE_REF_ONEREF FILE_REF_MAXREF
+ * 0x0000000000000000UL 0x7FFFFFFFFFFFFFFFUL
+ * <-------------------valid ------------------->
+ *
+ * FILE_REF_SATURATED
+ * 0x8000000000000000UL 0xA000000000000000UL 0xBFFFFFFFFFFFFFFFUL
+ * <-----------------------saturation zone---------------------->
+ *
+ * FILE_REF_RELEASED FILE_REF_DEAD
+ * 0xC000000000000000UL 0xE000000000000000UL
+ * <-------------------dead zone------------------->
+ *
+ * FILE_REF_NOREF
+ * 0xFFFFFFFFFFFFFFFFUL
+ */
+
+#ifdef CONFIG_64BIT
+#define FILE_REF_ONEREF 0x0000000000000000UL
+#define FILE_REF_MAXREF 0x7FFFFFFFFFFFFFFFUL
+#define FILE_REF_SATURATED 0xA000000000000000UL
+#define FILE_REF_RELEASED 0xC000000000000000UL
+#define FILE_REF_DEAD 0xE000000000000000UL
+#define FILE_REF_NOREF 0xFFFFFFFFFFFFFFFFUL
+#else
+#define FILE_REF_ONEREF 0x00000000U
+#define FILE_REF_MAXREF 0x7FFFFFFFU
+#define FILE_REF_SATURATED 0xA0000000U
+#define FILE_REF_RELEASED 0xC0000000U
+#define FILE_REF_DEAD 0xE0000000U
+#define FILE_REF_NOREF 0xFFFFFFFFU
+#endif
+
+typedef struct {
+#ifdef CONFIG_64BIT
+ atomic64_t refcnt;
+#else
+ atomic_t refcnt;
+#endif
+} file_ref_t;
+
+/**
+ * file_ref_init - Initialize a file reference count
+ * @ref: Pointer to the reference count
+ * @cnt: The initial reference count, typically '1'
+ */
+static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
+{
+ atomic_long_set(&ref->refcnt, cnt - 1);
+}
+
+bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
+
+/**
+ * file_ref_get - Acquire one reference on a file
+ * @ref: Pointer to the reference count
+ *
+ * Similar to atomic_inc_not_zero() but saturates at FILE_REF_MAXREF.
+ *
+ * Provides full memory ordering.
+ *
+ * Return: False if the attempt to acquire a reference failed. This happens
+ * when the last reference has been put already. True if a reference
+ * was successfully acquired.
+ */
+static __always_inline __must_check bool file_ref_get(file_ref_t *ref)
+{
+ /*
+ * Unconditionally increase the reference count with full
+ * ordering. The saturation and dead zones provide enough
+ * tolerance for this.
+ *
+ * If this indicates a negative count, the file in question may
+ * already have been freed and immediately reused due to
+ * SLAB_TYPESAFE_BY_RCU. Hence, unconditionally altering the file
+ * reference count, e.g., to reset it back to the middle of the
+ * dead zone, risks marking someone else's file as dead behind
+ * their back.
+ *
+ * It would be possible to do a careful:
+ *
+ * cnt = atomic_long_inc_return();
+ * if (likely(cnt >= 0))
+ * return true;
+ *
+ * and then something like:
+ *
+ * if (cnt >= FILE_REF_RELEASED)
+ * atomic_long_try_cmpxchg(&ref->refcnt, &cnt, FILE_REF_DEAD),
+ *
+ * to set the value back to the middle of the dead zone. But it's
+ * practically impossible to go from FILE_REF_DEAD to
+ * FILE_REF_ONEREF. It would need 2305843009213693952 (2^61)
+ * file_ref_get()s to resurrect such a dead file.
+ */
+ return !atomic_long_add_negative(1, &ref->refcnt);
+}
+
+/**
+ * file_ref_inc - Acquire one reference on a file
+ * @ref: Pointer to the reference count
+ *
+ * Acquire an additional reference on a file. Warns if the caller didn't
+ * already hold a reference.
+ */
+static __always_inline void file_ref_inc(file_ref_t *ref)
+{
+ long prior = atomic_long_fetch_inc_relaxed(&ref->refcnt);
+ WARN_ONCE(prior < 0, "file_ref_inc() on a released file reference");
+}
+
+/**
+ * file_ref_put - Release a file reference
+ * @ref: Pointer to the reference count
+ *
+ * Provides release memory ordering, such that prior loads and stores
+ * are done before, and provides an acquire ordering on success such
+ * that free() must come after.
+ *
+ * Return: True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely release
+ * the object which is protected by the reference counter.
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * release the protected object.
+ */
+static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
+{
+ long cnt;
+
+ /*
+ * While files are SLAB_TYPESAFE_BY_RCU and thus file_ref_put()
+ * calls don't risk UAFs when a file is recycled, it is still
+ * vulnerable to UAFs caused by freeing the whole slab page once
+ * it becomes unused. Preventing file_ref_put() from being
+ * preempted protects against this.
+ */
+ guard(preempt)();
+ /*
+ * Unconditionally decrease the reference count. The saturation
+ * and dead zones provide enough tolerance for this. If this
+ * fails then we need to handle the last reference drop and
+ * cases inside the saturation and dead zones.
+ */
+ cnt = atomic_long_dec_return(&ref->refcnt);
+ if (cnt >= 0)
+ return false;
+ return __file_ref_put(ref, cnt);
+}
+
+/**
+ * file_ref_read - Read the number of file references
+ * @ref: Pointer to the reference count
+ *
+ * Return: The number of held references (0 ... N)
+ */
+static inline unsigned long file_ref_read(file_ref_t *ref)
+{
+ unsigned long c = atomic_long_read(&ref->refcnt);
+
+ /* Return 0 if within the DEAD zone. */
+ return c >= FILE_REF_RELEASED ? 0 : c + 1;
+}
+
+#endif
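
As a usage sketch (hypothetical code, not part of this patch), the expected
lifecycle of an object protected by a file_ref_t looks like this:

	struct obj {
		file_ref_t ref;
		/* payload ... */
	};

	file_ref_init(&obj->ref, 1);		/* creation: one initial reference */

	if (!file_ref_get(&obj->ref))		/* lookup: fails once dead */
		return NULL;

	if (file_ref_put(&obj->ref))		/* release: true on the last put */
		kfree(obj);
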
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
index bb44224c6676..c412ded9171e 100644
--- a/include/linux/filelock.h
+++ b/include/linux/filelock.h
@@ -180,6 +180,11 @@ static inline void locks_wake_up(struct file_lock *fl)
wake_up(&fl->c.flc_wait);
}
+static inline bool locks_can_async_lock(const struct file_operations *fops)
+{
+ return !fops->lock || fops->fop_flags & FOP_ASYNC_LOCK;
+}
+
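
For illustration (a hedged sketch, not taken from this patch): together with
the new FOP_ASYNC_LOCK flag in linux/fs.h, a filesystem whose ->lock can
complete asynchronously now advertises that on its file_operations instead of
the removed export_operations flag:

	static const struct file_operations myfs_file_ops = {
		.lock		= myfs_lock,	/* hypothetical async-capable ->lock */
		.fop_flags	= FOP_ASYNC_LOCK,
	};

	/* lock managers (e.g. lockd/nfsd) then test locks_can_async_lock(file->f_op) */
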
/* fs/locks.c */
void locks_free_lock_context(struct inode *inode);
void locks_free_lock(struct file_lock *fl);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 7d7578a8eac1..3a21947f2fd4 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1119,6 +1119,7 @@ bool bpf_jit_supports_exceptions(void);
bool bpf_jit_supports_ptr_xchg(void);
bool bpf_jit_supports_arena(void);
bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
+bool bpf_jit_supports_private_stack(void);
u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index 9f14976399ab..4621aec0328c 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -85,6 +85,8 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
bool qcom_scm_restore_sec_cfg_available(void);
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank);
+bool qcom_scm_set_gpu_smmu_aperture_is_available(void);
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index d7d07afc0532..76d85ad82ec0 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -3,7 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2021 Xilinx
- * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -32,6 +32,19 @@
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
+/* SMC function ID to get SiP SVC version */
+#define GET_SIP_SVC_VERSION (0x8200ff03U)
+
+/* SiP Service Calls version numbers */
+#define SIP_SVC_VERSION_MAJOR (0U)
+#define SIP_SVC_VERSION_MINOR (2U)
+
+#define SIP_SVC_PASSTHROUGH_VERSION ((SIP_SVC_VERSION_MAJOR << 16) | \
+ SIP_SVC_VERSION_MINOR)
+
+/* Fixed ID for FW specific APIs */
+#define PASS_THROUGH_FW_CMD_ID GENMASK(11, 0)
+
/* PM API versions */
#define PM_API_VERSION_1 1
#define PM_API_VERSION_2 2
@@ -51,6 +64,7 @@
#define API_ID_MASK GENMASK(7, 0)
#define MODULE_ID_MASK GENMASK(11, 8)
+#define PLM_MODULE_ID_MASK GENMASK(15, 8)
/* Firmware feature check version mask */
#define FIRMWARE_VERSION_MASK 0xFFFFU
@@ -62,7 +76,13 @@
#define GET_CALLBACK_DATA 0xa01
/* Number of 32bits values in payload */
-#define PAYLOAD_ARG_CNT 4U
+#define PAYLOAD_ARG_CNT 7U
+
+/* Number of 64bits arguments for SMC call */
+#define SMC_ARG_CNT_64 8U
+
+/* Number of 32bits arguments for SMC call */
+#define SMC_ARG_CNT_32 13U
/* Number of arguments for a callback */
#define CB_ARG_CNT 4
@@ -130,6 +150,7 @@
enum pm_module_id {
PM_MODULE_ID = 0x0,
+ XPM_MODULE_ID = 0x2,
XSEM_MODULE_ID = 0x3,
TF_A_MODULE_ID = 0xa,
};
@@ -218,9 +239,13 @@ enum pm_ioctl_id {
/* Runtime feature configuration */
IOCTL_SET_FEATURE_CONFIG = 26,
IOCTL_GET_FEATURE_CONFIG = 27,
+ /* IOCTL for Secure Read/Write Interface */
+ IOCTL_READ_REG = 28,
/* Dynamic SD/GEM configuration */
IOCTL_SET_SD_CONFIG = 30,
IOCTL_SET_GEM_CONFIG = 31,
+ /* IOCTL to get default/current QoS */
+ IOCTL_GET_QOS = 34,
};
enum pm_query_id {
@@ -533,6 +558,7 @@ struct zynqmp_pm_query_data {
};
int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
+int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
int zynqmp_pm_get_api_version(u32 *version);
@@ -553,9 +579,9 @@ int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select);
-int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+int zynqmp_pm_reset_assert(const u32 reset,
const enum zynqmp_pm_reset_action assert_flag);
-int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status);
+int zynqmp_pm_reset_get_status(const u32 reset, u32 *status);
unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode);
int zynqmp_pm_bootmode_write(u32 ps_mode);
int zynqmp_pm_init_finalize(void);
@@ -698,14 +724,13 @@ static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
return -ENODEV;
}
-static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+static inline int zynqmp_pm_reset_assert(const u32 reset,
const enum zynqmp_pm_reset_action assert_flag)
{
return -ENODEV;
}
-static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
- u32 *status)
+static inline int zynqmp_pm_reset_get_status(const u32 reset, u32 *status)
{
return -ENODEV;
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3559446279c1..7e29433c5ecc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -45,6 +45,8 @@
#include <linux/slab.h>
#include <linux/maple_tree.h>
#include <linux/rw_hint.h>
+#include <linux/file_ref.h>
+#include <linux/unicode.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -623,6 +625,7 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_NOFOLLOW 0x0004
#define IOP_XATTR 0x0008
#define IOP_DEFAULT_READLINK 0x0010
+#define IOP_MGTIME 0x0020
/*
* Keep mostly read-only and often accessed (especially for
@@ -1005,7 +1008,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
/**
* struct file - Represents a file
- * @f_count: reference count
+ * @f_ref: reference count
* @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
* @f_mode: FMODE_* flags often used in hotpaths
* @f_op: file operations
@@ -1030,7 +1033,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
* @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
*/
struct file {
- atomic_long_t f_count;
+ file_ref_t f_ref;
spinlock_t f_lock;
fmode_t f_mode;
const struct file_operations *f_op;
@@ -1078,15 +1081,14 @@ struct file_handle {
static inline struct file *get_file(struct file *f)
{
- long prior = atomic_long_fetch_inc_relaxed(&f->f_count);
- WARN_ONCE(!prior, "struct file::f_count incremented from zero; use-after-free condition present!\n");
+ file_ref_inc(&f->f_ref);
return f;
}
struct file *get_file_rcu(struct file __rcu **f);
struct file *get_file_active(struct file **f);
-#define file_count(x) atomic_long_read(&(x)->f_count)
+#define file_count(f) file_ref_read(&(f)->f_ref)
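A minimal sketch of the unchanged calling convention: get_file() still pairs with fput(), and file_count() remains a diagnostic snapshot, now backed by file_ref_read():

static void keep_file_for_a_while(struct file *f)
{
	struct file *mine = get_file(f);	/* take an extra reference */

	pr_debug("file refs: %ld\n", file_count(mine));	/* snapshot only */
	/* ... use the file ... */
	fput(mine);				/* drop our reference */
}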
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -1584,6 +1586,8 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
struct timespec64 current_time(struct inode *inode);
struct timespec64 inode_set_ctime_current(struct inode *inode);
+struct timespec64 inode_set_ctime_deleg(struct inode *inode,
+ struct timespec64 update);
static inline time64_t inode_get_atime_sec(const struct inode *inode)
{
@@ -1653,6 +1657,17 @@ static inline struct timespec64 inode_set_mtime(struct inode *inode,
return inode_set_mtime_to_ts(inode, ts);
}
+/*
+ * Multigrain timestamps
+ *
+ * Conditionally use fine-grained ctime and mtime timestamps when there
+ * are users actively observing them via getattr. The primary use-case
+ * for this is NFS clients that use the ctime to distinguish between
+ * different states of the file, and that are often fooled by multiple
+ * operations that occur in the same coarse-grained timer tick.
+ */
+#define I_CTIME_QUERIED ((u32)BIT(31))
+
static inline time64_t inode_get_ctime_sec(const struct inode *inode)
{
return inode->i_ctime_sec;
@@ -1660,7 +1675,7 @@ static inline time64_t inode_get_ctime_sec(const struct inode *inode)
static inline long inode_get_ctime_nsec(const struct inode *inode)
{
- return inode->i_ctime_nsec;
+ return inode->i_ctime_nsec & ~I_CTIME_QUERIED;
}
static inline struct timespec64 inode_get_ctime(const struct inode *inode)
@@ -1671,13 +1686,7 @@ static inline struct timespec64 inode_get_ctime(const struct inode *inode)
return ts;
}
-static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
- struct timespec64 ts)
-{
- inode->i_ctime_sec = ts.tv_sec;
- inode->i_ctime_nsec = ts.tv_nsec;
- return ts;
-}
+struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts);
/**
* inode_set_ctime - set the ctime in the inode
@@ -2116,6 +2125,8 @@ struct file_operations {
#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4))
/* Treat loff_t as unsigned (e.g., /dev/mem) */
#define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5))
+/* Supports asynchronous lock callbacks */
+#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6))
/* Wrap a directory iterator that needs exclusive inode access */
int wrap_directory_iterator(struct file *, struct dir_context *,
@@ -2542,6 +2553,7 @@ struct file_system_type {
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
+#define FS_MGTIME 64 /* FS uses multigrain timestamps */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2565,6 +2577,17 @@ struct file_system_type {
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
+/**
+ * is_mgtime: is this inode using multigrain timestamps
+ * @inode: inode to test for multigrain timestamps
+ *
+ * Return true if the inode uses multigrain timestamps, false otherwise.
+ */
+static inline bool is_mgtime(const struct inode *inode)
+{
+ return inode->i_opflags & IOP_MGTIME;
+}
+
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@ -2766,6 +2789,16 @@ extern struct filename *getname_flags(const char __user *, int);
extern struct filename *getname_uflags(const char __user *, int);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
+extern struct filename *__getname_maybe_null(const char __user *);
+static inline struct filename *getname_maybe_null(const char __user *name, int flags)
+{
+ if (!(flags & AT_EMPTY_PATH))
+ return getname(name);
+
+ if (!name)
+ return NULL;
+ return __getname_maybe_null(name);
+}
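A usage sketch in the statx() style (upath and flags are hypothetical caller variables). Note that a NULL return here is a valid "empty path" result rather than an error, assuming putname() tolerates NULL as the maybe-null API implies:

struct filename *name;

name = getname_maybe_null(upath, flags);
if (IS_ERR(name))
	return PTR_ERR(name);
/* name == NULL means AT_EMPTY_PATH with a NULL or empty user path. */
/* ... resolve via dfd when name is NULL, via name otherwise ... */
putname(name);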
extern void putname(struct filename *name);
extern int finish_open(struct file *file, struct dentry *dentry,
@@ -3326,6 +3359,7 @@ extern void page_put_link(void *);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
+void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode);
void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
void generic_fill_statx_atomic_writes(struct kstat *stat,
@@ -3456,6 +3490,54 @@ extern int generic_ci_match(const struct inode *parent,
const struct qstr *folded_name,
const u8 *de_name, u32 de_name_len);
+#if IS_ENABLED(CONFIG_UNICODE)
+int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str);
+int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name);
+
+/**
+ * generic_ci_validate_strict_name - Check if a given name is suitable
+ * for a directory
+ *
+ * This function checks if the proposed filename is valid for the
+ * parent directory. That means that only valid UTF-8 filenames will be
+ * accepted for casefold directories from filesystems created with the
+ * strict encoding flag. It also means that any name will be accepted
+ * for directories that don't have casefold enabled, or that aren't
+ * strict about the encoding.
+ *
+ * @dir: inode of the directory where the new file will be created
+ * @name: name of the new file
+ *
+ * Return:
+ * * True if the filename is suitable for this directory, including the
+ *   case where the name would fail a strict-encoding check but the
+ *   directory itself isn't strict
+ * * False if the filename isn't suitable for this directory. This only
+ *   happens when the directory is casefolded and the filesystem is
+ *   strict about its encoding.
+ */
+static inline bool generic_ci_validate_strict_name(struct inode *dir, struct qstr *name)
+{
+ if (!IS_CASEFOLDED(dir) || !sb_has_strict_encoding(dir->i_sb))
+ return true;
+
+ /*
+	 * A casefolded dir must have an encoding set, unless the filesystem
+ * is corrupted
+ */
+ if (WARN_ON_ONCE(!dir->i_sb->s_encoding))
+ return true;
+
+ return !utf8_validate(dir->i_sb->s_encoding, name);
+}
+#else
+static inline bool generic_ci_validate_strict_name(struct inode *dir, struct qstr *name)
+{
+ return true;
+}
+#endif
+
static inline bool sb_has_encoding(const struct super_block *sb)
{
#if IS_ENABLED(CONFIG_UNICODE)
@@ -3726,6 +3808,6 @@ static inline bool vfs_empty_path(int dfd, const char __user *path)
return !c;
}
-bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos);
+int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter);
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index 6cf713a7e6c6..3cef566088fc 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -28,7 +28,8 @@ typedef int fs_param_type(struct p_log *,
*/
fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64,
fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev,
- fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid;
+ fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid,
+ fs_param_is_file_or_string;
/*
* Specification of the type of value a parameter wants.
@@ -133,6 +134,8 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL)
#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
+#define fsparam_file_or_string(NAME, OPT) \
+ __fsparam(fs_param_is_file_or_string, NAME, OPT, 0, NULL)
#define fsparam_uid(NAME, OPT) __fsparam(fs_param_is_uid, NAME, OPT, 0, NULL)
#define fsparam_gid(NAME, OPT) __fsparam(fs_param_is_gid, NAME, OPT, 0, NULL)
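A sketch of how a filesystem might use the new parameter type in its table; the option enum value and table name are hypothetical:

static const struct fs_parameter_spec myfs_fs_parameters[] = {
	/* accepts either an open file (by fd) or a plain string */
	fsparam_file_or_string("source", Opt_source),
	{}
};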
diff --git a/include/linux/fsl/netc_global.h b/include/linux/fsl/netc_global.h
new file mode 100644
index 000000000000..fdecca8c90f0
--- /dev/null
+++ b/include/linux/fsl/netc_global.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2024 NXP
+ */
+#ifndef __NETC_GLOBAL_H
+#define __NETC_GLOBAL_H
+
+#include <linux/io.h>
+
+static inline u32 netc_read(void __iomem *reg)
+{
+ return ioread32(reg);
+}
+
+static inline void netc_write(void __iomem *reg, u32 val)
+{
+ iowrite32(val, reg);
+}
+
+#endif
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index fd5e84d0ec47..aa9ddd1e4bb6 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -113,14 +113,54 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val
#ifdef CONFIG_FUNCTION_TRACER
-extern int ftrace_enabled;
+#include <linux/ftrace_regs.h>
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+extern int ftrace_enabled;
+/**
+ * ftrace_regs - ftrace partial/optimal register set
+ *
+ * ftrace_regs represents a group of registers which are used at
+ * function entry and exit. There are three types of registers.
+ *
+ * - Registers for passing the parameters to the callee, including the
+ *   stack pointer. (e.g. rcx, rdx, rdi, rsi, r8, r9 and rsp on x86_64)
+ * - Registers for passing the return values to the caller.
+ *   (e.g. rax and rdx on x86_64)
+ * - Registers for hooking the function call and return, including the
+ *   frame pointer (the frame pointer is architecture/config dependent)
+ *   (e.g. rip, rbp and rsp for x86_64)
+ *
+ * Also, architecture dependent fields can be used for internal processing.
+ * (e.g. orig_ax on x86_64)
+ *
+ * On function entry, those registers will be restored except for
+ * the stack pointer, so that the user can change the function parameters
+ * and the instruction pointer (e.g. for live patching.)
+ * On function exit, only the registers used for return values
+ * are restored.
+ *
+ * NOTE: users *must not* access regs directly, only via the APIs, because
+ * the members can change depending on the architecture.
+ * This is why the structure is empty here, so that nothing accesses
+ * the ftrace_regs directly.
+ */
struct ftrace_regs {
- struct pt_regs regs;
+ /* Nothing to see here, use the accessor functions! */
};
-#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
+
+#define ftrace_regs_size() sizeof(struct __arch_ftrace_regs)
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+/*
+ * Architectures that define HAVE_DYNAMIC_FTRACE_WITH_ARGS must define their own
+ * arch_ftrace_get_regs() where it only returns pt_regs *if* it is fully
+ * populated. It should return NULL otherwise.
+ */
+static inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ return &arch_ftrace_regs(fregs)->regs;
+}
/*
* ftrace_regs_set_instruction_pointer() is to be defined by the architecture
@@ -150,23 +190,6 @@ static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
return ftrace_get_regs(fregs) != NULL;
}
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
-#define ftrace_regs_get_instruction_pointer(fregs) \
- instruction_pointer(ftrace_get_regs(fregs))
-#define ftrace_regs_get_argument(fregs, n) \
- regs_get_kernel_argument(ftrace_get_regs(fregs), n)
-#define ftrace_regs_get_stack_pointer(fregs) \
- kernel_stack_pointer(ftrace_get_regs(fregs))
-#define ftrace_regs_return_value(fregs) \
- regs_return_value(ftrace_get_regs(fregs))
-#define ftrace_regs_set_return_value(fregs, ret) \
- regs_set_return_value(ftrace_get_regs(fregs), ret)
-#define ftrace_override_function_with_return(fregs) \
- override_function_with_return(ftrace_get_regs(fregs))
-#define ftrace_regs_query_register_offset(name) \
- regs_query_register_offset(name)
-#endif
-
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
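A minimal sketch of a callback honouring the new opacity rule, touching ftrace_regs only through the accessors (the callback name is hypothetical):

static void my_ftrace_cb(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);
	unsigned long sp = ftrace_regs_get_stack_pointer(fregs);

	pr_debug("%pS: arg0=%lx sp=%lx\n", (void *)ip, arg0, sp);
}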
@@ -1015,6 +1038,17 @@ struct ftrace_graph_ent {
} __packed;
/*
+ * Structure that defines an entry function trace with retaddr.
+ * It's already packed but the attribute "packed" is needed
+ * to remove extra padding at the end.
+ */
+struct fgraph_retaddr_ent {
+ unsigned long func; /* Current function */
+ int depth;
+ unsigned long retaddr; /* Return address */
+} __packed;
+
+/*
* Structure that defines a return function trace.
* It's already packed but the attribute "packed" is needed
* to remove extra padding at the end.
@@ -1039,7 +1073,8 @@ typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *,
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
struct fgraph_ops *); /* entry */
-extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
+ struct fgraph_ops *gops);
bool ftrace_pids_enabled(struct ftrace_ops *ops);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -1055,6 +1090,7 @@ struct fgraph_ops {
void *fgraph_reserve_data(int idx, int size_bytes);
void *fgraph_retrieve_data(int idx, int *size_bytes);
+void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth);
/*
* Stack of return addresses for functions
@@ -1064,10 +1100,6 @@ void *fgraph_retrieve_data(int idx, int *size_bytes);
struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
- unsigned long long calltime;
-#ifdef CONFIG_FUNCTION_PROFILER
- unsigned long long subtime;
-#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
unsigned long fp;
#endif
@@ -1087,6 +1119,7 @@ function_graph_enter(unsigned long ret, unsigned long func,
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
+unsigned long ftrace_graph_top_ret_addr(struct task_struct *task);
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
diff --git a/include/linux/ftrace_regs.h b/include/linux/ftrace_regs.h
new file mode 100644
index 000000000000..be1ed0c891d0
--- /dev/null
+++ b/include/linux/ftrace_regs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FTRACE_REGS_H
+#define _LINUX_FTRACE_REGS_H
+
+/*
+ * Archs that simply embed a pt_regs in their ftrace_regs can use this
+ * default. An architecture that does not use pt_regs must define all of
+ * the accessor functions below.
+ */
+#ifndef HAVE_ARCH_FTRACE_REGS
+struct __arch_ftrace_regs {
+ struct pt_regs regs;
+};
+
+#define arch_ftrace_regs(fregs) ((struct __arch_ftrace_regs *)(fregs))
+
+struct ftrace_regs;
+
+#define ftrace_regs_get_instruction_pointer(fregs) \
+ instruction_pointer(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(&arch_ftrace_regs(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_get_return_value(fregs) \
+ regs_return_value(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(&arch_ftrace_regs(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+
+#endif /* HAVE_ARCH_FTRACE_REGS */
+
+#endif /* _LINUX_FTRACE_REGS_H */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index a951de920e20..a0a6d25f883f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -371,28 +371,6 @@ __meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mas
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
-struct page_frag_cache;
-void page_frag_cache_drain(struct page_frag_cache *nc);
-extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
- gfp_t gfp_mask, unsigned int align_mask);
-
-static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align)
-{
- WARN_ON_ONCE(!is_power_of_2(align));
- return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
-}
-
-static inline void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask)
-{
- return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
-}
-
-extern void page_frag_free(void *addr);
-
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 2d105be7bbc3..6270150f4e29 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -21,9 +21,6 @@ struct device;
#define GPIOF_OUT_INIT_LOW ((0 << 0) | (0 << 1))
#define GPIOF_OUT_INIT_HIGH ((0 << 0) | (1 << 1))
-/* Gpio pin is active-low */
-#define GPIOF_ACTIVE_LOW (1 << 2)
-
/**
* struct gpio - a structure describing a GPIO with configuration
* @gpio: the GPIO number
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 121d5b8bc867..d11e9c9a5f15 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -359,6 +359,7 @@ struct hid_item {
* | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP:
* | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
* | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
+ * | @HID_QUIRK_IGNORE_SPECIAL_DRIVER:
* | @HID_QUIRK_FULLSPEED_INTERVAL:
* | @HID_QUIRK_NO_INIT_REPORTS:
* | @HID_QUIRK_NO_IGNORE:
@@ -384,6 +385,7 @@ struct hid_item {
#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
#define HID_QUIRK_NOINVERT BIT(21)
+#define HID_QUIRK_IGNORE_SPECIAL_DRIVER BIT(22)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
@@ -599,15 +601,17 @@ enum hid_battery_status {
struct hid_driver;
struct hid_ll_driver;
-struct hid_device { /* device report descriptor */
- const __u8 *dev_rdesc;
- unsigned dev_rsize;
- const __u8 *rdesc;
- unsigned rsize;
+struct hid_device {
+ const __u8 *dev_rdesc; /* device report descriptor */
+ const __u8 *bpf_rdesc; /* bpf modified report descriptor, if any */
+ const __u8 *rdesc; /* currently used report descriptor */
+ unsigned int dev_rsize;
+ unsigned int bpf_rsize;
+ unsigned int rsize;
+ unsigned int collection_size; /* Number of allocated hid_collections */
struct hid_collection *collection; /* List of HID collections */
- unsigned collection_size; /* Number of allocated hid_collections */
- unsigned maxcollection; /* Number of parsed collections */
- unsigned maxapplication; /* Number of applications */
+ unsigned int maxcollection; /* Number of parsed collections */
+ unsigned int maxapplication; /* Number of applications */
__u16 bus; /* BUS ID */
__u16 group; /* Report group */
__u32 vendor; /* Vendor ID */
@@ -974,7 +978,6 @@ const struct hid_device_id *hid_match_device(struct hid_device *hdev,
struct hid_driver *hdrv);
bool hid_compare_device_paths(struct hid_device *hdev_a,
struct hid_device *hdev_b, char separator);
-s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index 6a47223e6460..a6876ab29004 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -212,7 +212,7 @@ int hid_bpf_connect_device(struct hid_device *hdev);
void hid_bpf_disconnect_device(struct hid_device *hdev);
void hid_bpf_destroy_device(struct hid_device *hid);
int hid_bpf_device_init(struct hid_device *hid);
-u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size);
+const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size);
#else /* CONFIG_HID_BPF */
static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
u8 *data, u32 *size, int interrupt,
@@ -228,13 +228,8 @@ static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
static inline int hid_bpf_device_init(struct hid_device *hid) { return 0; }
-/*
- * This specialized allocator has to be a macro for its allocations to be
- * accounted separately (to have a separate alloc_tag). The typecast is
- * intentional to enforce typesafety.
- */
-#define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size) \
- ((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL))
+static inline const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc,
+ unsigned int *size) { return rdesc; }
#endif /* CONFIG_HID_BPF */
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 9d7754ad5e9b..6dbd0d49628f 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -229,6 +229,12 @@ struct hisi_qm_status {
struct hisi_qm;
+enum acc_err_result {
+ ACC_ERR_NONE,
+ ACC_ERR_NEED_RESET,
+ ACC_ERR_RECOVERED,
+};
+
struct hisi_qm_err_info {
char *acpi_rst;
u32 msi_wr_port;
@@ -257,9 +263,9 @@ struct hisi_qm_err_ini {
void (*close_axi_master_ooo)(struct hisi_qm *qm);
void (*open_sva_prefetch)(struct hisi_qm *qm);
void (*close_sva_prefetch)(struct hisi_qm *qm);
- void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
void (*show_last_dfx_regs)(struct hisi_qm *qm);
void (*err_info_init)(struct hisi_qm *qm);
+ enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
};
struct hisi_qm_cap_info {
@@ -274,13 +280,25 @@ struct hisi_qm_cap_info {
u32 v3_val;
};
+struct hisi_qm_cap_query_info {
+ u32 type;
+ const char *name;
+ u32 offset;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
struct hisi_qm_cap_record {
u32 type;
+ const char *name;
u32 cap_val;
};
struct hisi_qm_cap_tables {
+ u32 qm_cap_size;
struct hisi_qm_cap_record *qm_cap_table;
+ u32 dev_cap_size;
struct hisi_qm_cap_record *dev_cap_table;
};
@@ -436,37 +454,6 @@ struct hisi_qp {
struct uacce_queue *uacce_q;
};
-static inline int q_num_set(const char *val, const struct kernel_param *kp,
- unsigned int device)
-{
- struct pci_dev *pdev;
- u32 n, q_num;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
- if (!pdev) {
- q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
- pr_info("No device found currently, suppose queue number is %u\n",
- q_num);
- } else {
- if (pdev->revision == QM_HW_V1)
- q_num = QM_QNUM_V1;
- else
- q_num = QM_QNUM_V2;
-
- pci_dev_put(pdev);
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret || n < QM_MIN_QNUM || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
-}
-
static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
u32 n;
@@ -526,6 +513,8 @@ static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_
mutex_unlock(&qm_list->lock);
}
+int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device);
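With the helper now exported, a driver's module-parameter hook reduces to a thin wrapper. A sketch under that assumption (the device ID macro and q_num variable are placeholders):

static u32 q_num = 256;

static int my_q_num_set(const char *val, const struct kernel_param *kp)
{
	/* MY_ACC_PF_DEVICE_ID stands in for the driver's real PCI device ID */
	return hisi_qm_q_num_set(val, kp, MY_ACC_PF_DEVICE_ID);
}
module_param_call(q_num, my_q_num_set, param_get_int, &q_num, 0444);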
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
@@ -583,6 +572,9 @@ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
const struct hisi_qm_cap_info *info_table,
u32 index, bool is_read);
+u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
+ const struct hisi_qm_cap_query_info *info_table,
+ u32 index, bool is_read);
int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
u32 dev_algs_size);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index aa1e65ccb615..7ef5f7ef31a9 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -228,32 +228,17 @@ static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
-extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
- enum hrtimer_mode mode);
+extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode);
+extern void hrtimer_setup_on_stack(struct hrtimer *timer,
+ enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode);
+extern void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, clockid_t clock_id,
+ enum hrtimer_mode mode);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
- enum hrtimer_mode mode);
-extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
- clockid_t clock_id,
- enum hrtimer_mode mode);
-
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
-static inline void hrtimer_init_on_stack(struct hrtimer *timer,
- clockid_t which_clock,
- enum hrtimer_mode mode)
-{
- hrtimer_init(timer, which_clock, mode);
-}
-
-static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
- clockid_t clock_id,
- enum hrtimer_mode mode)
-{
- hrtimer_init_sleeper(sl, clock_id, mode);
-}
-
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
@@ -337,6 +322,28 @@ static inline int hrtimer_callback_running(struct hrtimer *timer)
return timer->base->running == timer;
}
+/**
+ * hrtimer_update_function - Update the timer's callback function
+ * @timer: Timer to update
+ * @function: New callback function
+ *
+ * This is only safe to call while the timer is not enqueued. It may be called
+ * from the timer's own callback, provided the timer is not enqueued again at
+ * the same time (see the comments above HRTIMER_STATE_ENQUEUED).
+ */
+static inline void hrtimer_update_function(struct hrtimer *timer,
+ enum hrtimer_restart (*function)(struct hrtimer *))
+{
+ guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock);
+
+ if (WARN_ON_ONCE(hrtimer_is_queued(timer)))
+ return;
+
+ if (WARN_ON_ONCE(!function))
+ return;
+
+ timer->function = function;
+}
+
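A sketch of the intended pattern (timer and handler names hypothetical): set the timer up once with hrtimer_setup(), then swap the handler only while the timer is known to be idle:

static struct hrtimer my_timer;

static enum hrtimer_restart phase_one(struct hrtimer *t) { return HRTIMER_NORESTART; }
static enum hrtimer_restart phase_two(struct hrtimer *t) { return HRTIMER_NORESTART; }

hrtimer_setup(&my_timer, phase_one, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* ... later, with my_timer guaranteed not to be enqueued ... */
hrtimer_update_function(&my_timer, phase_two);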
/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 5c6a421ad580..3a63dff62d03 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -368,7 +368,9 @@ enum hwmon_intrusion_attributes {
/**
* struct hwmon_ops - hwmon device operations
- * @is_visible: Callback to return attribute visibility. Mandatory.
+ * @visible: Static visibility. If non-zero, 'is_visible' is ignored.
+ * @is_visible: Callback to return attribute visibility. Mandatory unless
+ * 'visible' is non-zero.
* Parameters are:
* @const void *drvdata:
* Pointer to driver-private data structure passed
@@ -412,6 +414,7 @@ enum hwmon_intrusion_attributes {
* The function returns 0 on success or a negative error number.
*/
struct hwmon_ops {
+ umode_t visible;
umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel);
int (*read)(struct device *dev, enum hwmon_sensor_types type,
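A sketch of the simplification this enables for drivers whose attributes are uniformly visible (the callback name is hypothetical):

static const struct hwmon_ops my_hwmon_ops = {
	.visible = 0444,	/* all attributes read-only; .is_visible unused */
	.read = my_hwmon_read,
};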
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 456bca45ff05..05dedc45505c 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1445,6 +1445,8 @@ struct ieee80211_mgmt {
__le16 status;
__le16 capab;
__le16 timeout;
+ /* followed by BA Extension */
+ u8 variable[];
} __packed addba_resp;
struct{
u8 action_code;
diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h
deleted file mode 100644
index 4cc1c0b77870..000000000000
--- a/include/linux/if_ltalk.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_LTALK_H
-#define __LINUX_LTALK_H
-
-#include <uapi/linux/if_ltalk.h>
-
-extern struct net_device *alloc_ltalkdev(int sizeof_priv);
-#endif
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index cb5280e6cc21..5730ba6b1cfa 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -141,7 +141,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
ARP_EVICT_NOCARRIER)
struct in_ifaddr {
- struct hlist_node hash;
+ struct hlist_node addr_lst;
struct in_ifaddr __rcu *ifa_next;
struct in_device *ifa_dev;
struct rcu_head rcu_head;
@@ -226,6 +226,10 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr)
for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \
ifa = rtnl_dereference(ifa->ifa_next))
+#define in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev) \
+ for (ifa = rtnl_net_dereference(net, (in_dev)->ifa_list); ifa; \
+ ifa = rtnl_net_dereference(net, ifa->ifa_next))
+
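A sketch of walking a device's IPv4 addresses under the per-netns RTNL protection, paired with the __in_dev_get_rtnl_net() helper added below (net and dev are assumed caller variables):

struct in_device *in_dev = __in_dev_get_rtnl_net(dev);
struct in_ifaddr *ifa;

in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev)
	pr_debug("address %pI4\n", &ifa->ifa_local);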
#define in_dev_for_each_ifa_rcu(ifa, in_dev) \
for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \
ifa = rcu_dereference(ifa->ifa_next))
@@ -252,6 +256,11 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->ip_ptr);
}
+static inline struct in_device *__in_dev_get_rtnl_net(const struct net_device *dev)
+{
+ return rtnl_net_dereference(dev_net(dev), dev->ip_ptr);
+}
+
/* called with rcu_read_lock or rtnl held */
static inline bool ip_ignore_linkdown(const struct net_device *dev)
{
diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h
index 11ee185566c3..b94beab64610 100644
--- a/include/linux/intel_vsec.h
+++ b/include/linux/intel_vsec.h
@@ -74,10 +74,11 @@ enum intel_vsec_quirks {
* @pdev: PCI device reference for the callback's use
* @guid: ID of data to access
* @data: buffer for the data to be copied
+ * @off: offset into the requested buffer
* @count: size of buffer
*/
struct pmt_callbacks {
- int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, u32 count);
+ int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count);
};
/**
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 457151f9f263..8cd9327e4e78 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -616,6 +616,53 @@ extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+/*
+ * With forced-threaded interrupts enabled a raised softirq is deferred to
+ * ksoftirqd unless it can be handled within the threaded interrupt. This
+ * affects timer_list timers and hrtimers which are explicitly marked with
+ * HRTIMER_MODE_SOFT.
+ * With PREEMPT_RT enabled more hrtimers are moved to softirq for processing,
+ * which includes all timers which are not explicitly marked HRTIMER_MODE_HARD.
+ * Userspace controlled timers (like the clock_nanosleep() interface) are
+ * divided into two categories: tasks with an elevated scheduling policy,
+ * i.e. SCHED_{FIFO|RR|DL}, and all remaining scheduling policies. Tasks with
+ * an elevated scheduling policy are woken up directly from the HARDIRQ, while
+ * all other wake ups are delayed to softirq and so to ksoftirqd.
+ *
+ * ksoftirqd runs at SCHED_OTHER policy, and should remain there, since it
+ * handles the softirq in overload situations (when it did not manage to
+ * handle everything within its last run).
+ * If the timers were handled at SCHED_OTHER priority they would compete with
+ * all other SCHED_OTHER tasks for CPU resources and could be delayed.
+ * Moving timer softirqs to a low priority SCHED_FIFO thread instead ensures
+ * that timers are handled before any SCHED_OTHER thread is scheduled.
+ */
+DECLARE_PER_CPU(struct task_struct *, ktimerd);
+DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
+void raise_ktimers_thread(unsigned int nr);
+
+static inline unsigned int local_timers_pending_force_th(void)
+{
+ return __this_cpu_read(pending_timer_softirq);
+}
+
+static inline void raise_timer_softirq(unsigned int nr)
+{
+ lockdep_assert_in_irq();
+ if (force_irqthreads())
+ raise_ktimers_thread(nr);
+ else
+ __raise_softirq_irqoff(nr);
+}
+
+static inline unsigned int local_timers_pending(void)
+{
+ if (force_irqthreads())
+ return local_timers_pending_force_th();
+ else
+ return local_softirq_pending();
+}
+
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
static inline struct task_struct *this_cpu_ksoftirqd(void)
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index c189d36ad55e..578a3fdf5c71 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -110,7 +110,7 @@ static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
- return cmd_to_io_kiocb(cmd)->task;
+ return cmd_to_io_kiocb(cmd)->tctx->task;
}
#endif /* _LINUX_IO_URING_CMD_H */
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 4b9ba523978d..593c10a02144 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -37,6 +37,7 @@ enum io_uring_cmd_flags {
/* set when uring wants to cancel a previously issued command */
IO_URING_F_CANCEL = (1 << 11),
IO_URING_F_COMPAT = (1 << 12),
+ IO_URING_F_TASK_DEAD = (1 << 13),
};
struct io_wq_work_node {
@@ -55,19 +56,18 @@ struct io_wq_work {
int cancel_seq;
};
-struct io_fixed_file {
- /* file * with additional FFS_* flags */
- unsigned long file_ptr;
+struct io_rsrc_data {
+ unsigned int nr;
+ struct io_rsrc_node **nodes;
};
struct io_file_table {
- struct io_fixed_file *files;
+ struct io_rsrc_data data;
unsigned long *bitmap;
unsigned int alloc_hint;
};
struct io_hash_bucket {
- spinlock_t lock;
struct hlist_head list;
} ____cacheline_aligned_in_smp;
@@ -76,6 +76,12 @@ struct io_hash_table {
unsigned hash_bits;
};
+struct io_mapped_region {
+ struct page **pages;
+ void *vmap_ptr;
+ size_t nr_pages;
+};
+
/*
* Arbitrary limit, can be raised if need be
*/
@@ -85,6 +91,7 @@ struct io_uring_task {
/* submission side */
int cached_refs;
const struct io_ring_ctx *last;
+ struct task_struct *task;
struct io_wq *io_wq;
struct file *registered_rings[IO_RINGFD_REG_MAX];
@@ -270,7 +277,6 @@ struct io_ring_ctx {
* Fixed resources fast path, should be accessed only under
* uring_lock, and updated through io_uring_register(2)
*/
- struct io_rsrc_node *rsrc_node;
atomic_t cancel_seq;
/*
@@ -283,15 +289,13 @@ struct io_ring_ctx {
struct io_wq_work_list iopoll_list;
struct io_file_table file_table;
- struct io_mapped_ubuf **user_bufs;
- unsigned nr_user_files;
- unsigned nr_user_bufs;
+ struct io_rsrc_data buf_table;
struct io_submit_state submit_state;
struct xarray io_bl_xa;
- struct io_hash_table cancel_table_locked;
+ struct io_hash_table cancel_table;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
struct io_alloc_cache rw_cache;
@@ -302,6 +306,11 @@ struct io_ring_ctx {
* ->uring_cmd() by io_uring_cmd_insert_cancelable()
*/
struct hlist_head cancelable_uring_cmd;
+ /*
+	 * For hybrid IOPOLL, the time spent actively polling at runtime,
+	 * excluding scheduling time
+ */
+ u64 hybrid_poll_time;
} ____cacheline_aligned_in_smp;
struct {
@@ -316,6 +325,9 @@ struct io_ring_ctx {
unsigned cq_entries;
struct io_ev_fd __rcu *io_ev_fd;
unsigned cq_extra;
+
+ void *cq_wait_arg;
+ size_t cq_wait_size;
} ____cacheline_aligned_in_smp;
/*
@@ -342,7 +354,6 @@ struct io_ring_ctx {
struct list_head io_buffers_comp;
struct list_head cq_overflow_list;
- struct io_hash_table cancel_table;
struct hlist_head waitid_list;
@@ -366,16 +377,6 @@ struct io_ring_ctx {
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
- /* slow path rsrc auxilary data, used by update/register */
- struct io_rsrc_data *file_data;
- struct io_rsrc_data *buf_data;
-
- /* protected by ->uring_lock */
- struct list_head rsrc_ref_list;
- struct io_alloc_cache rsrc_node_cache;
- struct wait_queue_head rsrc_quiesce_wq;
- unsigned rsrc_quiesce;
-
u32 pers_next;
struct xarray personalities;
@@ -409,7 +410,7 @@ struct io_ring_ctx {
/* napi busy poll default timeout */
ktime_t napi_busy_poll_dt;
bool napi_prefer_busy_poll;
- bool napi_enabled;
+ u8 napi_track_mode;
DECLARE_HASHTABLE(napi_ht, 4);
#endif
@@ -418,6 +419,13 @@ struct io_ring_ctx {
unsigned evfd_last_cq_tail;
/*
+ * Protection for resize vs mmap races - both the mmap and resize
+ * side will need to grab this lock, to prevent either side from
+ * being run concurrently with the other.
+ */
+ struct mutex resize_lock;
+
+ /*
* If IORING_SETUP_NO_MMAP is used, then the below holds
* the gup'ed pages for the two rings, and the sqes.
*/
@@ -425,6 +433,9 @@ struct io_ring_ctx {
unsigned short n_sqe_pages;
struct page **ring_pages;
struct page **sqe_pages;
+
+ /* used for optimised request parameter and wait argument passing */
+ struct io_mapped_region param_region;
};
struct io_tw_state {
@@ -447,6 +458,7 @@ enum {
REQ_F_LINK_TIMEOUT_BIT,
REQ_F_NEED_CLEANUP_BIT,
REQ_F_POLLED_BIT,
+ REQ_F_HYBRID_IOPOLL_STATE_BIT,
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_BUFFER_RING_BIT,
REQ_F_REISSUE_BIT,
@@ -459,7 +471,6 @@ enum {
REQ_F_DOUBLE_POLL_BIT,
REQ_F_APOLL_MULTISHOT_BIT,
REQ_F_CLEAR_POLLIN_BIT,
- REQ_F_HASH_LOCKED_BIT,
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
@@ -468,6 +479,7 @@ enum {
REQ_F_BL_EMPTY_BIT,
REQ_F_BL_NO_RECYCLE_BIT,
REQ_F_BUFFERS_COMMIT_BIT,
+ REQ_F_BUF_NODE_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -506,6 +518,8 @@ enum {
REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
/* already went through poll handler */
REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT),
+ /* every req only blocks once in hybrid poll */
+ REQ_F_IOPOLL_STATE = IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT),
/* buffer already selected */
REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
/* buffer selected from ring, needs commit */
@@ -534,8 +548,6 @@ enum {
REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
/* recvmsg special flag, clear EPOLLIN */
REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
- /* hashed into ->cancel_hash_locked, protected by ->uring_lock */
- REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
/* don't use lazy poll wake for this request */
REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
/* file is pollable */
@@ -546,6 +558,8 @@ enum {
REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
/* buffer ring head needs incrementing on put */
REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
+ /* buf node is valid */
+ REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
@@ -615,12 +629,9 @@ struct io_kiocb {
struct io_cqe cqe;
struct io_ring_ctx *ctx;
- struct task_struct *task;
+ struct io_uring_task *tctx;
union {
- /* store used ubuf, so we can prevent reloading */
- struct io_mapped_ubuf *imu;
-
/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
struct io_buffer *kbuf;
@@ -629,6 +640,8 @@ struct io_kiocb {
* REQ_F_BUFFER_RING is set.
*/
struct io_buffer_list *buf_list;
+
+ struct io_rsrc_node *buf_node;
};
union {
@@ -638,13 +651,20 @@ struct io_kiocb {
__poll_t apoll_events;
};
- struct io_rsrc_node *rsrc_node;
+ struct io_rsrc_node *file_node;
atomic_t refs;
bool cancel_seq_set;
struct io_task_work io_task_work;
- /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
- struct hlist_node hash_node;
+ union {
+ /*
+ * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
+ * poll
+ */
+ struct hlist_node hash_node;
+ /* For IOPOLL setup queues, with hybrid polling */
+ u64 iopoll_start;
+ };
/* internal polling, see IORING_FEAT_FAST_POLL */
struct async_poll *apoll;
/* opcode allocated if it needs to store data for async defer */
@@ -667,4 +687,9 @@ struct io_overflow_cqe {
struct io_uring_cqe cqe;
};
+static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx)
+{
+ return ctx->flags & IORING_SETUP_CQE32;
+}
+
#endif
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index f61407e3b121..27048ec10e1c 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -178,6 +178,7 @@ struct iomap_folio_ops {
#else
#define IOMAP_DAX 0
#endif /* CONFIG_FS_DAX */
+#define IOMAP_ATOMIC (1 << 9)
struct iomap_ops {
/*
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 19a7b00baff4..91324c331a4b 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -19,19 +19,19 @@
* @op: accessor function (takes @args as its arguments)
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ *            read the usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
* @sleep_before_read: if it is true, sleep @sleep_us before read.
* @args: arguments for @op poll
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @args is stored in @val. Must not
- * be called from atomic context if sleep_us or timeout_us are used.
- *
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
*/
#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
sleep_before_read, args...) \
@@ -64,22 +64,22 @@
* @op: accessor function (takes @args as its arguments)
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
- * be less than ~10us since udelay is used (see
- * Documentation/timers/timers-howto.rst).
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ *            read the udelay() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
* @delay_before_read: if it is true, delay @delay_us before read.
* @args: arguments for @op poll
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @args is stored in @val.
- *
* This macro does not rely on timekeeping. Hence it is safe to call even when
* timekeeping is suspended, at the expense of an underestimation of wall clock
* time, which is rather minimal with a non-zero delay_us.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val.
*/
#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
delay_before_read, args...) \
@@ -119,17 +119,17 @@
* @addr: Address to poll
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ *            read the usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val. Must not
- * be called from atomic context if sleep_us or timeout_us are used.
- *
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
*/
#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr)
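A usage sketch (the register offset, ready bit and base pointer are hypothetical). Because sleep_us is non-zero, this must not be called from atomic context:

u32 val;
int ret;

/* base is the device's ioremap()ed register block (caller-provided). */
/* Poll roughly every 10us, give up after 100us; val keeps the last read. */
ret = readx_poll_timeout(ioread32, base + MY_STATUS_REG, val,
			 val & MY_STATUS_READY, 10, 100);
if (ret)
	return ret;	/* -ETIMEDOUT */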
@@ -140,16 +140,16 @@
* @addr: Address to poll
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
- * be less than ~10us since udelay is used (see
- * Documentation/timers/timers-howto.rst).
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ *            read the udelay() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val.
- *
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val.
*/
#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr)
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 3f003d5fde53..57b074e0cfbb 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -18,6 +18,8 @@
#include <asm/irqflags.h>
#include <asm/percpu.h>
+struct task_struct;
+
/* Currently lockdep_softirqs_on/off is used only by lockdep */
#ifdef CONFIG_PROVE_LOCKING
extern void lockdep_softirqs_on(unsigned long ip);
@@ -25,12 +27,16 @@
extern void lockdep_hardirqs_on_prepare(void);
extern void lockdep_hardirqs_on(unsigned long ip);
extern void lockdep_hardirqs_off(unsigned long ip);
+ extern void lockdep_cleanup_dead_cpu(unsigned int cpu,
+ struct task_struct *idle);
#else
static inline void lockdep_softirqs_on(unsigned long ip) { }
static inline void lockdep_softirqs_off(unsigned long ip) { }
static inline void lockdep_hardirqs_on_prepare(void) { }
static inline void lockdep_hardirqs_on(unsigned long ip) { }
static inline void lockdep_hardirqs_off(unsigned long ip) { }
+ static inline void lockdep_cleanup_dead_cpu(unsigned int cpu,
+ struct task_struct *idle) {}
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 3496baa0b07f..e97206c721a0 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -5,30 +5,36 @@
#include <uapi/linux/irqnr.h>
-extern int nr_irqs;
+unsigned int irq_get_nr_irqs(void) __pure;
+unsigned int irq_set_nr_irqs(unsigned int nr);
extern struct irq_desc *irq_to_desc(unsigned int irq);
unsigned int irq_get_next_irq(unsigned int offset);
-# define for_each_irq_desc(irq, desc) \
- for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
- irq++, desc = irq_to_desc(irq)) \
- if (!desc) \
- ; \
- else
-
+#define for_each_irq_desc(irq, desc) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = 0, desc = irq_to_desc(irq); irq < __nr_irqs__; \
+ irq++, desc = irq_to_desc(irq)) \
+ if (!desc) \
+ ; \
+ else
# define for_each_irq_desc_reverse(irq, desc) \
- for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \
- irq--, desc = irq_to_desc(irq)) \
+ for (irq = irq_get_nr_irqs() - 1, desc = irq_to_desc(irq); \
+ irq >= 0; irq--, desc = irq_to_desc(irq)) \
if (!desc) \
; \
else
-# define for_each_active_irq(irq) \
- for (irq = irq_get_next_irq(0); irq < nr_irqs; \
- irq = irq_get_next_irq(irq + 1))
+#define for_each_active_irq(irq) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = irq_get_next_irq(0); irq < __nr_irqs__; \
+ irq = irq_get_next_irq(irq + 1))
-#define for_each_irq_nr(irq) \
- for (irq = 0; irq < nr_irqs; irq++)
+#define for_each_irq_nr(irq) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = 0; irq < __nr_irqs__; irq++)
#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 8aef9bb6ad57..50f7ea8714bf 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1796,22 +1796,21 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
const void *address, unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[JBD_MAX_CHECKSUM_SIZE];
- } desc;
+ DEFINE_RAW_FLEX(struct shash_desc, desc, __ctx,
+ DIV_ROUND_UP(JBD_MAX_CHECKSUM_SIZE,
+ sizeof(*((struct shash_desc *)0)->__ctx)));
int err;
BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) >
JBD_MAX_CHECKSUM_SIZE);
- desc.shash.tfm = journal->j_chksum_driver;
- *(u32 *)desc.ctx = crc;
+ desc->tfm = journal->j_chksum_driver;
+ *(u32 *)desc->__ctx = crc;
- err = crypto_shash_update(&desc.shash, address, length);
+ err = crypto_shash_update(desc, address, length);
BUG_ON(err);
- return *(u32 *)desc.ctx;
+ return *(u32 *)desc->__ctx;
}
/* Return most recent uncommitted transaction */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 1220f0fbe5bf..ed945f42e064 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -502,7 +502,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows.
- * for the details see __msecs_to_jiffies()
+ * for the details see _msecs_to_jiffies()
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
@@ -526,6 +526,19 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
}
}
+/**
+ * secs_to_jiffies - convert seconds to jiffies
+ * @_secs: time in seconds
+ *
+ * Conversion is done by simple multiplication with HZ
+ *
+ * secs_to_jiffies() is defined as a macro rather than a static inline
+ * function so it can be used in static initializers.
+ *
+ * Return: jiffies value
+ */
+#define secs_to_jiffies(_secs) ((_secs) * HZ)
+
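A quick sketch of the intended replacement for open-coded msecs_to_jiffies(n * 1000) conversions:

/* sleep for five seconds; the macro form is also usable in static initializers */
schedule_timeout_interruptible(secs_to_jiffies(5));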
extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 9b4a6ff03235..c1a85d46eba6 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -270,9 +270,7 @@ enum {
/* bits 24:31 of host->flags are reserved for LLD specific flags */
- /* various lengths of time */
- ATA_TMOUT_BOOT = 30000, /* heuristic */
- ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
+ /* Various lengths of time */
ATA_TMOUT_INTERNAL_QUICK = 5000,
ATA_TMOUT_MAX_PARK = 30000,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 217f7abf2cbf..67964dc4db95 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -173,7 +173,7 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
(lock)->dep_map.lock_type)
#define lockdep_set_subclass(lock, sub) \
- lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
+ lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\
(lock)->dep_map.wait_type_inner, \
(lock)->dep_map.wait_type_outer, \
(lock)->dep_map.lock_type)
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index babf4e3c28ba..8f1a9408302f 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -17,7 +17,7 @@ enum {
struct logic_pio_hwaddr {
struct list_head list;
- struct fwnode_handle *fwnode;
+ const struct fwnode_handle *fwnode;
resource_size_t hw_start;
resource_size_t io_start;
resource_size_t size; /* range size populated */
@@ -110,8 +110,8 @@ void logic_outsl(unsigned long addr, const void *buffer, unsigned int count);
#endif /* CONFIG_INDIRECT_PIO */
#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE)
-struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
-unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+struct logic_pio_hwaddr *find_io_range_by_fwnode(const struct fwnode_handle *fwnode);
+unsigned long logic_pio_trans_hwaddr(const struct fwnode_handle *fwnode,
resource_size_t hw_addr, resource_size_t size);
int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
diff --git a/include/linux/lsm/apparmor.h b/include/linux/lsm/apparmor.h
new file mode 100644
index 000000000000..612cbfacb072
--- /dev/null
+++ b/include/linux/lsm/apparmor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * AppArmor presents single pointer to an aa_label structure.
+ */
+#ifndef __LINUX_LSM_APPARMOR_H
+#define __LINUX_LSM_APPARMOR_H
+
+struct aa_label;
+
+struct lsm_prop_apparmor {
+#ifdef CONFIG_SECURITY_APPARMOR
+ struct aa_label *label;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_APPARMOR_H */
diff --git a/include/linux/lsm/bpf.h b/include/linux/lsm/bpf.h
new file mode 100644
index 000000000000..8106e206fcef
--- /dev/null
+++ b/include/linux/lsm/bpf.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * BPF may present a single u32 value.
+ */
+#ifndef __LINUX_LSM_BPF_H
+#define __LINUX_LSM_BPF_H
+#include <linux/types.h>
+
+struct lsm_prop_bpf {
+#ifdef CONFIG_BPF_LSM
+ u32 secid;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_BPF_H */
diff --git a/include/linux/lsm/selinux.h b/include/linux/lsm/selinux.h
new file mode 100644
index 000000000000..9455a6b5b910
--- /dev/null
+++ b/include/linux/lsm/selinux.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * SELinux presents a single u32 value which is known as a secid.
+ */
+#ifndef __LINUX_LSM_SELINUX_H
+#define __LINUX_LSM_SELINUX_H
+#include <linux/types.h>
+
+struct lsm_prop_selinux {
+#ifdef CONFIG_SECURITY_SELINUX
+ u32 secid;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_SELINUX_H */
diff --git a/include/linux/lsm/smack.h b/include/linux/lsm/smack.h
new file mode 100644
index 000000000000..ff730dd7a734
--- /dev/null
+++ b/include/linux/lsm/smack.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * Smack presents a pointer into the global Smack label list.
+ */
+#ifndef __LINUX_LSM_SMACK_H
+#define __LINUX_LSM_SMACK_H
+
+struct smack_known;
+
+struct lsm_prop_smack {
+#ifdef CONFIG_SECURITY_SMACK
+ struct smack_known *skp;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_SMACK_H */
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 9eca013aa5e1..eb2937599cb0 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -176,7 +176,8 @@ LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode,
const char *name, const void *value, size_t size, int flags)
LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
size_t buffer_size)
-LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, inode_getlsmprop, struct inode *inode,
+ struct lsm_prop *prop)
LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src,
const char *name)
@@ -217,6 +218,8 @@ LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old,
LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new,
const struct cred *old)
LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, cred_getlsmprop, const struct cred *c,
+ struct lsm_prop *prop)
LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid)
LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode)
LSM_HOOK(int, 0, kernel_module_request, char *kmod_name)
@@ -235,9 +238,9 @@ LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred * old)
LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid)
LSM_HOOK(int, 0, task_getpgid, struct task_struct *p)
LSM_HOOK(int, 0, task_getsid, struct task_struct *p)
-LSM_HOOK(void, LSM_RET_VOID, current_getsecid_subj, u32 *secid)
-LSM_HOOK(void, LSM_RET_VOID, task_getsecid_obj,
- struct task_struct *p, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, current_getlsmprop_subj, struct lsm_prop *prop)
+LSM_HOOK(void, LSM_RET_VOID, task_getlsmprop_obj,
+ struct task_struct *p, struct lsm_prop *prop)
LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice)
LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio)
LSM_HOOK(int, 0, task_getioprio, struct task_struct *p)
@@ -256,8 +259,8 @@ LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p,
struct inode *inode)
LSM_HOOK(int, 0, userns_create, const struct cred *cred)
LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag)
-LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp,
- u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, ipc_getlsmprop, struct kern_ipc_perm *ipcp,
+ struct lsm_prop *prop)
LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg)
LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg)
LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm)
@@ -294,6 +297,8 @@ LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
LSM_HOOK(int, 0, ismaclabel, const char *name)
LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata,
u32 *seclen)
+LSM_HOOK(int, -EOPNOTSUPP, lsmprop_to_secctx, struct lsm_prop *prop,
+ char **secdata, u32 *seclen)
LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid)
LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
@@ -416,7 +421,8 @@ LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring,
LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
void **lsmrule, gfp_t gfp)
LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
-LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
+LSM_HOOK(int, 0, audit_rule_match, struct lsm_prop *prop, u32 field, u32 op,
+ void *lsmrule)
LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
#endif /* CONFIG_AUDIT */
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index efeca5bd7600..3c3deac57894 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -165,31 +165,12 @@ extern int mdio_set_flag(const struct mdio_if_info *mdio,
bool sense);
extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds);
extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
-extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
- struct ethtool_cmd *ecmd,
- u32 npage_adv, u32 npage_lpa);
extern void
mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio,
struct ethtool_link_ksettings *cmd,
u32 npage_adv, u32 npage_lpa);
/**
- * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
- * @mdio: MDIO interface
- * @ecmd: Ethtool request structure
- *
- * Since the CSRs for auto-negotiation using next pages are not fully
- * standardised, this function does not attempt to decode them. Use
- * mdio45_ethtool_gset_npage() to specify advertisement bits from next
- * pages.
- */
-static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
- struct ethtool_cmd *ecmd)
-{
- mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
-}
-
-/**
* mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS
* @mdio: MDIO interface
* @cmd: Ethtool request structure
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 34d2da05f2f1..e1b41554a5fb 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1760,8 +1760,9 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
-static inline void count_objcg_event(struct obj_cgroup *objcg,
- enum vm_event_item idx)
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+ enum vm_event_item idx,
+ unsigned long count)
{
struct mem_cgroup *memcg;
@@ -1770,7 +1771,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
- count_memcg_events(memcg, idx, 1);
+ count_memcg_events(memcg, idx, count);
rcu_read_unlock();
}
@@ -1825,8 +1826,9 @@ static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
return NULL;
}
-static inline void count_objcg_event(struct obj_cgroup *objcg,
- enum vm_event_item idx)
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+ enum vm_event_item idx,
+ unsigned long count)
{
}
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index ebf73d4ee969..107bdcbedf79 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -293,7 +293,7 @@ struct memstick_host {
};
struct memstick_driver {
- struct memstick_device_id *id_table;
+ const struct memstick_device_id *id_table;
int (*probe)(struct memstick_dev *card);
void (*remove)(struct memstick_dev *card);
int (*suspend)(struct memstick_dev *card,
diff --git a/include/linux/mfd/cgbc.h b/include/linux/mfd/cgbc.h
new file mode 100644
index 000000000000..badbec4c7033
--- /dev/null
+++ b/include/linux/mfd/cgbc.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Congatec Board Controller driver definitions
+ *
+ * Copyright (C) 2024 Bootlin
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#ifndef _LINUX_MFD_CGBC_H_
+#define _LINUX_MFD_CGBC_H_
+/**
+ * struct cgbc_version - Board Controller device version structure
+ * @feature: Board Controller feature number
+ * @major: Board Controller major revision
+ * @minor: Board Controller minor revision
+ */
+struct cgbc_version {
+ unsigned char feature;
+ unsigned char major;
+ unsigned char minor;
+};
+
+/**
+ * struct cgbc_device_data - Internal representation of the Board Controller device
+ * @io_session: Pointer to the session IO memory
+ * @io_cmd: Pointer to the command IO memory
+ * @session: Session id returned by the Board Controller
+ * @dev: Pointer to kernel device structure
+ * @version: Board Controller version structure
+ * @lock: Board Controller mutex
+ */
+struct cgbc_device_data {
+ void __iomem *io_session;
+ void __iomem *io_cmd;
+ u8 session;
+ struct device *dev;
+ struct cgbc_version version;
+ struct mutex lock;
+};
+
+int cgbc_command(struct cgbc_device_data *cgbc, void *cmd, unsigned int cmd_size,
+ void *data, unsigned int data_size, u8 *status);
+
+#endif /* _LINUX_MFD_CGBC_H_ */
diff --git a/include/linux/mfd/max5970.h b/include/linux/mfd/max5970.h
index 762a7d40c843..fc50e89edfaa 100644
--- a/include/linux/mfd/max5970.h
+++ b/include/linux/mfd/max5970.h
@@ -16,18 +16,6 @@
#define MAX5978_NUM_SWITCHES 1
#define MAX5970_NUM_LEDS 4
-struct max5970_data {
- int num_switches;
- u32 irng[MAX5970_NUM_SWITCHES];
- u32 mon_rng[MAX5970_NUM_SWITCHES];
- u32 shunt_micro_ohms[MAX5970_NUM_SWITCHES];
-};
-
-enum max5970_chip_type {
- TYPE_MAX5978 = 1,
- TYPE_MAX5970,
-};
-
#define MAX5970_REG_CURRENT_L(ch) (0x01 + (ch) * 4)
#define MAX5970_REG_CURRENT_H(ch) (0x00 + (ch) * 4)
#define MAX5970_REG_VOLTAGE_L(ch) (0x03 + (ch) * 4)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e23c692a34c7..fc7e6153b73d 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -45,7 +45,6 @@
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
-#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>
@@ -474,36 +473,6 @@ struct mlx5_core_sriov {
u16 max_ec_vfs;
};
-struct mlx5_fc_pool {
- struct mlx5_core_dev *dev;
- struct mutex pool_lock; /* protects pool lists */
- struct list_head fully_used;
- struct list_head partially_used;
- struct list_head unused;
- int available_fcs;
- int used_fcs;
- int threshold;
-};
-
-struct mlx5_fc_stats {
- spinlock_t counters_idr_lock; /* protects counters_idr */
- struct idr counters_idr;
- struct list_head counters;
- struct llist_head addlist;
- struct llist_head dellist;
-
- struct workqueue_struct *wq;
- struct delayed_work work;
- unsigned long next_query;
- unsigned long sampling_interval; /* jiffies */
- u32 *bulk_query_out;
- int bulk_query_len;
- size_t num_counters;
- bool bulk_query_alloc_failed;
- unsigned long next_bulk_query_alloc;
- struct mlx5_fc_pool fc_pool;
-};
-
struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
@@ -630,7 +599,7 @@ struct mlx5_priv {
struct mlx5_devcom_comp_dev *hca_devcom_comp;
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce;
- struct mlx5_fc_stats fc_stats;
+ struct mlx5_fc_stats *fc_stats;
struct mlx5_rl_table rl_table;
struct mlx5_ft_pool *ft_pool;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b744e554f014..438db888bde0 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -298,9 +298,6 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
-/* As mlx5_fc_create() but doesn't queue stats refresh thread. */
-struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging);
-
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 96d369112bfa..c79ba6197673 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -4105,11 +4105,47 @@ enum {
ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4,
};
+enum {
+ TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
+ TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
+ TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+};
+
+enum {
+ TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
+ TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
+ TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
+};
+
+struct mlx5_ifc_tsar_element_bits {
+ u8 reserved_at_0[0x8];
+ u8 tsar_type[0x8];
+ u8 reserved_at_10[0x10];
+};
+
+struct mlx5_ifc_vport_element_bits {
+ u8 reserved_at_0[0x10];
+ u8 vport_number[0x10];
+};
+
+struct mlx5_ifc_vport_tc_element_bits {
+ u8 traffic_class[0x4];
+ u8 reserved_at_4[0xc];
+ u8 vport_number[0x10];
+};
+
+union mlx5_ifc_element_attributes_bits {
+ struct mlx5_ifc_tsar_element_bits tsar;
+ struct mlx5_ifc_vport_element_bits vport;
+ struct mlx5_ifc_vport_tc_element_bits vport_tc;
+ u8 reserved_at_0[0x20];
+};
+
struct mlx5_ifc_scheduling_context_bits {
u8 element_type[0x8];
u8 reserved_at_8[0x18];
- u8 element_attributes[0x20];
+ union mlx5_ifc_element_attributes_bits element_attributes;
u8 parent_element_id[0x20];
@@ -4798,35 +4834,6 @@ struct mlx5_ifc_register_loopback_control_bits {
u8 reserved_at_20[0x60];
};
-struct mlx5_ifc_vport_tc_element_bits {
- u8 traffic_class[0x4];
- u8 reserved_at_4[0xc];
- u8 vport_number[0x10];
-};
-
-struct mlx5_ifc_vport_element_bits {
- u8 reserved_at_0[0x10];
- u8 vport_number[0x10];
-};
-
-enum {
- TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
- TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
- TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
-};
-
-enum {
- TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
- TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
- TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
-};
-
-struct mlx5_ifc_tsar_element_bits {
- u8 reserved_at_0[0x8];
- u8 tsar_type[0x8];
- u8 reserved_at_10[0x10];
-};
-
enum {
MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0,
MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 61fff5d34ed5..feb5c8021bef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -329,12 +329,14 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_6 38 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5)
+#define VM_HIGH_ARCH_6 BIT(VM_HIGH_ARCH_BIT_6)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#ifdef CONFIG_ARCH_HAS_PKEYS
@@ -365,7 +367,17 @@ extern unsigned int kobjsize(const void *objp);
* for more details on the guard size.
*/
# define VM_SHADOW_STACK VM_HIGH_ARCH_5
-#else
+#endif
+
+#if defined(CONFIG_ARM64_GCS)
+/*
+ * arm64's Guarded Control Stack implements similar functionality and
+ * has similar constraints to shadow stacks.
+ */
+# define VM_SHADOW_STACK VM_HIGH_ARCH_6
+#endif
+
+#ifndef VM_SHADOW_STACK
# define VM_SHADOW_STACK VM_NONE
#endif
@@ -4220,4 +4232,8 @@ static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
}
#endif /* CONFIG_MEM_ALLOC_PROFILING */
+int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
+int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
+int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6e3bdf8e38bc..e85beea1206e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -521,9 +521,6 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
*/
#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
-#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
-#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
-
/*
* page_private can be used on tail pages. However, PagePrivate is only
* checked by the VM on the head page. So page_private on the tail pages
@@ -542,21 +539,6 @@ static inline void *folio_get_private(struct folio *folio)
return folio->private;
}
-struct page_frag_cache {
- void * va;
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
- __u16 offset;
- __u16 size;
-#else
- __u32 offset;
-#endif
- /* we maintain a pagecount bias, so that we dont dirty cache line
- * containing page->_refcount every time we allocate a fragment.
- */
- unsigned int pagecnt_bias;
- bool pfmemalloc;
-};
-
typedef unsigned long vm_flags_t;
/*
@@ -782,6 +764,7 @@ struct vm_area_struct {
struct mm_cid {
u64 time;
int cid;
+ int recent_cid;
};
#endif
@@ -852,6 +835,27 @@ struct mm_struct {
* When the next mm_cid scan is due (in jiffies).
*/
unsigned long mm_cid_next_scan;
+ /**
+ * @nr_cpus_allowed: Number of CPUs allowed for mm.
+ *
+	 * Number of CPUs in the union of the allowed-CPUs
+	 * masks of all of this mm's threads.
+ */
+ unsigned int nr_cpus_allowed;
+ /**
+ * @max_nr_cid: Maximum number of concurrency IDs allocated.
+ *
+ * Track the highest number of concurrency IDs allocated for the
+ * mm.
+ */
+ atomic_t max_nr_cid;
+ /**
+ * @cpus_allowed_lock: Lock protecting mm cpus_allowed.
+ *
+ * Provide mutual exclusion for mm cpus_allowed and
+ * mm nr_cpus_allowed updates.
+ */
+ raw_spinlock_t cpus_allowed_lock;
#endif
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* size of all page tables */
@@ -1170,18 +1174,30 @@ static inline int mm_cid_clear_lazy_put(int cid)
return cid & ~MM_CID_LAZY_PUT;
}
+/*
+ * mm_cpus_allowed: Union of the allowed-CPUs masks of all the mm's threads.
+ */
+static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm)
+{
+ unsigned long bitmap = (unsigned long)mm;
+
+ bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ /* Skip cpu_bitmap */
+ bitmap += cpumask_size();
+ return (struct cpumask *)bitmap;
+}
+
/* Accessor for struct mm_struct's cidmask. */
static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
{
- unsigned long cid_bitmap = (unsigned long)mm;
+ unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm);
- cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
- /* Skip cpu_bitmap */
+ /* Skip mm_cpus_allowed */
cid_bitmap += cpumask_size();
return (struct cpumask *)cid_bitmap;
}
-static inline void mm_init_cid(struct mm_struct *mm)
+static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
{
int i;
@@ -1189,17 +1205,22 @@ static inline void mm_init_cid(struct mm_struct *mm)
struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
pcpu_cid->cid = MM_CID_UNSET;
+ pcpu_cid->recent_cid = MM_CID_UNSET;
pcpu_cid->time = 0;
}
+ mm->nr_cpus_allowed = p->nr_cpus_allowed;
+ atomic_set(&mm->max_nr_cid, 0);
+ raw_spin_lock_init(&mm->cpus_allowed_lock);
+ cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
cpumask_clear(mm_cidmask(mm));
}
-static inline int mm_alloc_cid_noprof(struct mm_struct *mm)
+static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
{
mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid);
if (!mm->pcpu_cid)
return -ENOMEM;
- mm_init_cid(mm);
+ mm_init_cid(mm, p);
return 0;
}
#define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
@@ -1212,16 +1233,31 @@ static inline void mm_destroy_cid(struct mm_struct *mm)
static inline unsigned int mm_cid_size(void)
{
- return cpumask_size();
+ return 2 * cpumask_size(); /* mm_cpus_allowed(), mm_cidmask(). */
+}
+
+static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask)
+{
+	struct cpumask *mm_allowed;
+
+	if (!mm)
+		return;
+	mm_allowed = mm_cpus_allowed(mm); /* union of each thread's allowed CPUs */
+ raw_spin_lock(&mm->cpus_allowed_lock);
+ cpumask_or(mm_allowed, mm_allowed, cpumask);
+ WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed));
+ raw_spin_unlock(&mm->cpus_allowed_lock);
}
#else /* CONFIG_SCHED_MM_CID */
-static inline void mm_init_cid(struct mm_struct *mm) { }
-static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; }
+static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
+static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
static inline void mm_destroy_cid(struct mm_struct *mm) { }
+
static inline unsigned int mm_cid_size(void)
{
return 0;
}
+static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) { }
#endif /* CONFIG_SCHED_MM_CID */
struct mmu_gather;
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index bff5706b76e1..a82aa80c0ba4 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -8,6 +8,7 @@
* (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
*/
+#include <linux/align.h>
#include <linux/types.h>
#include <asm/page.h>
@@ -43,6 +44,26 @@ struct page_frag {
#endif
};
+#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+struct page_frag_cache {
+ /* encoded_page consists of the virtual address, pfmemalloc bit and
+ * order of a page.
+ */
+ unsigned long encoded_page;
+
+	/* we maintain a pagecount bias, so that we don't dirty cache line
+ * containing page->_refcount every time we allocate a fragment.
+ */
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
+ __u16 offset;
+ __u16 pagecnt_bias;
+#else
+ __u32 offset;
+ __u32 pagecnt_bias;
+#endif
+};
+
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
diff --git a/include/linux/mman.h b/include/linux/mman.h
index bcb201ab7a41..a842783ffa62 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H
+#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu_counter.h>
@@ -94,7 +95,7 @@ static inline void vm_unacct_memory(long pages)
#endif
#ifndef arch_calc_vm_flag_bits
-#define arch_calc_vm_flag_bits(flags) 0
+#define arch_calc_vm_flag_bits(file, flags) 0
#endif
#ifndef arch_validate_prot
@@ -151,13 +152,13 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
* Combine the mmap "flags" argument into "vm_flags" used internally.
*/
static inline unsigned long
-calc_vm_flag_bits(unsigned long flags)
+calc_vm_flag_bits(struct file *file, unsigned long flags)
{
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
_calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) |
- arch_calc_vm_flag_bits(flags);
+ arch_calc_vm_flag_bits(file, flags);
}
unsigned long vm_commit_limit(void);
@@ -188,16 +189,31 @@ static inline bool arch_memory_deny_write_exec_supported(void)
*
* d) mmap(PROT_READ | PROT_EXEC)
* mmap(PROT_READ | PROT_EXEC | PROT_BTI)
+ *
+ * This is only applicable if the user has set the Memory-Deny-Write-Execute
+ * (MDWE) protection mask for the current process.
+ *
+ * @old specifies the VMA flags the VMA originally possessed, and @new the ones
+ * we propose to set.
+ *
+ * Return: false if the proposed change is OK, true if it is not and should be denied.
*/
-static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)
+static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
+ /* If MDWE is disabled, we have nothing to deny. */
if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
return false;
- if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
+ /* If the new VMA is not executable, we have nothing to deny. */
+ if (!(new & VM_EXEC))
+ return false;
+
+ /* Under MDWE we do not accept newly writably executable VMAs... */
+ if (new & VM_WRITE)
return true;
- if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
+ /* ...nor previously non-executable VMAs becoming executable. */
+ if (!(old & VM_EXEC))
return true;
return false;
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index f34407cc2788..526fce581657 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -35,7 +35,7 @@ struct mmc_csd {
unsigned int wp_grp_size;
unsigned int read_blkbits;
unsigned int write_blkbits;
- unsigned int capacity;
+ sector_t capacity;
unsigned int read_partial:1,
read_misalign:1,
write_partial:1,
@@ -188,6 +188,12 @@ struct sd_switch_caps {
#define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400)
#define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600)
#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
+
+#define SD4_SET_POWER_LIMIT_0_72W 0
+#define SD4_SET_POWER_LIMIT_1_44W 1
+#define SD4_SET_POWER_LIMIT_2_16W 2
+#define SD4_SET_POWER_LIMIT_2_88W 3
+#define SD4_SET_POWER_LIMIT_1_80W 4
};
struct sd_ext_reg {
@@ -209,6 +215,34 @@ struct sd_ext_reg {
#define SD_EXT_PERF_CMD_QUEUE (1<<4)
};
+struct sd_uhs2_config {
+ u32 node_id;
+
+ u32 n_fcu;
+ u32 maxblk_len;
+ u8 n_lanes;
+ u8 dadr_len;
+ u8 app_type;
+ u8 phy_minor_rev;
+ u8 phy_major_rev;
+ u8 can_hibernate;
+ u8 n_lss_sync;
+ u8 n_lss_dir;
+ u8 link_minor_rev;
+ u8 link_major_rev;
+ u8 dev_type;
+ u8 n_data_gap;
+
+ u32 n_fcu_set;
+ u32 maxblk_len_set;
+ u8 n_lanes_set;
+ u8 speed_range_set;
+ u8 n_lss_sync_set;
+ u8 n_lss_dir_set;
+ u8 n_data_gap_set;
+ u8 max_retry_set;
+};
+
struct sdio_cccr {
unsigned int sdio_vsn;
unsigned int sd_vsn;
@@ -294,6 +328,7 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
#define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
+#define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */
bool written_flag; /* Indicates eMMC has been written since power on */
bool reenable_cmdq; /* Re-enable Command Queue */
@@ -319,6 +354,8 @@ struct mmc_card {
struct sd_ext_reg ext_power; /* SD extension reg for PM */
struct sd_ext_reg ext_perf; /* SD extension reg for PERF */
+ struct sd_uhs2_config uhs2_config; /* SD UHS-II config */
+
unsigned int sdio_funcs; /* number of SDIO functions */
atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
struct sdio_cccr cccr; /* common card info */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index f0ac2e469b32..56972bd78462 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -11,6 +11,20 @@
struct mmc_data;
struct mmc_request;
+#define UHS2_MAX_PAYLOAD_LEN 2
+#define UHS2_MAX_RESP_LEN 20
+
+struct uhs2_command {
+ u16 header;
+ u16 arg;
+ __be32 payload[UHS2_MAX_PAYLOAD_LEN];
+ u8 payload_len;
+ u8 packet_len;
+ u8 tmode_half_duplex;
+ u8 uhs2_resp[UHS2_MAX_RESP_LEN]; /* UHS2 native cmd resp */
+ u8 uhs2_resp_len; /* UHS2 native cmd resp len */
+};
+
struct mmc_command {
u32 opcode;
u32 arg;
@@ -96,6 +110,12 @@ struct mmc_command {
unsigned int busy_timeout; /* busy detect timeout in ms */
struct mmc_data *data; /* data segment associated with cmd */
struct mmc_request *mrq; /* associated request */
+
+ struct uhs2_command *uhs2_cmd; /* UHS2 command */
+
+ /* for SDUC */
+ bool has_ext_addr;
+ u8 ext_addr;
};
struct mmc_data {
@@ -154,6 +174,7 @@ struct mmc_request {
const struct bio_crypt_ctx *crypto_ctx;
int crypto_key_slot;
#endif
+ struct uhs2_command uhs2_cmd;
};
struct mmc_card;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 8fc2b328ec4d..f166d6611ddb 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -17,6 +17,7 @@
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
#include <linux/blk-crypto-profile.h>
+#include <linux/mmc/sd_uhs2.h>
struct mmc_ios {
unsigned int clock; /* clock rate */
@@ -64,6 +65,10 @@ struct mmc_ios {
#define MMC_TIMING_MMC_HS400 10
#define MMC_TIMING_SD_EXP 11
#define MMC_TIMING_SD_EXP_1_2V 12
+#define MMC_TIMING_UHS2_SPEED_A 13
+#define MMC_TIMING_UHS2_SPEED_A_HD 14
+#define MMC_TIMING_UHS2_SPEED_B 15
+#define MMC_TIMING_UHS2_SPEED_B_HD 16
unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */
@@ -71,6 +76,9 @@ struct mmc_ios {
#define MMC_SIGNAL_VOLTAGE_180 1
#define MMC_SIGNAL_VOLTAGE_120 2
+ unsigned char vqmmc2_voltage;
+#define MMC_VQMMC2_VOLTAGE_180 0
+
unsigned char drv_type; /* driver type (A, B, C, D) */
#define MMC_SET_DRIVER_TYPE_B 0
@@ -92,6 +100,43 @@ struct mmc_clk_phase_map {
struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES];
};
+struct sd_uhs2_caps {
+ u32 dap;
+ u32 gap;
+ u32 group_desc;
+ u32 maxblk_len;
+ u32 n_fcu;
+ u8 n_lanes;
+ u8 addr64;
+ u8 card_type;
+ u8 phy_rev;
+ u8 speed_range;
+ u8 n_lss_sync;
+ u8 n_lss_dir;
+ u8 link_rev;
+ u8 host_type;
+ u8 n_data_gap;
+
+ u32 maxblk_len_set;
+ u32 n_fcu_set;
+ u8 n_lanes_set;
+ u8 n_lss_sync_set;
+ u8 n_lss_dir_set;
+ u8 n_data_gap_set;
+ u8 max_retry_set;
+};
+
+enum sd_uhs2_operation {
+ UHS2_PHY_INIT = 0,
+ UHS2_SET_CONFIG,
+ UHS2_ENABLE_INT,
+ UHS2_DISABLE_INT,
+ UHS2_ENABLE_CLK,
+ UHS2_DISABLE_CLK,
+ UHS2_CHECK_DORMANT,
+ UHS2_SET_IOS,
+};
+
struct mmc_host;
enum mmc_err_stat {
@@ -219,6 +264,14 @@ struct mmc_host_ops {
/* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */
int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios);
+
+ /*
+ * The uhs2_control callback is used to execute SD UHS-II specific
+	 * operations. It's mandatory to implement for hosts that support the
+ * SD UHS-II interface (MMC_CAP2_SD_UHS2). Expected return values are a
+ * negative errno in case of a failure or zero for success.
+ */
+ int (*uhs2_control)(struct mmc_host *host, enum sd_uhs2_operation op);
};
struct mmc_cqe_ops {
@@ -288,6 +341,7 @@ struct mmc_pwrseq;
struct mmc_supply {
struct regulator *vmmc; /* Card power supply */
struct regulator *vqmmc; /* Optional Vccq supply */
+ struct regulator *vqmmc2; /* Optional supply for phy */
};
struct mmc_ctx {
@@ -379,6 +433,7 @@ struct mmc_host {
MMC_CAP2_HS200_1_2V_SDR)
#define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */
#define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */
+#define MMC_CAP2_SD_UHS2 (1 << 9) /* SD UHS-II support */
#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
@@ -405,6 +460,10 @@ struct mmc_host {
#endif
#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */
+ bool uhs2_sd_tran; /* UHS-II flag for SD_TRAN state */
+ bool uhs2_app_cmd; /* UHS-II flag for APP command */
+ struct sd_uhs2_caps uhs2_caps; /* Host UHS-II capabilities */
+
int fixed_drv_type; /* fixed driver type for non-removable media */
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -567,6 +626,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit);
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
+int mmc_regulator_set_vqmmc2(struct mmc_host *mmc, struct mmc_ios *ios);
#else
static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
@@ -580,6 +640,12 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
{
return -EINVAL;
}
+
+static inline int mmc_regulator_set_vqmmc2(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ return -EINVAL;
+}
#endif
int mmc_regulator_get_supply(struct mmc_host *mmc);
@@ -615,6 +681,14 @@ static inline int mmc_card_uhs(struct mmc_card *card)
card->host->ios.timing <= MMC_TIMING_UHS_DDR50;
}
+static inline bool mmc_card_uhs2(struct mmc_host *host)
+{
+ return host->ios.timing == MMC_TIMING_UHS2_SPEED_A ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_B ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD;
+}
+
void mmc_retune_timer_stop(struct mmc_host *host);
static inline void mmc_retune_needed(struct mmc_host *host)
@@ -649,6 +723,12 @@ static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
host->err_stats[stat] += 1;
}
+static inline int mmc_card_uhs2_hd_mode(struct mmc_host *host)
+{
+ return host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD;
+}
+
int mmc_sd_switch(struct mmc_card *card, bool mode, int group,
u8 value, u8 *resp);
int mmc_send_status(struct mmc_card *card, u32 *status);
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 6727576a8755..af5fc70e09a2 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -15,6 +15,9 @@
#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */
#define SD_SWITCH_VOLTAGE 11 /* ac R1 */
+/* Class 2 */
+#define SD_ADDR_EXT 22 /* ac [5:0] R1 */
+
/* class 10 */
#define SD_SWITCH 6 /* adtc [31:0] See below R1 */
@@ -36,6 +39,7 @@
/* OCR bit definitions */
#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */
#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */
+#define SD_OCR_2T (1 << 27) /* HO2T/CO2T - SDUC support */
#define SD_OCR_XPC (1 << 28) /* SDXC power control */
#define SD_OCR_CCS (1 << 30) /* Card Capacity Status */
diff --git a/include/linux/mmc/sd_uhs2.h b/include/linux/mmc/sd_uhs2.h
new file mode 100644
index 000000000000..7abe9bd870c7
--- /dev/null
+++ b/include/linux/mmc/sd_uhs2.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Header file for UHS-II packets, Host Controller registers and I/O
+ * accessors.
+ *
+ * Copyright (C) 2014 Intel Corp, All Rights Reserved.
+ */
+#ifndef LINUX_MMC_UHS2_H
+#define LINUX_MMC_UHS2_H
+
+/* LINK Layer definition */
+/*
+ * UHS2 Header:
+ * Refer to UHS-II Addendum Version 1.02 Figure 5-2; the format of the CCMD Header is described below:
+ * bit [3:0] : DID(Destination ID = Node ID of UHS2 card)
+ * bit [6:4] : TYP(Packet Type)
+ * 000b: CCMD(Control command packet)
+ * 001b: DCMD(Data command packet)
+ * 010b: RES(Response packet)
+ * 011b: DATA(Data payload packet)
+ * 111b: MSG(Message packet)
+ * Others: Reserved
+ * bit [7] : NP(Native Packet)
+ * bit [10:8] : TID(Transaction ID)
+ * bit [11] : Reserved
+ * bit [15:12]: SID(Source ID 0: Node ID of Host)
+ *
+ * Broadcast CCMD issued by Host is represented as DID=SID=0.
+ */
+/*
+ * UHS2 Argument:
+ * Refer to UHS-II Addendum Version 1.02 Figure 6-5; the format of the CCMD Argument is described below:
+ * bit [3:0] : MSB of IOADR
+ * bit [5:4] : PLEN(Payload Length)
+ * 00b: 0 byte
+ * 01b: 4 bytes
+ * 10b: 8 bytes
+ * 11b: 16 bytes
+ * bit [6] : Reserved
+ * bit [7] : R/W(Read/Write)
+ * 0: Control read command
+ * 1: Control write command
+ * bit [15:8] : LSB of IOADR
+ *
+ * I/O Address specifies the address of a register in the UHS-II I/O space accessed by CCMD.
+ * The unit of I/O Address is 4 Bytes. It is transmitted in MSB first, LSB last.
+ */
+#define UHS2_NATIVE_PACKET_POS 7
+#define UHS2_NATIVE_PACKET (1 << UHS2_NATIVE_PACKET_POS)
+
+#define UHS2_PACKET_TYPE_POS 4
+#define UHS2_PACKET_TYPE_CCMD (0 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_DCMD (1 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_RES (2 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_DATA (3 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_MSG (7 << UHS2_PACKET_TYPE_POS)
+
+#define UHS2_DEST_ID_MASK 0x0F
+#define UHS2_DEST_ID 0x1
+
+#define UHS2_SRC_ID_POS 12
+#define UHS2_SRC_ID_MASK 0xF000
+
+#define UHS2_TRANS_ID_POS 8
+#define UHS2_TRANS_ID_MASK 0x0700
+
+/* UHS2 MSG */
+#define UHS2_MSG_CTG_POS 5
+#define UHS2_MSG_CTG_LMSG 0x00
+#define UHS2_MSG_CTG_INT 0x60
+#define UHS2_MSG_CTG_AMSG 0x80
+
+#define UHS2_MSG_CTG_FCREQ 0x00
+#define UHS2_MSG_CTG_FCRDY 0x01
+#define UHS2_MSG_CTG_STAT 0x02
+
+#define UHS2_MSG_CODE_POS 8
+#define UHS2_MSG_CODE_FC_UNRECOVER_ERR 0x8
+#define UHS2_MSG_CODE_STAT_UNRECOVER_ERR 0x8
+#define UHS2_MSG_CODE_STAT_RECOVER_ERR 0x1
+
+/* TRANS Layer definition */
+
+/* Native packets */
+#define UHS2_NATIVE_CMD_RW_POS 7
+#define UHS2_NATIVE_CMD_WRITE (1 << UHS2_NATIVE_CMD_RW_POS)
+#define UHS2_NATIVE_CMD_READ (0 << UHS2_NATIVE_CMD_RW_POS)
+
+#define UHS2_NATIVE_CMD_PLEN_POS 4
+#define UHS2_NATIVE_CMD_PLEN_4B (1 << UHS2_NATIVE_CMD_PLEN_POS)
+#define UHS2_NATIVE_CMD_PLEN_8B (2 << UHS2_NATIVE_CMD_PLEN_POS)
+#define UHS2_NATIVE_CMD_PLEN_16B (3 << UHS2_NATIVE_CMD_PLEN_POS)
+
+#define UHS2_NATIVE_CCMD_GET_MIOADR_MASK 0xF00
+#define UHS2_NATIVE_CCMD_MIOADR_MASK 0x0F
+
+#define UHS2_NATIVE_CCMD_LIOADR_POS 8
+#define UHS2_NATIVE_CCMD_GET_LIOADR_MASK 0x0FF
+
+#define UHS2_CCMD_DEV_INIT_COMPLETE_FLAG BIT(11)
+#define UHS2_DEV_INIT_PAYLOAD_LEN 1
+#define UHS2_DEV_INIT_RESP_LEN 6
+#define UHS2_DEV_ENUM_PAYLOAD_LEN 1
+#define UHS2_DEV_ENUM_RESP_LEN 8
+#define UHS2_CFG_WRITE_PAYLOAD_LEN 2
+#define UHS2_CFG_WRITE_PHY_SET_RESP_LEN 4
+#define UHS2_CFG_WRITE_GENERIC_SET_RESP_LEN 5
+#define UHS2_GO_DORMANT_PAYLOAD_LEN 1
+
+/*
+ * UHS2 Argument:
+ * Refer to UHS-II Addendum Version 1.02 Figure 6-8; the format of the DCMD Argument is described below:
+ * bit [2:0] : Reserved
+ * bit [6:3] : TMODE(Transfer Mode)
+ * bit 3: DAM(Data Access Mode)
+ * bit 4: TLUM(TLEN Unit Mode)
+ * bit 5: LM(Length Mode)
+ * bit 6: DM(Duplex Mode)
+ * bit [7] : R/W(Read/Write)
+ * 0: Data read command
+ * 1: Data write command
+ * bit [15:8] : Reserved
+ *
+ * TLEN, when present (LM is set), gives the transfer length; TLUM selects
+ * whether it is counted in bytes or in blocks.
+ */
+#define UHS2_DCMD_DM_POS 6
+#define UHS2_DCMD_2L_HD_MODE (1 << UHS2_DCMD_DM_POS)
+#define UHS2_DCMD_LM_POS 5
+#define UHS2_DCMD_LM_TLEN_EXIST (1 << UHS2_DCMD_LM_POS)
+#define UHS2_DCMD_TLUM_POS 4
+#define UHS2_DCMD_TLUM_BYTE_MODE (1 << UHS2_DCMD_TLUM_POS)
+#define UHS2_NATIVE_DCMD_DAM_POS 3
+#define UHS2_NATIVE_DCMD_DAM_IO (1 << UHS2_NATIVE_DCMD_DAM_POS)
+
+#define UHS2_RES_NACK_POS 7
+#define UHS2_RES_NACK_MASK (0x1 << UHS2_RES_NACK_POS)
+
+#define UHS2_RES_ECODE_POS 4
+#define UHS2_RES_ECODE_MASK 0x7
+#define UHS2_RES_ECODE_COND 1
+#define UHS2_RES_ECODE_ARG 2
+#define UHS2_RES_ECODE_GEN 3
+
+/* IOADR of device registers */
+#define UHS2_IOADR_GENERIC_CAPS 0x00
+#define UHS2_IOADR_PHY_CAPS 0x02
+#define UHS2_IOADR_LINK_CAPS 0x04
+#define UHS2_IOADR_RSV_CAPS 0x06
+#define UHS2_IOADR_GENERIC_SETTINGS 0x08
+#define UHS2_IOADR_PHY_SETTINGS 0x0A
+#define UHS2_IOADR_LINK_SETTINGS 0x0C
+#define UHS2_IOADR_PRESET 0x40
+
+/* SD application packets */
+#define UHS2_SD_CMD_INDEX_POS 8
+
+#define UHS2_SD_CMD_APP_POS 14
+#define UHS2_SD_CMD_APP (1 << UHS2_SD_CMD_APP_POS)
+
+/* UHS-II Device Registers */
+#define UHS2_DEV_CONFIG_REG 0x000
+
+/* General Caps and Settings registers */
+#define UHS2_DEV_CONFIG_GEN_CAPS (UHS2_DEV_CONFIG_REG + 0x000)
+#define UHS2_DEV_CONFIG_N_LANES_POS 8
+#define UHS2_DEV_CONFIG_N_LANES_MASK 0x3F
+#define UHS2_DEV_CONFIG_2L_HD_FD 0x1
+#define UHS2_DEV_CONFIG_2D1U_FD 0x2
+#define UHS2_DEV_CONFIG_1D2U_FD 0x4
+#define UHS2_DEV_CONFIG_2D2U_FD 0x8
+#define UHS2_DEV_CONFIG_DADR_POS 14
+#define UHS2_DEV_CONFIG_DADR_MASK 0x1
+#define UHS2_DEV_CONFIG_APP_POS 16
+#define UHS2_DEV_CONFIG_APP_MASK 0xFF
+#define UHS2_DEV_CONFIG_APP_SD_MEM 0x1
+
+#define UHS2_DEV_CONFIG_GEN_SET (UHS2_DEV_CONFIG_REG + 0x008)
+#define UHS2_DEV_CONFIG_GEN_SET_N_LANES_POS 8
+#define UHS2_DEV_CONFIG_GEN_SET_2L_FD_HD 0x0
+#define UHS2_DEV_CONFIG_GEN_SET_2D1U_FD 0x2
+#define UHS2_DEV_CONFIG_GEN_SET_1D2U_FD 0x3
+#define UHS2_DEV_CONFIG_GEN_SET_2D2U_FD 0x4
+#define UHS2_DEV_CONFIG_GEN_SET_CFG_COMPLETE BIT(31)
+
+/* PHY Caps and Settings registers */
+#define UHS2_DEV_CONFIG_PHY_CAPS (UHS2_DEV_CONFIG_REG + 0x002)
+#define UHS2_DEV_CONFIG_PHY_MINOR_MASK 0xF
+#define UHS2_DEV_CONFIG_PHY_MAJOR_POS 4
+#define UHS2_DEV_CONFIG_PHY_MAJOR_MASK 0x3
+#define UHS2_DEV_CONFIG_CAN_HIBER_POS 15
+#define UHS2_DEV_CONFIG_CAN_HIBER_MASK 0x1
+#define UHS2_DEV_CONFIG_PHY_CAPS1 (UHS2_DEV_CONFIG_REG + 0x003)
+#define UHS2_DEV_CONFIG_N_LSS_SYN_MASK 0xF
+#define UHS2_DEV_CONFIG_N_LSS_DIR_POS 4
+#define UHS2_DEV_CONFIG_N_LSS_DIR_MASK 0xF
+
+#define UHS2_DEV_CONFIG_PHY_SET (UHS2_DEV_CONFIG_REG + 0x00A)
+#define UHS2_DEV_CONFIG_PHY_SET_SPEED_POS 6
+#define UHS2_DEV_CONFIG_PHY_SET_SPEED_A 0x0
+#define UHS2_DEV_CONFIG_PHY_SET_SPEED_B 0x1
+
+/* LINK-TRAN Caps and Settings registers */
+#define UHS2_DEV_CONFIG_LINK_TRAN_CAPS (UHS2_DEV_CONFIG_REG + 0x004)
+#define UHS2_DEV_CONFIG_LT_MINOR_MASK 0xF
+#define UHS2_DEV_CONFIG_LT_MAJOR_POS 4
+#define UHS2_DEV_CONFIG_LT_MAJOR_MASK 0x3
+#define UHS2_DEV_CONFIG_N_FCU_POS 8
+#define UHS2_DEV_CONFIG_N_FCU_MASK 0xFF
+#define UHS2_DEV_CONFIG_DEV_TYPE_POS 16
+#define UHS2_DEV_CONFIG_DEV_TYPE_MASK 0x7
+#define UHS2_DEV_CONFIG_MAX_BLK_LEN_POS 20
+#define UHS2_DEV_CONFIG_MAX_BLK_LEN_MASK 0xFFF
+#define UHS2_DEV_CONFIG_LINK_TRAN_CAPS1 (UHS2_DEV_CONFIG_REG + 0x005)
+#define UHS2_DEV_CONFIG_N_DATA_GAP_MASK 0xFF
+
+#define UHS2_DEV_CONFIG_LINK_TRAN_SET (UHS2_DEV_CONFIG_REG + 0x00C)
+#define UHS2_DEV_CONFIG_LT_SET_MAX_BLK_LEN 0x200
+#define UHS2_DEV_CONFIG_LT_SET_MAX_RETRY_POS 16
+
+/* Preset register */
+#define UHS2_DEV_CONFIG_PRESET (UHS2_DEV_CONFIG_REG + 0x040)
+
+#define UHS2_DEV_INT_REG 0x100
+
+#define UHS2_DEV_STATUS_REG 0x180
+
+#define UHS2_DEV_CMD_REG 0x200
+#define UHS2_DEV_CMD_FULL_RESET (UHS2_DEV_CMD_REG + 0x000)
+#define UHS2_DEV_CMD_GO_DORMANT_STATE (UHS2_DEV_CMD_REG + 0x001)
+#define UHS2_DEV_CMD_DORMANT_HIBER BIT(7)
+#define UHS2_DEV_CMD_DEVICE_INIT (UHS2_DEV_CMD_REG + 0x002)
+#define UHS2_DEV_INIT_COMPLETE_FLAG BIT(11)
+#define UHS2_DEV_CMD_ENUMERATE (UHS2_DEV_CMD_REG + 0x003)
+#define UHS2_DEV_CMD_TRANS_ABORT (UHS2_DEV_CMD_REG + 0x004)
+
+#define UHS2_RCLK_MAX 52000000
+#define UHS2_RCLK_MIN 26000000
+
+#endif /* LINUX_MMC_UHS2_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5b1c984daf45..80bc5640bb60 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -823,6 +823,7 @@ struct zone {
unsigned long watermark_boost;
unsigned long nr_reserved_highatomic;
+ unsigned long nr_free_highatomic;
/*
* We don't know if the memory that we're going to allocate will be
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 8896705ccd63..ecc686409161 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -52,6 +52,7 @@
#include <net/net_trackers.h>
#include <net/net_debug.h>
#include <net/dropreason-core.h>
+#include <net/neighbour_tables.h>
struct netpoll_info;
struct device;
@@ -343,6 +344,16 @@ struct gro_list {
#define GRO_HASH_BUCKETS 8
/*
+ * Structure for per-NAPI config
+ */
+struct napi_config {
+ u64 gro_flush_timeout;
+ u64 irq_suspend_timeout;
+ u32 defer_hard_irqs;
+ unsigned int napi_id;
+};
+
+/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
struct napi_struct {
@@ -373,10 +384,15 @@ struct napi_struct {
unsigned int napi_id;
struct hrtimer timer;
struct task_struct *thread;
+ unsigned long gro_flush_timeout;
+ unsigned long irq_suspend_timeout;
+ u32 defer_hard_irqs;
/* control-path-only fields follow */
struct list_head dev_list;
struct hlist_node napi_hash_node;
int irq;
+ int index;
+ struct napi_config *config;
};
enum {
@@ -1232,12 +1248,17 @@ struct netdev_net_notifier {
* int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
* const unsigned char *addr, u16 vid, u16 flags,
- * struct netlink_ext_ack *extack);
+ * bool *notified, struct netlink_ext_ack *extack);
* Adds an FDB entry to dev for addr.
+ * Callee shall set *notified to true if it sent any appropriate
+ * notification(s); otherwise the core will send a generic one.
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr, u16 vid)
+ * const unsigned char *addr, u16 vid,
+ * bool *notified, struct netlink_ext_ack *extack);
* Deletes the FDB entry from dev corresponding to addr.
+ * Callee shall set *notified to true if it sent any appropriate
+ * notification(s); otherwise the core will send a generic one.
* int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
* struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
@@ -1412,8 +1433,7 @@ struct net_device_ops {
__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
- int (*ndo_netpoll_setup)(struct net_device *dev,
- struct netpoll_info *info);
+ int (*ndo_netpoll_setup)(struct net_device *dev);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
@@ -1510,12 +1530,15 @@ struct net_device_ops {
const unsigned char *addr,
u16 vid,
u16 flags,
+ bool *notified,
struct netlink_ext_ack *extack);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 vid, struct netlink_ext_ack *extack);
+ u16 vid,
+ bool *notified,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
struct net_device *dev,
struct netlink_ext_ack *extack);
@@ -1603,6 +1626,14 @@ struct net_device_ops {
int (*ndo_hwtstamp_set)(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_config,
struct netlink_ext_ack *extack);
+
+#if IS_ENABLED(CONFIG_NET_SHAPER)
+ /**
+ * @net_shaper_ops: Device shaping offload operations
+ * see include/net/net_shapers.h
+ */
+ const struct net_shaper_ops *net_shaper_ops;
+#endif
};
/**
@@ -1773,7 +1804,6 @@ enum netdev_reg_state {
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
* see <net/iw_handler.h> for details.
- * @wireless_data: Instance data managed by the core of wireless extensions
*
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
@@ -1858,9 +1888,6 @@ enum netdev_reg_state {
* allocated at register_netdev() time
* @real_num_rx_queues: Number of RX queues currently active in device
* @xdp_prog: XDP sockets filter program pointer
- * @gro_flush_timeout: timeout for GRO layer in NAPI
- * @napi_defer_hard_irqs: If not zero, provides a counter that would
- * allow to avoid NIC hard IRQ, on busy queues.
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
@@ -2009,6 +2036,16 @@ enum netdev_reg_state {
* @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
* where the clock is recovered.
*
+ * @max_pacing_offload_horizon: max EDT offload horizon in nsec.
+ * @napi_config: An array of napi_config structures containing per-NAPI
+ * settings.
+ * @gro_flush_timeout: timeout for GRO layer in NAPI
+ * @napi_defer_hard_irqs: If not zero, a counter that allows avoiding
+ * NIC hard IRQs on busy queues.
+ *
+ * @neighbours: List heads pointing to this device's neighbours'
+ * dev_list, one per address-family.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -2074,8 +2111,6 @@ struct net_device {
int ifindex;
unsigned int real_num_rx_queues;
struct netdev_rx_queue *_rx;
- unsigned long gro_flush_timeout;
- u32 napi_defer_hard_irqs;
unsigned int gro_max_size;
unsigned int gro_ipv4_max_size;
rx_handler_func_t __rcu *rx_handler;
@@ -2150,7 +2185,6 @@ struct net_device {
#ifdef CONFIG_WIRELESS_EXT
const struct iw_handler_def *wireless_handlers;
- struct iw_public_data *wireless_data;
#endif
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
@@ -2209,6 +2243,9 @@ struct net_device {
/* Protocol-specific pointers */
struct in_device __rcu *ip_ptr;
+ /** @fib_nh_head: nexthops associated with this netdev */
+ struct hlist_head fib_nh_head;
+
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
#endif
@@ -2399,6 +2436,27 @@ struct net_device {
/** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
struct dim_irq_moder *irq_moder;
+ u64 max_pacing_offload_horizon;
+ struct napi_config *napi_config;
+ unsigned long gro_flush_timeout;
+ u32 napi_defer_hard_irqs;
+
+ /**
+	 * @lock: protects @net_shaper_hierarchy; feel free to use it for other
+ * netdev-scope protection. Ordering: take after rtnl_lock.
+ */
+ struct mutex lock;
+
+#if IS_ENABLED(CONFIG_NET_SHAPER)
+ /**
+ * @net_shaper_hierarchy: data tracking the current shaper status
+ * see include/net/net_shapers.h
+ */
+ struct net_shaper_hierarchy *net_shaper_hierarchy;
+#endif
+
+ struct hlist_head neighbours[NEIGH_NR_TABLES];
+
u8 priv[] ____cacheline_aligned
__counted_by(priv_len);
} ____cacheline_aligned;
@@ -2649,6 +2707,22 @@ netif_napi_add_tx_weight(struct net_device *dev,
}
/**
+ * netif_napi_add_config - initialize a NAPI context with persistent config
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
+ * @index: the NAPI index
+ */
+static inline void
+netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int index)
+{
+ napi->index = index;
+ napi->config = &dev->napi_config[index];
+ netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
+/**
* netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
* @dev: network device
* @napi: NAPI context
@@ -3470,7 +3544,7 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
* because in netdev_tx_completed_queue we update the dql_completed
* before checking the XOFF flag.
*/
- smp_mb();
+ smp_mb__after_atomic();
/* check again in case another CPU has just made room avail */
if (unlikely(dql_avail(&dev_queue->dql) >= 0))
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index b332c2048c75..c3ae84a77e16 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -34,6 +34,7 @@ struct netlink_skb_parms {
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
+#define NETLINK_CTX_SIZE 48
void netlink_table_grab(void);
@@ -239,7 +240,7 @@ int netlink_register_notifier(struct notifier_block *nb);
int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
-struct sock *netlink_getsockbyfilp(struct file *filp);
+struct sock *netlink_getsockbyfd(int fd);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
@@ -293,7 +294,7 @@ struct netlink_callback {
int flags;
bool strict_check;
union {
- u8 ctx[48];
+ u8 ctx[NETLINK_CTX_SIZE];
/* args is deprecated. Cast a struct over ctx instead
* for proper type safety.
@@ -302,7 +303,7 @@ struct netlink_callback {
};
};
-#define NL_ASSERT_DUMP_CTX_FITS(type_name) \
+#define NL_ASSERT_CTX_FITS(type_name) \
BUILD_BUG_ON(sizeof(type_name) > \
sizeof_field(struct netlink_callback, ctx))
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index cd4e28db0cbd..b34301650c47 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -32,6 +32,7 @@ struct netpoll {
bool ipv6;
u16 local_port, remote_port;
u8 remote_mac[ETH_ALEN];
+ struct sk_buff_head skb_pool;
};
struct netpoll_info {
@@ -72,7 +73,7 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
struct net_device *dev = napi->dev;
- if (dev && dev->npinfo) {
+ if (dev && rcu_access_pointer(dev->npinfo)) {
int owner = smp_processor_id();
while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
index b0dd9b1eef4f..3982fea79919 100644
--- a/include/linux/nfslocalio.h
+++ b/include/linux/nfslocalio.h
@@ -32,7 +32,8 @@ typedef struct {
struct auth_domain *dom; /* auth_domain for localio */
} nfs_uuid_t;
-void nfs_uuid_begin(nfs_uuid_t *);
+void nfs_uuid_init(nfs_uuid_t *);
+bool nfs_uuid_begin(nfs_uuid_t *);
void nfs_uuid_end(nfs_uuid_t *);
void nfs_uuid_is_local(const uuid_t *, struct list_head *,
struct net *, struct auth_domain *, struct module *);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index b58d9405d65e..0a6e22038ce3 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -327,7 +327,8 @@ struct nvme_id_ctrl {
__le32 sanicap;
__le32 hmminds;
__le16 hmmaxd;
- __u8 rsvd338[4];
+ __le16 nvmsetidmax;
+ __le16 endgidmax;
__u8 anatt;
__u8 anacap;
__le32 anagrpmax;
@@ -522,6 +523,7 @@ enum {
NVME_ID_CNS_NS_DESC_LIST = 0x03,
NVME_ID_CNS_CS_NS = 0x05,
NVME_ID_CNS_CS_CTRL = 0x06,
+ NVME_ID_CNS_NS_ACTIVE_LIST_CS = 0x07,
NVME_ID_CNS_NS_CS_INDEP = 0x08,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
@@ -530,6 +532,7 @@ enum {
NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
NVME_ID_CNS_NS_GRANULARITY = 0x16,
NVME_ID_CNS_UUID_LIST = 0x17,
+ NVME_ID_CNS_ENDGRP_LIST = 0x19,
};
enum {
@@ -560,6 +563,8 @@ enum {
NVME_NS_FLBAS_LBA_SHIFT = 1,
NVME_NS_FLBAS_META_EXT = 0x10,
NVME_NS_NMIC_SHARED = 1 << 0,
+ NVME_NS_ROTATIONAL = 1 << 4,
+ NVME_NS_VWC_NOT_PRESENT = 1 << 5,
NVME_LBAF_RP_BEST = 0,
NVME_LBAF_RP_BETTER = 1,
NVME_LBAF_RP_GOOD = 2,
@@ -617,6 +622,40 @@ enum {
NVME_NIDT_CSI = 0x04,
};
+struct nvme_endurance_group_log {
+ __u8 egcw;
+ __u8 egfeat;
+ __u8 rsvd2;
+ __u8 avsp;
+ __u8 avspt;
+ __u8 pused;
+ __le16 did;
+ __u8 rsvd8[24];
+ __u8 ee[16];
+ __u8 dur[16];
+ __u8 duw[16];
+ __u8 muw[16];
+ __u8 hrc[16];
+ __u8 hwc[16];
+ __u8 mdie[16];
+ __u8 neile[16];
+ __u8 tegcap[16];
+ __u8 uegcap[16];
+ __u8 rsvd192[320];
+};
+
+struct nvme_rotational_media_log {
+ __le16 endgid;
+ __le16 numa;
+ __le16 nrs;
+ __u8 rsvd6[2];
+ __le32 spinc;
+ __le32 fspinc;
+ __le32 ldc;
+ __le32 fldc;
+ __u8 rsvd24[488];
+};
+
struct nvme_smart_log {
__u8 critical_warning;
__u8 temperature[2];
@@ -1244,6 +1283,7 @@ enum {
NVME_FEAT_WRITE_PROTECT = 0x84,
NVME_FEAT_VENDOR_START = 0xC0,
NVME_FEAT_VENDOR_END = 0xFF,
+ NVME_LOG_SUPPORTED = 0x00,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
@@ -1254,6 +1294,8 @@ enum {
NVME_LOG_TELEMETRY_CTRL = 0x08,
NVME_LOG_ENDURANCE_GROUP = 0x09,
NVME_LOG_ANA = 0x0c,
+ NVME_LOG_FEATURES = 0x12,
+ NVME_LOG_RMI = 0x16,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
@@ -1261,6 +1303,24 @@ enum {
NVME_FWACT_ACTV = (2 << 3),
};
+struct nvme_supported_log {
+ __le32 lids[256];
+};
+
+enum {
+ NVME_LIDS_LSUPP = 1 << 0,
+};
+
+struct nvme_supported_features_log {
+ __le32 fis[256];
+};
+
+enum {
+ NVME_FIS_FSUPP = 1 << 0,
+ NVME_FIS_NSCPE = 1 << 20,
+ NVME_FIS_CSCPE = 1 << 21,
+};
+
/* NVMe Namespace Write Protect State */
enum {
NVME_NS_NO_WRITE_PROTECT = 0,
@@ -1281,7 +1341,8 @@ struct nvme_identify {
__u8 cns;
__u8 rsvd3;
__le16 ctrlid;
- __u8 rsvd11[3];
+ __le16 cnssid;
+ __u8 rsvd11;
__u8 csi;
__u32 rsvd12[4];
};
@@ -1389,7 +1450,7 @@ struct nvme_get_log_page_command {
__u8 lsp; /* upper 4 bits reserved */
__le16 numdl;
__le16 numdu;
- __u16 rsvd11;
+ __le16 lsi;
union {
struct {
__le32 lpol;
@@ -2037,4 +2098,72 @@ struct nvme_completion {
#define NVME_MINOR(ver) (((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver) ((ver) & 0xff)
+enum {
+ NVME_AEN_RESV_LOG_PAGE_AVALIABLE = 0x00,
+};
+
+enum {
+ NVME_PR_LOG_EMPTY_LOG_PAGE = 0x00,
+ NVME_PR_LOG_REGISTRATION_PREEMPTED = 0x01,
+ NVME_PR_LOG_RESERVATION_RELEASED = 0x02,
+ NVME_PR_LOG_RESERVATOIN_PREEMPTED = 0x03,
+};
+
+enum {
+ NVME_PR_NOTIFY_BIT_REG_PREEMPTED = 1,
+ NVME_PR_NOTIFY_BIT_RESV_RELEASED = 2,
+ NVME_PR_NOTIFY_BIT_RESV_PREEMPTED = 3,
+};
+
+struct nvme_pr_log {
+ __le64 count;
+ __u8 type;
+ __u8 nr_pages;
+ __u8 rsvd1[2];
+ __le32 nsid;
+ __u8 rsvd2[48];
+};
+
+struct nvmet_pr_register_data {
+ __le64 crkey;
+ __le64 nrkey;
+};
+
+struct nvmet_pr_acquire_data {
+ __le64 crkey;
+ __le64 prkey;
+};
+
+struct nvmet_pr_release_data {
+ __le64 crkey;
+};
+
+enum nvme_pr_capabilities {
+ NVME_PR_SUPPORT_PTPL = 1,
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE = 1 << 1,
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS = 1 << 2,
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY = 1 << 3,
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY = 1 << 4,
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS = 1 << 5,
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS = 1 << 6,
+ NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF = 1 << 7,
+};
+
+enum nvme_pr_register_action {
+ NVME_PR_REGISTER_ACT_REG = 0,
+ NVME_PR_REGISTER_ACT_UNREG = 1,
+ NVME_PR_REGISTER_ACT_REPLACE = 1 << 1,
+};
+
+enum nvme_pr_acquire_action {
+ NVME_PR_ACQUIRE_ACT_ACQUIRE = 0,
+ NVME_PR_ACQUIRE_ACT_PREEMPT = 1,
+ NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 1 << 1,
+};
+
+enum nvme_pr_release_action {
+ NVME_PR_RELEASE_ACT_RELEASE = 0,
+ NVME_PR_RELEASE_ACT_CLEAR = 1,
+};
+
#endif /* _LINUX_NVME_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 85b60ac9eec5..086a60f3b8a6 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -357,7 +357,7 @@ extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
extern struct device_node *of_cpu_device_node_get(int cpu);
extern int of_cpu_node_to_id(struct device_node *np);
extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
-extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+extern struct device_node *of_get_cpu_state_node(const struct device_node *cpu_node,
int index);
extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread);
@@ -395,7 +395,7 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
int size);
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
-extern int of_alias_get_id(struct device_node *np, const char *stem);
+extern int of_alias_get_id(const struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
bool of_machine_compatible_match(const char *const *compats);
@@ -435,7 +435,7 @@ extern int of_detach_node(struct device_node *);
* of_property_for_each_u32(np, "propname", u)
* printk("U32 value: %x\n", u);
*/
-const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
+const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur,
u32 *pu);
/*
* struct property *prop;
@@ -444,11 +444,11 @@ const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
* of_property_for_each_string(np, "propname", prop, s)
* printk("String value: %s\n", s);
*/
-const char *of_prop_next_string(struct property *prop, const char *cur);
+const char *of_prop_next_string(const struct property *prop, const char *cur);
-bool of_console_check(struct device_node *dn, char *name, int index);
+bool of_console_check(const struct device_node *dn, char *name, int index);
-int of_map_id(struct device_node *np, u32 id,
+int of_map_id(const struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out);
@@ -826,13 +826,13 @@ static inline bool of_console_check(const struct device_node *dn, const char *na
return false;
}
-static inline const __be32 *of_prop_next_u32(struct property *prop,
+static inline const __be32 *of_prop_next_u32(const struct property *prop,
const __be32 *cur, u32 *pu)
{
return NULL;
}
-static inline const char *of_prop_next_string(struct property *prop,
+static inline const char *of_prop_next_string(const struct property *prop,
const char *cur)
{
return NULL;
@@ -871,7 +871,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
}
-static inline int of_map_id(struct device_node *np, u32 id,
+static inline int of_map_id(const struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
{
@@ -899,7 +899,7 @@ static inline const void *of_device_get_match_data(const struct device *dev)
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
#endif
-static inline int of_prop_val_eq(struct property *p1, struct property *p2)
+static inline int of_prop_val_eq(const struct property *p1, const struct property *p2)
{
return p1->length == p2->length &&
!memcmp(p1->value, p2->value, (size_t)p1->length);
@@ -1252,7 +1252,7 @@ static inline int of_property_read_string_index(const struct device_node *np,
static inline bool of_property_read_bool(const struct device_node *np,
const char *propname)
{
- struct property *prop = of_find_property(np, propname, NULL);
+ const struct property *prop = of_find_property(np, propname, NULL);
return prop ? true : false;
}
@@ -1430,7 +1430,7 @@ static inline int of_property_read_s32(const struct device_node *np,
err = of_phandle_iterator_next(it))
#define of_property_for_each_u32(np, propname, u) \
- for (struct {struct property *prop; const __be32 *item; } _it = \
+ for (struct {const struct property *prop; const __be32 *item; } _it = \
{of_find_property(np, propname, NULL), \
of_prop_next_u32(_it.prop, NULL, &u)}; \
_it.item; \
@@ -1734,7 +1734,7 @@ struct of_overlay_notify_data {
#ifdef CONFIG_OF_OVERLAY
int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id, struct device_node *target_base);
+ int *ovcs_id, const struct device_node *target_base);
int of_overlay_remove(int *ovcs_id);
int of_overlay_remove_all(void);
@@ -1744,7 +1744,7 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id, struct device_node *target_base)
+ int *ovcs_id, const struct device_node *target_base)
{
return -ENOTSUPP;
}
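
The constification above lets callers keep a const struct device_node * end to end. A small sketch under that assumption (helper name and property name are illustrative, not from the patch):

#include <linux/of.h>

/* Sum a hypothetical "values" u32 array property; this now compiles
 * cleanly with a const-qualified node pointer. */
static u32 example_sum_values(const struct device_node *np)
{
	u32 v, sum = 0;

	of_property_for_each_u32(np, "values", v)
		sum += v;

	return sum;
}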
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 26a19daf0d09..9e034363788a 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -10,7 +10,7 @@ struct of_bus;
struct of_pci_range_parser {
struct device_node *node;
- struct of_bus *bus;
+ const struct of_bus *bus;
const __be32 *range;
const __be32 *end;
int na;
@@ -83,8 +83,8 @@ extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range *range);
extern int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r);
-extern int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
+extern int of_pci_range_to_resource(const struct of_pci_range *range,
+ const struct device_node *np,
struct resource *res);
extern int of_range_to_resource(struct device_node *np, int index,
struct resource *res);
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index d69ad5bb1eb1..b8d6c0c20876 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -31,6 +31,7 @@ extern void *of_fdt_unflatten_tree(const unsigned long *blob,
extern int __initdata dt_root_addr_cells;
extern int __initdata dt_root_size_cells;
extern void *initial_boot_params;
+extern phys_addr_t initial_boot_params_pa;
extern char __dtb_start[];
extern char __dtb_end[];
@@ -70,8 +71,8 @@ extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
/* Early flat tree scan hooks */
extern int early_init_dt_scan_root(void);
-extern bool early_init_dt_scan(void *params);
-extern bool early_init_dt_verify(void *params);
+extern bool early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys);
+extern bool early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys);
extern void early_init_dt_scan_nodes(void);
extern const char *of_flat_dt_get_machine_name(void);
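
early_init_dt_scan() and early_init_dt_verify() now take the physical address alongside the mapped blob so the core can record initial_boot_params_pa. A hedged sketch of an architecture-side caller (the function name is hypothetical):

#include <linux/of_fdt.h>

/* Hypothetical early-boot hook: pass both the mapped virtual address
 * and the physical address of the FDT blob. */
static void __init example_setup_machine_fdt(void *dt_virt, phys_addr_t dt_phys)
{
	if (!early_init_dt_scan(dt_virt, dt_phys))
		panic("Invalid device tree blob\n");
}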
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index a4bea62bfa29..a692d9d979a6 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_OF_GRAPH_H
#define __LINUX_OF_GRAPH_H
+#include <linux/cleanup.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -37,14 +38,43 @@ struct of_endpoint {
for (child = of_graph_get_next_endpoint(parent, NULL); child != NULL; \
child = of_graph_get_next_endpoint(parent, child))
+/**
+ * for_each_of_graph_port - iterate over every port in a device or ports node
+ * @parent: parent device or ports node containing port
+ * @child: loop variable pointing to the current port node
+ *
+ * When breaking out of the loop and continuing to use @child, you need to
+ * use return_ptr(@child) or no_free_ptr(@child) so that __free() is not
+ * called for it.
+ */
+#define for_each_of_graph_port(parent, child) \
+ for (struct device_node *child __free(device_node) = of_graph_get_next_port(parent, NULL);\
+ child != NULL; child = of_graph_get_next_port(parent, child))
+
+/**
+ * for_each_of_graph_port_endpoint - iterate over every endpoint in a port node
+ * @parent: parent port node
+ * @child: loop variable pointing to the current endpoint node
+ *
+ * When breaking out of the loop and continuing to use @child, you need to
+ * use return_ptr(@child) or no_free_ptr(@child) so that __free() is not
+ * called for it.
+ */
+#define for_each_of_graph_port_endpoint(parent, child) \
+ for (struct device_node *child __free(device_node) = of_graph_get_next_port_endpoint(parent, NULL);\
+ child != NULL; child = of_graph_get_next_port_endpoint(parent, child))
+
#ifdef CONFIG_OF
bool of_graph_is_present(const struct device_node *node);
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
unsigned int of_graph_get_endpoint_count(const struct device_node *np);
+unsigned int of_graph_get_port_count(struct device_node *np);
struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *previous);
+struct device_node *of_graph_get_next_port(const struct device_node *parent,
+ struct device_node *port);
+struct device_node *of_graph_get_next_port_endpoint(const struct device_node *port,
+ struct device_node *prev);
struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg);
struct device_node *of_graph_get_remote_endpoint(
@@ -73,6 +103,11 @@ static inline unsigned int of_graph_get_endpoint_count(const struct device_node
return 0;
}
+static inline unsigned int of_graph_get_port_count(struct device_node *np)
+{
+ return 0;
+}
+
static inline struct device_node *of_graph_get_port_by_id(
struct device_node *node, u32 id)
{
@@ -86,6 +121,20 @@ static inline struct device_node *of_graph_get_next_endpoint(
return NULL;
}
+static inline struct device_node *of_graph_get_next_port(
+ const struct device_node *parent,
+ struct device_node *previous)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_graph_get_next_port_endpoint(
+ const struct device_node *parent,
+ struct device_node *previous)
+{
+ return NULL;
+}
+
static inline struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg)
{
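
A sketch of the new port iterators above (the helper is illustrative): the __free(device_node) in the macro bodies drops node references automatically, so no explicit of_node_put() is needed unless the node escapes the loop.

#include <linux/of_graph.h>

/* Count every endpoint beneath a device node by walking its ports. */
static unsigned int example_count_endpoints(struct device_node *dev_node)
{
	unsigned int n = 0;

	for_each_of_graph_port(dev_node, port) {
		for_each_of_graph_port_endpoint(port, ep)
			n++;
	}

	return n;
}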
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index d6d3eae2f145..6337ad4e5fe8 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -48,12 +48,12 @@ extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
extern struct device_node *of_irq_find_parent(struct device_node *child);
extern struct irq_domain *of_msi_get_domain(struct device *dev,
- struct device_node *np,
+ const struct device_node *np,
enum irq_domain_bus_token token);
extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
u32 id,
u32 bus_token);
-extern void of_msi_configure(struct device *dev, struct device_node *np);
+extern void of_msi_configure(struct device *dev, const struct device_node *np);
u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
static inline void of_irq_init(const struct of_device_id *matches)
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 8d6571feb95d..5d36dcd06f60 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -17,33 +17,13 @@ enum packing_op {
UNPACK,
};
-/**
- * packing - Convert numbers (currently u64) between a packed and an unpacked
- * format. Unpacked means laid out in memory in the CPU's native
- * understanding of integers, while packed means anything else that
- * requires translation.
- *
- * @pbuf: Pointer to a buffer holding the packed value.
- * @uval: Pointer to an u64 holding the unpacked value.
- * @startbit: The index (in logical notation, compensated for quirks) where
- * the packed value starts within pbuf. Must be larger than, or
- * equal to, endbit.
- * @endbit: The index (in logical notation, compensated for quirks) where
- * the packed value ends within pbuf. Must be smaller than, or equal
- * to, startbit.
- * @op: If PACK, then uval will be treated as const pointer and copied (packed)
- * into pbuf, between startbit and endbit.
- * If UNPACK, then pbuf will be treated as const pointer and the logical
- * value between startbit and endbit will be copied (unpacked) to uval.
- * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
- * QUIRK_MSB_ON_THE_RIGHT.
- *
- * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
- * correct usage, return code may be discarded.
- * If op is PACK, pbuf is modified.
- * If op is UNPACK, uval is modified.
- */
int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
enum packing_op op, u8 quirks);
+int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen,
+ u8 quirks);
+
+int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
+ size_t pbuflen, u8 quirks);
+
#endif
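
The new pack()/unpack() pair replaces the op-based packing() call with separate, const-correct entry points. A minimal sketch, assuming a quirk-free layout (the field position is illustrative):

#include <linux/packing.h>

/* Pack a 4-bit value into bits 11..8 of a 2-byte buffer, then read it
 * back; quirks == 0 means plain big-endian, MSB-on-the-left layout. */
static int example_pack_roundtrip(void)
{
	u8 buf[2] = {};
	u64 readback;
	int err;

	err = pack(buf, 0xA, 11, 8, sizeof(buf), 0);
	if (err)
		return err;

	return unpack(buf, &readback, 11, 8, sizeof(buf), 0);
}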
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index cc839e4365c1..908ee0aad554 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -543,7 +543,7 @@ FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
* - PG_private and PG_private_2 cause release_folio() and co to be invoked
*/
PAGEFLAG(Private, private, PF_ANY)
-PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
+FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE)
/* owner_2 can be set on tail pages for anon memory */
FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)
@@ -554,7 +554,7 @@ FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)
*/
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
-PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
+FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
new file mode 100644
index 000000000000..41a91df82631
--- /dev/null
+++ b/include/linux/page_frag_cache.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_PAGE_FRAG_CACHE_H
+#define _LINUX_PAGE_FRAG_CACHE_H
+
+#include <linux/bits.h>
+#include <linux/log2.h>
+#include <linux/mm_types_task.h>
+#include <linux/types.h>
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+/* Use a full byte here to enable assembler optimization, as the shift
+ * operation usually expects a byte.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK GENMASK(7, 0)
+#else
+/* The compiler should be able to figure out that we never read this, as
+ * any value ANDed with 0 is 0.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK 0
+#endif
+
+#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT (PAGE_FRAG_CACHE_ORDER_MASK + 1)
+
+static inline bool encoded_page_decode_pfmemalloc(unsigned long encoded_page)
+{
+ return !!(encoded_page & PAGE_FRAG_CACHE_PFMEMALLOC_BIT);
+}
+
+static inline void page_frag_cache_init(struct page_frag_cache *nc)
+{
+ nc->encoded_page = 0;
+}
+
+static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
+{
+ return encoded_page_decode_pfmemalloc(nc->encoded_page);
+}
+
+void page_frag_cache_drain(struct page_frag_cache *nc);
+void __page_frag_cache_drain(struct page *page, unsigned int count);
+void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
+ gfp_t gfp_mask, unsigned int align_mask);
+
+static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
+}
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
+void page_frag_free(void *addr);
+
+#endif
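
A minimal consumer sketch of the API above, assuming process context and a caller-provided cache (names illustrative):

#include <linux/gfp.h>
#include <linux/page_frag_cache.h>

/* Allocate a small fragment from the cache; the fragment is released
 * later with page_frag_free(), the whole cache with
 * page_frag_cache_drain(). */
static void *example_frag_alloc(struct page_frag_cache *nc)
{
	page_frag_cache_init(nc);

	return page_frag_alloc(nc, 256, GFP_KERNEL);
}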
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 573b4c4c2be6..733ff6570e2d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1556,7 +1556,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
void *alignf_data);
-int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
resource_size_t size);
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
@@ -2019,7 +2019,7 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
-static inline int pci_register_io_range(struct fwnode_handle *fwnode,
+static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
phys_addr_t addr, resource_size_t size)
{ return -EINVAL; }
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
index b4a4eb6c8866..b5b5d17998b8 100644
--- a/include/linux/pcs/pcs-xpcs.h
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -21,8 +21,6 @@
#define DW_AN_C37_1000BASEX 4
#define DW_10GBASER 5
-struct dw_xpcs_desc;
-
enum dw_xpcs_pcs_id {
DW_XPCS_ID_NATIVE = 0,
NXP_SJA1105_XPCS_ID = 0x00000010,
@@ -48,33 +46,18 @@ struct dw_xpcs_info {
u32 pma;
};
-enum dw_xpcs_clock {
- DW_XPCS_CORE_CLK,
- DW_XPCS_PAD_CLK,
- DW_XPCS_NUM_CLKS,
-};
-
-struct dw_xpcs {
- struct dw_xpcs_info info;
- const struct dw_xpcs_desc *desc;
- struct mdio_device *mdiodev;
- struct clk_bulk_data clks[DW_XPCS_NUM_CLKS];
- struct phylink_pcs pcs;
- phy_interface_t interface;
-};
+struct dw_xpcs;
+struct phylink_pcs *xpcs_to_phylink_pcs(struct dw_xpcs *xpcs);
int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface);
-void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
- phy_interface_t interface, int speed, int duplex);
-int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
- const unsigned long *advertising, unsigned int neg_mode);
void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces);
int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns,
int enable);
-struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
- phy_interface_t interface);
-struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode,
- phy_interface_t interface);
+struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr);
+struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode);
void xpcs_destroy(struct dw_xpcs *xpcs);
+struct phylink_pcs *xpcs_create_pcs_mdiodev(struct mii_bus *bus, int addr);
+void xpcs_destroy_pcs(struct phylink_pcs *pcs);
+
#endif /* __LINUX_PCS_XPCS_H */
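
With struct dw_xpcs now opaque, MAC drivers no longer reach into it for the embedded phylink_pcs; they either convert a dw_xpcs with xpcs_to_phylink_pcs() or use the new create/destroy pair directly. A hedged sketch:

#include <linux/err.h>
#include <linux/pcs/pcs-xpcs.h>

/* Obtain a phylink_pcs handle for the XPCS at "addr" on "bus"; on error
 * an ERR_PTR is returned. Tear down with xpcs_destroy_pcs() when the
 * MAC driver unbinds. */
static struct phylink_pcs *example_attach_xpcs(struct mii_bus *bus, int addr)
{
	return xpcs_create_pcs_mdiodev(bus, addr);
}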
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 3372c1b56486..d698efba28a2 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -257,6 +257,7 @@
#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_UEN (1 << 4) /* Fine grained per counter access at EL0 */
/* Mask for writable bits */
#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fb908843f209..cb99ec8c9e96 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -170,6 +170,12 @@ struct hw_perf_event {
};
struct { /* aux / Intel-PT */
u64 aux_config;
+ /*
+ * For AUX area events, aux_paused cannot be a state
+ * flag because it can be updated asynchronously to
+ * state.
+ */
+ unsigned int aux_paused;
};
struct { /* software */
struct hrtimer hrtimer;
@@ -294,6 +300,7 @@ struct perf_event_pmu_context;
#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
+#define PERF_PMU_CAP_AUX_PAUSE 0x0200
/**
* pmu::scope
@@ -384,6 +391,8 @@ struct pmu {
#define PERF_EF_START 0x01 /* start the counter when adding */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
+#define PERF_EF_PAUSE 0x08 /* AUX area event, pause tracing */
+#define PERF_EF_RESUME 0x10 /* AUX area event, resume tracing */
/*
* Adds/Removes a counter to/from the PMU, can be done inside a
@@ -423,6 +432,18 @@ struct pmu {
*
* ->start() with PERF_EF_RELOAD will reprogram the counter
* value, must be preceded by a ->stop() with PERF_EF_UPDATE.
+ *
+ * ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not
+ * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with
+ * PERF_EF_RESUME.
+ *
+ * ->start() with PERF_EF_RESUME will start as simply as possible but
+ * only if the counter is not otherwise stopped. Will not overlap
+ * another ->start() with PERF_EF_RESUME nor ->stop() with
+ * PERF_EF_PAUSE.
+ *
+ * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other
+ * ->stop()/->start() invocations, just not itself.
*/
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);
@@ -1655,15 +1676,35 @@ extern void perf_tp_event(u16 event_type, u64 count, void *record,
struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);
-#ifndef perf_misc_flags
-# define perf_misc_flags(regs) \
+extern unsigned long perf_misc_flags(struct perf_event *event, struct pt_regs *regs);
+extern unsigned long perf_instruction_pointer(struct perf_event *event,
+ struct pt_regs *regs);
+
+#ifndef perf_arch_misc_flags
+# define perf_arch_misc_flags(regs) \
(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
-# define perf_instruction_pointer(regs) instruction_pointer(regs)
+# define perf_arch_instruction_pointer(regs) instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif
+#ifndef perf_arch_guest_misc_flags
+static inline unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs)
+{
+ unsigned long guest_state = perf_guest_state();
+
+ if (!(guest_state & PERF_GUEST_ACTIVE))
+ return 0;
+
+ if (guest_state & PERF_GUEST_USER)
+ return PERF_RECORD_MISC_GUEST_USER;
+ else
+ return PERF_RECORD_MISC_GUEST_KERNEL;
+}
+# define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs)
+#endif
+
static inline bool has_branch_stack(struct perf_event *event)
{
return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
@@ -1679,6 +1720,13 @@ static inline bool has_aux(struct perf_event *event)
return event->pmu->setup_aux;
}
+static inline bool has_aux_action(struct perf_event *event)
+{
+ return event->attr.aux_sample_size ||
+ event->attr.aux_pause ||
+ event->attr.aux_resume;
+}
+
static inline bool is_write_backward(struct perf_event *event)
{
return !!event->attr.write_backward;
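
A sketch of how a PMU driver's ->stop() might honor the new pause semantics; pt_example_pause() is hypothetical and stands in for whatever cheap hardware toggle the PMU provides:

#include <linux/perf_event.h>

/* Hypothetical PMU callback: PERF_EF_PAUSE requests the cheapest
 * possible stop of AUX tracing, without the bookkeeping of a full
 * ->stop(PERF_EF_UPDATE). */
static void example_pmu_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_PAUSE) {
		pt_example_pause(event);	/* hypothetical */
		return;
	}

	/* ... regular stop path, including PERF_EF_UPDATE handling ... */
}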
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a98bc91a0cde..77c6d6451638 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -601,7 +601,6 @@ struct macsec_ops;
* @adv_old: Saved advertised while power saving for WoL
* @supported_eee: supported PHY EEE linkmodes
* @advertising_eee: Currently advertised EEE linkmodes
- * @eee_enabled: Flag indicating whether the EEE feature is enabled
* @enable_tx_lpi: When True, MAC should transmit LPI to PHY
* @eee_cfg: User configuration of EEE
* @lp_advertising: Current link partner advertised linkmodes
@@ -721,16 +720,14 @@ struct phy_device {
/* used for eee validation and configuration*/
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee);
- bool eee_enabled;
-
- /* Host supported PHY interface types. Should be ignored if empty. */
- DECLARE_PHY_INTERFACE_MASK(host_interfaces);
-
/* Energy efficient ethernet modes which should be prohibited */
- u32 eee_broken_modes;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(eee_broken_modes);
bool enable_tx_lpi;
struct eee_config eee_cfg;
+ /* Host supported PHY interface types. Should be ignored if empty. */
+ DECLARE_PHY_INTERFACE_MASK(host_interfaces);
+
#ifdef CONFIG_LED_TRIGGER_PHY
struct phy_led_trigger *phy_led_triggers;
unsigned int phy_num_led_triggers;
@@ -877,8 +874,9 @@ struct phy_plca_status {
/* Modes for PHY LED configuration */
enum phy_led_modes {
- PHY_LED_ACTIVE_LOW = 0,
- PHY_LED_INACTIVE_HIGH_IMPEDANCE = 1,
+ PHY_LED_ACTIVE_HIGH = 0,
+ PHY_LED_ACTIVE_LOW = 1,
+ PHY_LED_INACTIVE_HIGH_IMPEDANCE = 2,
/* keep it last */
__PHY_LED_MODES_NUM,
@@ -1260,9 +1258,20 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
unsigned long *mask);
void of_set_phy_supported(struct phy_device *phydev);
void of_set_phy_eee_broken(struct phy_device *phydev);
+void of_set_phy_timing_role(struct phy_device *phydev);
int phy_speed_down_core(struct phy_device *phydev);
/**
+ * phy_set_eee_broken - Mark an EEE mode as broken so that it isn't advertised.
+ * @phydev: The phy_device struct
+ * @link_mode: The broken EEE mode
+ */
+static inline void phy_set_eee_broken(struct phy_device *phydev, u32 link_mode)
+{
+ linkmode_set_bit(link_mode, phydev->eee_broken_modes);
+}
+
+/**
* phy_is_started - Convenience function to check whether PHY is started
* @phydev: The phy_device struct
*/
@@ -1378,12 +1387,13 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
* @regnum: The register on the MMD to read
* @val: Variable to read the register into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read the usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
* @sleep_before_read: if it is true, sleep @sleep_us before read.
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
* case, the last read value at @args is stored in @val. Must not
* be called from atomic context if sleep_us or timeout_us are used.
*/
@@ -1883,7 +1893,6 @@ int genphy_read_abilities(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart);
-int genphy_config_eee_advert(struct phy_device *phydev);
int __genphy_config_aneg(struct phy_device *phydev, bool changed);
int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
@@ -1951,7 +1960,6 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
struct ethtool_keee *data);
int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
struct ethtool_keee *data);
-int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv);
int genphy_c45_an_config_eee_aneg(struct phy_device *phydev);
int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
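
eee_broken_modes is now a full linkmode bitmap, so drivers mark broken modes with the new phy_set_eee_broken() helper instead of setting bits in a u32 mask. A sketch of a driver quirk (the affected mode is illustrative):

#include <linux/phy.h>

/* Hypothetical .config_init: this silicon cannot do EEE at 1000baseT,
 * so never advertise it. */
static int example_config_init(struct phy_device *phydev)
{
	phy_set_eee_broken(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);

	return 0;
}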
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index f9c00f839e9f..085dd8e8af76 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -13,8 +13,6 @@
#include <linux/dmaengine.h>
-extern void s3c64xx_ac97_setup_gpio(int);
-
struct samsung_i2s_type {
/* If the Primary DAI has 5.1 Channels */
#define QUIRK_PRI_6CHAN (1 << 0)
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h
index 1707ad4147df..7d21e0c41037 100644
--- a/include/linux/platform_data/hwmon-s3c.h
+++ b/include/linux/platform_data/hwmon-s3c.h
@@ -33,14 +33,4 @@ struct s3c_hwmon_pdata {
struct s3c_hwmon_chcfg *in[8];
};
-/**
- * s3c_hwmon_set_platdata - Set platform data for S3C HWMON device
- * @pd: Platform data to register to device.
- *
- * Register the given platform data for use with the S3C HWMON device.
- * The call will copy the platform data, so the board definitions can
- * make the structure itself __initdata.
- */
-extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd);
-
#endif /* __HWMON_S3C_H__ */
diff --git a/include/linux/platform_data/max6639.h b/include/linux/platform_data/max6639.h
deleted file mode 100644
index 65bfdb4fdc15..000000000000
--- a/include/linux/platform_data/max6639.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_MAX6639_H
-#define _LINUX_MAX6639_H
-
-#include <linux/types.h>
-
-/* platform data for the MAX6639 temperature sensor and fan control */
-
-struct max6639_platform_data {
- bool pwm_polarity; /* Polarity low (0) or high (1, default) */
- int ppr; /* Pulses per rotation 1..4 (default == 2) */
- int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */
-};
-
-#endif /* _LINUX_MAX6639_H */
diff --git a/include/linux/platform_data/media/omap4iss.h b/include/linux/platform_data/media/omap4iss.h
deleted file mode 100644
index 2a511a8fcda7..000000000000
--- a/include/linux/platform_data/media/omap4iss.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARCH_ARM_PLAT_OMAP4_ISS_H
-#define ARCH_ARM_PLAT_OMAP4_ISS_H
-
-#include <linux/i2c.h>
-
-struct iss_device;
-
-enum iss_interface_type {
- ISS_INTERFACE_CSI2A_PHY1,
- ISS_INTERFACE_CSI2B_PHY2,
-};
-
-/**
- * struct iss_csiphy_lane: CSI2 lane position and polarity
- * @pos: position of the lane
- * @pol: polarity of the lane
- */
-struct iss_csiphy_lane {
- u8 pos;
- u8 pol;
-};
-
-#define ISS_CSIPHY1_NUM_DATA_LANES 4
-#define ISS_CSIPHY2_NUM_DATA_LANES 1
-
-/**
- * struct iss_csiphy_lanes_cfg - CSI2 lane configuration
- * @data: Configuration of one or two data lanes
- * @clk: Clock lane configuration
- */
-struct iss_csiphy_lanes_cfg {
- struct iss_csiphy_lane data[ISS_CSIPHY1_NUM_DATA_LANES];
- struct iss_csiphy_lane clk;
-};
-
-/**
- * struct iss_csi2_platform_data - CSI2 interface platform data
- * @crc: Enable the cyclic redundancy check
- * @vpclk_div: Video port output clock control
- */
-struct iss_csi2_platform_data {
- unsigned crc:1;
- unsigned vpclk_div:2;
- struct iss_csiphy_lanes_cfg lanecfg;
-};
-
-struct iss_subdev_i2c_board_info {
- struct i2c_board_info *board_info;
- int i2c_adapter_id;
-};
-
-struct iss_v4l2_subdevs_group {
- struct iss_subdev_i2c_board_info *subdevs;
- enum iss_interface_type interface;
- union {
- struct iss_csi2_platform_data csi2;
- } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
-};
-
-struct iss_platform_data {
- struct iss_v4l2_subdevs_group *subdevs;
- void (*set_constraints)(struct iss_device *iss, bool enable);
-};
-
-#endif
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index 2ee1a679e592..0e0e8fe6975f 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -42,6 +42,7 @@ enum ksz_chip_id {
LAN9372_CHIP_ID = 0x00937200,
LAN9373_CHIP_ID = 0x00937300,
LAN9374_CHIP_ID = 0x00937400,
+ LAN9646_CHIP_ID = 0x00964600,
};
struct ksz_platform_data {
diff --git a/include/linux/platform_data/x86/intel_scu_ipc.h b/include/linux/platform_data/x86/intel_scu_ipc.h
index 0ca9962e97f2..b287627759f7 100644
--- a/include/linux/platform_data/x86/intel_scu_ipc.h
+++ b/include/linux/platform_data/x86/intel_scu_ipc.h
@@ -2,9 +2,13 @@
#ifndef __PLATFORM_X86_INTEL_SCU_IPC_H_
#define __PLATFORM_X86_INTEL_SCU_IPC_H_
+#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/types.h>
struct device;
+struct module;
+
struct intel_scu_ipc_dev;
/**
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index b637ec14025f..45646bfcaf1a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -30,9 +30,16 @@
* supplier and its PM domain when creating the
* device-links.
*
+ * PD_FLAG_REQUIRED_OPP: Assign required_devs for the required OPPs. The
+ * index of the required OPP must correspond to the
+ * index in the pd_names array. If pd_names isn't
+ * specified, the index simply follows the index of
+ * the attached PM domain.
+ *
*/
#define PD_FLAG_NO_DEV_LINK BIT(0)
#define PD_FLAG_DEV_LINK_ON BIT(1)
+#define PD_FLAG_REQUIRED_OPP BIT(2)
struct dev_pm_domain_attach_data {
const char * const *pd_names;
@@ -43,6 +50,7 @@ struct dev_pm_domain_attach_data {
struct dev_pm_domain_list {
struct device **pd_devs;
struct device_link **pd_links;
+ u32 *opp_tokens;
u32 num_pds;
};
@@ -92,6 +100,10 @@ struct dev_pm_domain_list {
* GENPD_FLAG_OPP_TABLE_FW: The genpd provider supports performance states,
* but its corresponding OPP tables are not
* described in DT, but are given directly by FW.
+ *
+ * GENPD_FLAG_DEV_NAME_FW: Instructs genpd to generate a unique device name
+ * using ida. It is used by genpd providers which
+ * get their genpd-names directly from FW.
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
@@ -101,6 +113,7 @@ struct dev_pm_domain_list {
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
#define GENPD_FLAG_OPP_TABLE_FW (1U << 7)
+#define GENPD_FLAG_DEV_NAME_FW (1U << 8)
enum gpd_status {
GENPD_STATE_ON = 0, /* PM domain is on */
@@ -163,6 +176,7 @@ struct generic_pm_domain {
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
unsigned int device_count; /* Number of devices */
+ unsigned int device_id; /* unique device id */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
unsigned int performance_state; /* Aggregated max performance state */
@@ -244,6 +258,7 @@ struct generic_pm_domain_data {
unsigned int performance_state;
unsigned int default_pstate;
unsigned int rpm_pstate;
+ unsigned int opp_token;
bool hw_mode;
void *data;
};
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 6424692c30b7..568183e3e641 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -62,11 +62,8 @@ typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
* @supported_hw: Array of hierarchy of versions to match.
* @supported_hw_count: Number of elements in the array.
* @regulator_names: Array of pointers to the names of the regulator, NULL terminated.
- * @genpd_names: Null terminated array of pointers containing names of genpd to
- * attach. Mutually exclusive with required_devs.
- * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually
- * exclusive with required_devs.
- * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs.
+ * @required_dev: The required OPP device.
+ * @required_dev_index: The index of the required OPP for the @required_dev.
*
* This structure contains platform specific OPP configurations for the device.
*/
@@ -79,9 +76,8 @@ struct dev_pm_opp_config {
const unsigned int *supported_hw;
unsigned int supported_hw_count;
const char * const *regulator_names;
- const char * const *genpd_names;
- struct device ***virt_devs;
- struct device **required_devs;
+ struct device *required_dev;
+ unsigned int required_dev_index;
};
#define OPP_LEVEL_UNSET U32_MAX
@@ -675,36 +671,6 @@ static inline void dev_pm_opp_put_config_regulators(int token)
dev_pm_opp_clear_config(token);
}
-/* genpd helpers */
-static inline int dev_pm_opp_attach_genpd(struct device *dev,
- const char * const *names,
- struct device ***virt_devs)
-{
- struct dev_pm_opp_config config = {
- .genpd_names = names,
- .virt_devs = virt_devs,
- };
-
- return dev_pm_opp_set_config(dev, &config);
-}
-
-static inline void dev_pm_opp_detach_genpd(int token)
-{
- dev_pm_opp_clear_config(token);
-}
-
-static inline int devm_pm_opp_attach_genpd(struct device *dev,
- const char * const *names,
- struct device ***virt_devs)
-{
- struct dev_pm_opp_config config = {
- .genpd_names = names,
- .virt_devs = virt_devs,
- };
-
- return devm_pm_opp_set_config(dev, &config);
-}
-
/* prop-name helpers */
static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
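
With genpd_names/virt_devs gone, consumers attach their PM domains first and then tell the OPP core which attached device satisfies which required OPP. A hedged sketch (pd_dev is assumed to come from dev_pm_domain_attach_list() with PD_FLAG_REQUIRED_OPP, or a similar attach path):

#include <linux/pm_opp.h>

/* Point required-OPP index 0 of "dev" at an already-attached PM domain
 * device; returns a token for dev_pm_opp_clear_config(). */
static int example_set_required_dev(struct device *dev, struct device *pd_dev)
{
	struct dev_pm_opp_config config = {
		.required_dev = pd_dev,
		.required_dev_index = 0,
	};

	return dev_pm_opp_set_config(dev, &config);
}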
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 453691710839..f11f10c97bd9 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -5,12 +5,16 @@
#include <linux/alarmtimer.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/pid.h>
#include <linux/posix-timers_types.h>
+#include <linux/rcuref.h>
#include <linux/spinlock.h>
#include <linux/timerqueue.h>
struct kernel_siginfo;
struct task_struct;
+struct sigqueue;
+struct k_itimer;
static inline clockid_t make_process_cpuclock(const unsigned int pid,
const clockid_t clock)
@@ -35,6 +39,8 @@ static inline int clockid_to_fd(const clockid_t clk)
#ifdef CONFIG_POSIX_TIMERS
+#include <linux/signal_types.h>
+
/**
* cpu_timer - Posix CPU timer representation for k_itimer
* @node: timerqueue node to queue in the task/sig
@@ -42,6 +48,7 @@ static inline int clockid_to_fd(const clockid_t clk)
* @pid: Pointer to target task PID
* @elist: List head for the expiry list
* @firing: Timer is currently firing
+ * @nanosleep: Timer is used for nanosleep and is not a regular posix-timer
* @handling: Pointer to the task which handles expiry
*/
struct cpu_timer {
@@ -49,7 +56,8 @@ struct cpu_timer {
struct timerqueue_head *head;
struct pid *pid;
struct list_head elist;
- int firing;
+ bool firing;
+ bool nanosleep;
struct task_struct __rcu *handling;
};
@@ -101,6 +109,12 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
pct->bases[CPUCLOCK_SCHED].nextevt = runtime;
}
+void posixtimer_rearm_itimer(struct task_struct *p);
+bool posixtimer_init_sigqueue(struct sigqueue *q);
+void posixtimer_send_sigqueue(struct k_itimer *tmr);
+bool posixtimer_deliver_signal(struct kernel_siginfo *info, struct sigqueue *timer_sigq);
+void posixtimer_free_timer(struct k_itimer *timer);
+
/* Init task static initializer */
#define INIT_CPU_TIMERBASE(b) { \
.nextevt = U64_MAX, \
@@ -122,6 +136,10 @@ struct cpu_timer { };
static inline void posix_cputimers_init(struct posix_cputimers *pct) { }
static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
u64 cpu_limit) { }
+static inline void posixtimer_rearm_itimer(struct task_struct *p) { }
+static inline bool posixtimer_deliver_signal(struct kernel_siginfo *info,
+ struct sigqueue *timer_sigq) { return false; }
+static inline void posixtimer_free_timer(struct k_itimer *timer) { }
#endif
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
@@ -132,50 +150,56 @@ static inline void clear_posix_cputimers_work(struct task_struct *p) { }
static inline void posix_cputimers_init_work(void) { }
#endif
-#define REQUEUE_PENDING 1
-
/**
* struct k_itimer - POSIX.1b interval timer structure.
- * @list: List head for binding the timer to signals->posix_timers
+ * @list: List node for binding the timer to tsk::signal::posix_timers
+ * @ignored_list: List node for tracking ignored timers in tsk::signal::ignored_posix_timers
* @t_hash: Entry in the posix timer hash table
* @it_lock: Lock protecting the timer
* @kclock: Pointer to the k_clock struct handling this timer
* @it_clock: The posix timer clock id
* @it_id: The posix timer id for identifying the timer
- * @it_active: Marker that timer is active
+ * @it_status: The status of the timer
+ * @it_sig_periodic: The periodic status at signal delivery
* @it_overrun: The overrun counter for pending signals
* @it_overrun_last: The overrun at the time of the last delivered signal
- * @it_requeue_pending: Indicator that timer waits for being requeued on
- * signal delivery
+ * @it_signal_seq: Sequence count to control signal delivery
+ * @it_sigqueue_seq: The sequence count at the point where the signal was queued
* @it_sigev_notify: The notify word of sigevent struct for signal delivery
* @it_interval: The interval for periodic timers
* @it_signal: Pointer to the creators signal struct
* @it_pid: The pid of the process/task targeted by the signal
* @it_process: The task to wakeup on clock_nanosleep (CPU timers)
- * @sigq: Pointer to preallocated sigqueue
+ * @rcuref: Reference count for life time management
+ * @sigq: Embedded sigqueue
* @it: Union representing the various posix timer type
* internals.
* @rcu: RCU head for freeing the timer.
*/
struct k_itimer {
struct hlist_node list;
+ struct hlist_node ignored_list;
struct hlist_node t_hash;
spinlock_t it_lock;
const struct k_clock *kclock;
clockid_t it_clock;
timer_t it_id;
- int it_active;
+ int it_status;
+ bool it_sig_periodic;
s64 it_overrun;
s64 it_overrun_last;
- int it_requeue_pending;
+ unsigned int it_signal_seq;
+ unsigned int it_sigqueue_seq;
int it_sigev_notify;
+ enum pid_type it_pid_type;
ktime_t it_interval;
struct signal_struct *it_signal;
union {
struct pid *it_pid;
struct task_struct *it_process;
};
- struct sigqueue *sigq;
+ struct sigqueue sigq;
+ rcuref_t rcuref;
union {
struct {
struct hrtimer timer;
@@ -196,5 +220,29 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
-void posixtimer_rearm(struct kernel_siginfo *info);
+#ifdef CONFIG_POSIX_TIMERS
+static inline void posixtimer_putref(struct k_itimer *tmr)
+{
+ if (rcuref_put(&tmr->rcuref))
+ posixtimer_free_timer(tmr);
+}
+
+static inline void posixtimer_sigqueue_getref(struct sigqueue *q)
+{
+ struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
+
+ WARN_ON_ONCE(!rcuref_get(&tmr->rcuref));
+}
+
+static inline void posixtimer_sigqueue_putref(struct sigqueue *q)
+{
+ struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
+
+ posixtimer_putref(tmr);
+}
+#else /* CONFIG_POSIX_TIMERS */
+static inline void posixtimer_sigqueue_getref(struct sigqueue *q) { }
+static inline void posixtimer_sigqueue_putref(struct sigqueue *q) { }
+#endif /* !CONFIG_POSIX_TIMERS */
+
#endif
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 0e65b3d634d9..e2d47eb1a7f3 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -28,9 +28,9 @@ struct posix_acl_entry {
struct posix_acl {
refcount_t a_refcount;
- struct rcu_head a_rcu;
unsigned int a_count;
- struct posix_acl_entry a_entries[];
+ struct rcu_head a_rcu;
+ struct posix_acl_entry a_entries[] __counted_by(a_count);
};
#define FOREACH_ACL_ENTRY(pa, acl, pe) \
@@ -62,7 +62,7 @@ posix_acl_release(struct posix_acl *acl)
/* posix_acl.c */
extern void posix_acl_init(struct posix_acl *, int);
-extern struct posix_acl *posix_acl_alloc(int, gfp_t);
+extern struct posix_acl *posix_acl_alloc(unsigned int count, gfp_t flags);
extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index f7f1e5251c67..f2ed5b72b3d6 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/once.h>
+#include <linux/percpu.h>
#include <linux/random.h>
struct rnd_state {
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index ce76f1a45722..ca86235ac15c 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -486,6 +486,7 @@ DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
extern bool preempt_model_none(void);
extern bool preempt_model_voluntary(void);
extern bool preempt_model_full(void);
+extern bool preempt_model_lazy(void);
#else
@@ -502,6 +503,11 @@ static inline bool preempt_model_full(void)
return IS_ENABLED(CONFIG_PREEMPT);
}
+static inline bool preempt_model_lazy(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_LAZY);
+}
+
#endif
static inline bool preempt_model_rt(void)
@@ -519,7 +525,7 @@ static inline bool preempt_model_rt(void)
*/
static inline bool preempt_model_preemptible(void)
{
- return preempt_model_full() || preempt_model_rt();
+ return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
}
#endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index eca9bb2ee637..4217a9f412b2 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -166,6 +166,9 @@ __printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
extern void __printk_deferred_enter(void);
extern void __printk_deferred_exit(void);
+extern void printk_force_console_enter(void);
+extern void printk_force_console_exit(void);
+
/*
* The printk_deferred_enter/exit macros are available only as a hack for
* some code paths that need to defer all printk console printing. Interrupts
@@ -229,6 +232,14 @@ static inline void printk_deferred_exit(void)
{
}
+static inline void printk_force_console_enter(void)
+{
+}
+
+static inline void printk_force_console_exit(void)
+{
+}
+
static inline int printk_ratelimit(void)
{
return 0;
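
A hedged sketch of the new force-console bracket: messages emitted inside it reach the consoles even while console output is otherwise being suppressed (caller and message are illustrative):

#include <linux/printk.h>

/* Make sure a last-gasp message is printed to the consoles. */
static void example_emergency_note(void)
{
	printk_force_console_enter();
	pr_emerg("example: state dump follows\n");
	printk_force_console_exit();
}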
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 8acd60b53f58..78827f312407 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -49,6 +49,31 @@ enum {
PWMF_EXPORTED = 1,
};
+/**
+ * struct pwm_waveform - description of a PWM waveform
+ * @period_length_ns: PWM period
+ * @duty_length_ns: PWM duty cycle
+ * @duty_offset_ns: offset of the rising edge from the period's start
+ *
+ * This is a representation of a PWM waveform alternative to struct pwm_state
+ * below. It's more expressive than struct pwm_state as it contains a
+ * duty_offset_ns and so can represent offsets other than zero (with .polarity =
+ * PWM_POLARITY_NORMAL) and period - duty_cycle (.polarity =
+ * PWM_POLARITY_INVERSED).
+ *
+ * Note there is no explicit bool for enabled. A "disabled" PWM is represented
+ * by .period_length_ns = 0. Note further that the behaviour of a "disabled" PWM
+ * is undefined. Depending on the hardware's capabilities it might drive the
+ * active or inactive level, go high-z or even continue to toggle.
+ *
+ * The unit for all three members is nanoseconds.
+ */
+struct pwm_waveform {
+ u64 period_length_ns;
+ u64 duty_length_ns;
+ u64 duty_offset_ns;
+};
+
/*
* struct pwm_state - state of a PWM channel
* @period: PWM period (in nanoseconds)
@@ -251,6 +276,11 @@ struct pwm_capture {
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
* @capture: capture and report PWM signal
+ * @sizeof_wfhw: size (in bytes) of driver specific waveform presentation
+ * @round_waveform_tohw: convert a struct pwm_waveform to driver specific presentation
+ * @round_waveform_fromhw: convert a driver specific waveform presentation to struct pwm_waveform
+ * @read_waveform: read driver specific waveform presentation from hardware
+ * @write_waveform: write driver specific waveform presentation to hardware
* @apply: atomically apply a new PWM config
* @get_state: get the current PWM state.
*/
@@ -259,6 +289,17 @@ struct pwm_ops {
void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_capture *result, unsigned long timeout);
+
+ size_t sizeof_wfhw;
+ int (*round_waveform_tohw)(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_waveform *wf, void *wfhw);
+ int (*round_waveform_fromhw)(struct pwm_chip *chip, struct pwm_device *pwm,
+ const void *wfhw, struct pwm_waveform *wf);
+ int (*read_waveform)(struct pwm_chip *chip, struct pwm_device *pwm,
+ void *wfhw);
+ int (*write_waveform)(struct pwm_chip *chip, struct pwm_device *pwm,
+ const void *wfhw);
+
int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state);
int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -275,6 +316,9 @@ struct pwm_ops {
* @of_xlate: request a PWM device given a device tree PWM specifier
* @atomic: can the driver's ->apply() be called in atomic context
* @uses_pwmchip_alloc: signals if pwmchip_alloc was used to allocate this chip
+ * @operational: signals if the chip can be used (or is already deregistered)
+ * @nonatomic_lock: mutex for nonatomic chips
+ * @atomic_lock: spinlock for atomic chips
* @pwms: array of PWM devices allocated by the framework
*/
struct pwm_chip {
@@ -290,6 +334,16 @@ struct pwm_chip {
/* only used internally by the PWM framework */
bool uses_pwmchip_alloc;
+ bool operational;
+ union {
+ /*
+ * Depending on whether the chip is atomic or not, either the mutex
+ * or the spinlock is used. It protects .operational and synchronizes
+ * the callbacks in .ops.
+ */
+ struct mutex nonatomic_lock;
+ spinlock_t atomic_lock;
+ };
struct pwm_device pwms[] __counted_by(npwm);
};
@@ -309,9 +363,14 @@ static inline void pwmchip_set_drvdata(struct pwm_chip *chip, void *data)
}
#if IS_ENABLED(CONFIG_PWM)
-/* PWM user APIs */
+
+/* PWM consumer APIs */
+int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf);
+int pwm_get_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf);
+int pwm_set_waveform_might_sleep(struct pwm_device *pwm, const struct pwm_waveform *wf, bool exact);
int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state);
int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state);
int pwm_adjust_config(struct pwm_device *pwm);
/**
@@ -436,6 +495,11 @@ static inline int pwm_apply_atomic(struct pwm_device *pwm,
return -EOPNOTSUPP;
}
+static inline int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int pwm_adjust_config(struct pwm_device *pwm)
{
return -EOPNOTSUPP;
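
A consumer sketch of the new waveform API: a 1 kHz signal at 25 % duty whose rising edge is delayed by a quarter period; exact=false asks the core to round to the nearest setting the hardware supports (values illustrative):

#include <linux/pwm.h>

static int example_set_waveform(struct pwm_device *pwm)
{
	struct pwm_waveform wf = {
		.period_length_ns = 1000000,	/* 1 kHz */
		.duty_length_ns   = 250000,	/* 25 % duty */
		.duty_offset_ns   = 250000,	/* rising edge at T/4 */
	};

	return pwm_set_waveform_might_sleep(pwm, &wf, false);
}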
diff --git a/include/linux/random.h b/include/linux/random.h
index b0a940af4fff..333cecfca93f 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -145,13 +145,6 @@ declare_get_random_var_wait(u64, u32)
declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var
-/*
- * This is designed to be standalone for just prandom
- * users, but for now we include it from <linux/random.h>
- * for legacy reasons.
- */
-#include <linux/prandom.h>
-
#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index 6a0999c26c7c..2f630eb8307e 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -14,7 +14,7 @@
*
* If we need to allow unconditional lookups (say as required for NMI context
* usage) we need a more complex setup; this data structure provides this by
- * employing the latch technique -- see @raw_write_seqcount_latch -- to
+ * employing the latch technique -- see @write_seqcount_latch_begin -- to
* implement a latched RB-tree which does allow for unconditional lookups by
* virtue of always having (at least) one stable copy of the tree.
*
@@ -132,7 +132,7 @@ __lt_find(void *key, struct latch_tree_root *ltr, int idx,
* @ops: operators defining the node order
*
* It inserts @node into @root in an ordered fashion such that we can always
- * observe one complete tree. See the comment for raw_write_seqcount_latch().
+ * observe one complete tree. See the comment for write_seqcount_latch_begin().
*
* The inserts use rcu_assign_pointer() to publish the element such that the
* tree structure is stored before we can observe the new @node.
@@ -145,10 +145,11 @@ latch_tree_insert(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch_begin(&root->seq);
__lt_insert(node, root, 0, ops->less);
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch(&root->seq);
__lt_insert(node, root, 1, ops->less);
+ write_seqcount_latch_end(&root->seq);
}
/**
@@ -159,7 +160,7 @@ latch_tree_insert(struct latch_tree_node *node,
*
* Removes @node from the trees @root in an ordered fashion such that we can
* always observe one complete tree. See the comment for
- * raw_write_seqcount_latch().
+ * write_seqcount_latch_begin().
*
* It is assumed that @node will observe one RCU quiescent state before being
* reused or freed.
@@ -172,10 +173,11 @@ latch_tree_erase(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch_begin(&root->seq);
__lt_erase(node, root, 0);
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch(&root->seq);
__lt_erase(node, root, 1);
+ write_seqcount_latch_end(&root->seq);
}
/**
@@ -204,9 +206,9 @@ latch_tree_find(void *key, struct latch_tree_root *root,
unsigned int seq;
do {
- seq = raw_read_seqcount_latch(&root->seq);
+ seq = read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
- } while (raw_read_seqcount_latch_retry(&root->seq, seq));
+ } while (read_seqcount_latch_retry(&root->seq, seq));
return node;
}
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 0ee270b3f5ed..fe42315f667f 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -165,7 +165,6 @@ static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_eqs(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
-static inline bool rcu_gp_might_be_stalled(void) { return false; }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 90a684f94776..27d86d912781 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -40,7 +40,6 @@ void kvfree_rcu_barrier(void);
void rcu_barrier(void);
void rcu_momentary_eqs(void);
void kfree_rcu_scheduler_running(void);
-bool rcu_gp_might_be_stalled(void);
struct rcu_gp_oldstate {
unsigned long rgos_norm;
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index f9ccad32fc5c..fd41baccbf3e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -54,7 +54,14 @@ struct sdw_slave;
#define REGMAP_UPSHIFT(s) (-(s))
#define REGMAP_DOWNSHIFT(s) (s)
-/* An enum of all the supported cache types */
+/*
+ * The supported cache types, the default is no cache. Any new caches
+ * should usually use the maple tree cache unless they specifically
+ * require that there are never any allocations at runtime and can't
+ * provide defaults in which case they should use the flat cache. The
+ * rbtree cache *may* have some performance advantage for very low end
+ * systems that make heavy use of cache syncs but is mainly legacy.
+ */
enum regcache_type {
REGCACHE_NONE,
REGCACHE_RBTREE,
@@ -106,17 +113,17 @@ struct reg_sequence {
* @addr: Address to poll
* @val: Unsigned integer variable to read the value into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read the usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
* error return value in case of a error read. In the two former cases,
* the last read value at @addr is stored in @val. Must not be called
* from atomic context if sleep_us or timeout_us are used.
- *
- * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
*/
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
@@ -133,20 +140,20 @@ struct reg_sequence {
* @addr: Address to poll
* @val: Unsigned integer variable to read the value into
* @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops).
- * Should be less than ~10us since udelay is used
- * (see Documentation/timers/timers-howto.rst).
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ * read the udelay() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
- * error return value in case of a error read. In the two former cases,
- * the last read value at @addr is stored in @val.
- *
* This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h.
*
* Note: In general regmap cannot be used in atomic context. If you want to use
* this macro then first setup your regmap for atomic use (flat or no cache
* and MMIO regmap).
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of a error read. In the two former cases,
+ * the last read value at @addr is stored in @val.
*/
#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
({ \
@@ -177,17 +184,17 @@ struct reg_sequence {
* @field: Regmap field to read from
* @val: Unsigned integer variable to read the value into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read the usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
* error return value in case of a error read. In the two former cases,
* the last read value at @addr is stored in @val. Must not be called
* from atomic context if sleep_us or timeout_us are used.
- *
- * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
*/
#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \
({ \
@@ -1328,6 +1335,15 @@ static inline int regmap_clear_bits(struct regmap *map,
return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false);
}
+static inline int regmap_assign_bits(struct regmap *map, unsigned int reg,
+ unsigned int bits, bool value)
+{
+ if (value)
+ return regmap_set_bits(map, reg, bits);
+ else
+ return regmap_clear_bits(map, reg, bits);
+}
+
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits);
/**
@@ -1796,6 +1812,13 @@ static inline int regmap_clear_bits(struct regmap *map,
return -EINVAL;
}
+static inline int regmap_assign_bits(struct regmap *map, unsigned int reg,
+ unsigned int bits, bool value)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_test_bits(struct regmap *map,
unsigned int reg, unsigned int bits)
{
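
regmap_assign_bits() folds the common if/else around set/clear into a single call. A sketch (register offset and bit are illustrative):

#include <linux/bits.h>
#include <linux/regmap.h>

/* Drive a hypothetical output-enable bit from a boolean. */
static int example_set_output(struct regmap *map, bool enable)
{
	return regmap_assign_bits(map, 0x04, BIT(0), enable);
}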
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index b9ce521910a0..8c3c372ad735 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -168,6 +168,29 @@ int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id);
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
+#if IS_ENABLED(CONFIG_OF)
+struct regulator *__must_check of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id);
+struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id);
+#else
+static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
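These lookups resolve a supply on an arbitrary device_node rather than on the consumer's own node. A hedged probe-path sketch (the node layout and "vbus" name are hypothetical; an absent optional supply is assumed to surface as ERR_PTR(-ENODEV), matching the stubs above):

static int foo_enable_port_supply(struct device *dev,
				  struct device_node *port_np)
{
	struct regulator *vbus;

	vbus = devm_of_regulator_get_optional(dev, port_np, "vbus");
	if (PTR_ERR(vbus) == -ENODEV)
		return 0;			/* supply not wired up */
	if (IS_ERR(vbus))
		return PTR_ERR(vbus);

	return regulator_enable(vbus);
}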
int regulator_register_supply_alias(struct device *dev, const char *id,
struct device *alias_dev,
const char *alias_id);
@@ -350,6 +373,20 @@ devm_regulator_get_optional(struct device *dev, const char *id)
return ERR_PTR(-ENODEV);
}
+static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void regulator_put(struct regulator *regulator)
{
}
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index f230a472ccd3..5b66caf1695d 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -269,6 +269,11 @@ enum regulator_type {
* config but it cannot store it for later usage.
* Callback should return 0 on success or negative ERRNO
* indicating failure.
+ * @init_cb:		Optional callback called after the parsing of init_data.
+ *			Allows the regulator to perform runtime init if necessary,
+ *			such as syncing the regulator state with the parsed
+ *			constraints.
+ *			Callback should return 0 on success or negative ERRNO
+ *			indicating failure.
* @id: Numerical identifier for the regulator.
* @ops: Regulator operations table.
* @irq: Interrupt number for the regulator.
@@ -365,6 +370,8 @@ struct regulator_desc {
int (*of_parse_cb)(struct device_node *,
const struct regulator_desc *,
struct regulator_config *);
+ int (*init_cb)(struct regulator_dev *,
+ struct regulator_config *);
int id;
unsigned int continuous_voltage_range:1;
unsigned n_voltages;
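A minimal descriptor sketch wiring up the new callback (names hypothetical, everything besides .init_cb elided):

static int foo_regulator_init(struct regulator_dev *rdev,
			      struct regulator_config *config)
{
	/* e.g. align cached hardware state with the parsed constraints */
	return 0;
}

static const struct regulator_desc foo_buck_desc = {
	.name		= "foo-buck",
	.init_cb	= foo_regulator_init,
	/* .ops, .id, .n_voltages, ... */
};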
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 0cd76d264727..b3db09a7429b 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -273,8 +273,6 @@ struct regulator_consumer_supply {
* be usable.
* @num_consumer_supplies: Number of consumer device supplies.
* @consumer_supplies: Consumer device supply configuration.
- *
- * @regulator_init: Callback invoked when the regulator has been registered.
* @driver_data: Data passed to regulator_init.
*/
struct regulator_init_data {
@@ -285,8 +283,7 @@ struct regulator_init_data {
int num_consumer_supplies;
struct regulator_consumer_supply *consumer_supplies;
- /* optional regulator machine specific init */
- int (*regulator_init)(void *driver_data);
+ /* optional regulator machine specific data */
void *driver_data; /* core does not touch this */
};
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 514ddf003efc..2986ced69a02 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -25,6 +25,48 @@ struct reset_control_bulk_data {
struct reset_control *rstc;
};
+#define RESET_CONTROL_FLAGS_BIT_SHARED BIT(0) /* not exclusive */
+#define RESET_CONTROL_FLAGS_BIT_OPTIONAL BIT(1)
+#define RESET_CONTROL_FLAGS_BIT_ACQUIRED BIT(2) /* iff exclusive, not released */
+#define RESET_CONTROL_FLAGS_BIT_DEASSERTED BIT(3)
+
+/**
+ * enum reset_control_flags - Flags that can be passed to the reset_control_get functions
+ * to determine the type of reset control.
+ * These values cannot be OR'd.
+ *
+ * @RESET_CONTROL_EXCLUSIVE: exclusive, acquired
+ * @RESET_CONTROL_EXCLUSIVE_DEASSERTED: exclusive, acquired, deasserted
+ * @RESET_CONTROL_EXCLUSIVE_RELEASED: exclusive, released
+ * @RESET_CONTROL_SHARED: shared
+ * @RESET_CONTROL_SHARED_DEASSERTED: shared, deasserted
+ * @RESET_CONTROL_OPTIONAL_EXCLUSIVE: optional, exclusive, acquired
+ * @RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED: optional, exclusive, acquired, deasserted
+ * @RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED: optional, exclusive, released
+ * @RESET_CONTROL_OPTIONAL_SHARED: optional, shared
+ * @RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED: optional, shared, deasserted
+ */
+enum reset_control_flags {
+ RESET_CONTROL_EXCLUSIVE = RESET_CONTROL_FLAGS_BIT_ACQUIRED,
+ RESET_CONTROL_EXCLUSIVE_DEASSERTED = RESET_CONTROL_FLAGS_BIT_ACQUIRED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+ RESET_CONTROL_EXCLUSIVE_RELEASED = 0,
+ RESET_CONTROL_SHARED = RESET_CONTROL_FLAGS_BIT_SHARED,
+ RESET_CONTROL_SHARED_DEASSERTED = RESET_CONTROL_FLAGS_BIT_SHARED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_ACQUIRED,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_ACQUIRED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED = RESET_CONTROL_FLAGS_BIT_OPTIONAL,
+ RESET_CONTROL_OPTIONAL_SHARED = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_SHARED,
+ RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_SHARED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+};
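Each enum value is a pre-composed combination of the bits above; implementations decode the individual bits again, as the !CONFIG_RESET_CONTROLLER stubs below do for RESET_CONTROL_FLAGS_BIT_OPTIONAL. Hypothetical helpers for the other bits would follow the same pattern:

static inline bool reset_flags_is_shared(enum reset_control_flags flags)
{
	return flags & RESET_CONTROL_FLAGS_BIT_SHARED;
}

static inline bool reset_flags_is_acquired(enum reset_control_flags flags)
{
	return flags & RESET_CONTROL_FLAGS_BIT_ACQUIRED;
}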
+
#ifdef CONFIG_RESET_CONTROLLER
int reset_control_reset(struct reset_control *rstc);
@@ -42,30 +84,25 @@ int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rs
void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs);
struct reset_control *__of_reset_control_get(struct device_node *node,
- const char *id, int index, bool shared,
- bool optional, bool acquired);
+ const char *id, int index, enum reset_control_flags flags);
struct reset_control *__reset_control_get(struct device *dev, const char *id,
- int index, bool shared,
- bool optional, bool acquired);
+ int index, enum reset_control_flags flags);
void reset_control_put(struct reset_control *rstc);
int __reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired);
+ enum reset_control_flags flags);
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs);
int __device_reset(struct device *dev, bool optional);
struct reset_control *__devm_reset_control_get(struct device *dev,
- const char *id, int index, bool shared,
- bool optional, bool acquired);
+ const char *id, int index, enum reset_control_flags flags);
int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired);
+ enum reset_control_flags flags);
struct reset_control *devm_reset_control_array_get(struct device *dev,
- bool shared, bool optional);
-struct reset_control *of_reset_control_array_get(struct device_node *np,
- bool shared, bool optional,
- bool acquired);
+ enum reset_control_flags flags);
+struct reset_control *of_reset_control_array_get(struct device_node *np, enum reset_control_flags);
int reset_control_get_count(struct device *dev);
@@ -116,17 +153,19 @@ static inline int __device_reset(struct device *dev, bool optional)
static inline struct reset_control *__of_reset_control_get(
struct device_node *node,
- const char *id, int index, bool shared,
- bool optional, bool acquired)
+ const char *id, int index, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *__reset_control_get(
struct device *dev, const char *id,
- int index, bool shared, bool optional,
- bool acquired)
+ int index, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
@@ -162,8 +201,10 @@ reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs)
static inline int
__reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired)
+ enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? 0 : -EOPNOTSUPP;
}
@@ -174,30 +215,36 @@ reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
- int index, bool shared, bool optional,
- bool acquired)
+ int index, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline int
__devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired)
+ enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? 0 : -EOPNOTSUPP;
}
static inline struct reset_control *
-devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
+devm_reset_control_array_get(struct device *dev, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *
-of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
- bool acquired)
+of_reset_control_array_get(struct device_node *np, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
@@ -236,7 +283,7 @@ static inline int device_reset_optional(struct device *dev)
static inline struct reset_control *
__must_check reset_control_get_exclusive(struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, false, false, true);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -253,7 +300,7 @@ static inline int __must_check
reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -274,7 +321,7 @@ static inline struct reset_control *
__must_check reset_control_get_exclusive_released(struct device *dev,
const char *id)
{
- return __reset_control_get(dev, id, 0, false, false, false);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -295,7 +342,7 @@ static inline int __must_check
reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -316,7 +363,8 @@ static inline int __must_check
reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED);
}
/**
@@ -344,7 +392,7 @@ reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_r
static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, true, false, false);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_SHARED);
}
/**
@@ -361,7 +409,7 @@ static inline int __must_check
reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_SHARED);
}
/**
@@ -378,7 +426,7 @@ reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, false, true, true);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -398,7 +446,7 @@ static inline int __must_check
reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -415,7 +463,7 @@ reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
static inline struct reset_control *reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, true, true, false);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED);
}
/**
@@ -435,7 +483,7 @@ static inline int __must_check
reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_SHARED);
}
/**
@@ -451,7 +499,7 @@ reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *of_reset_control_get_exclusive(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, false, false, true);
+ return __of_reset_control_get(node, id, 0, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -471,7 +519,7 @@ static inline struct reset_control *of_reset_control_get_exclusive(
static inline struct reset_control *of_reset_control_get_optional_exclusive(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, false, true, true);
+ return __of_reset_control_get(node, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -496,7 +544,7 @@ static inline struct reset_control *of_reset_control_get_optional_exclusive(
static inline struct reset_control *of_reset_control_get_shared(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, true, false, false);
+ return __of_reset_control_get(node, id, 0, RESET_CONTROL_SHARED);
}
/**
@@ -513,7 +561,7 @@ static inline struct reset_control *of_reset_control_get_shared(
static inline struct reset_control *of_reset_control_get_exclusive_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, false, false, true);
+ return __of_reset_control_get(node, NULL, index, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -541,7 +589,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
static inline struct reset_control *of_reset_control_get_shared_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, true, false, false);
+ return __of_reset_control_get(node, NULL, index, RESET_CONTROL_SHARED);
}
/**
@@ -560,7 +608,26 @@ static inline struct reset_control *
__must_check devm_reset_control_get_exclusive(struct device *dev,
const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, false, true);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE);
+}
+
+/**
+ * devm_reset_control_get_exclusive_deasserted - resource managed
+ * reset_control_get_exclusive() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_exclusive() + reset_control_deassert(). For reset
+ * controllers returned from this function, reset_control_assert() +
+ * reset_control_put() is called automatically on driver detach.
+ *
+ * See reset_control_get_exclusive() for more information.
+ */
+static inline struct reset_control * __must_check
+devm_reset_control_get_exclusive_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_DEASSERTED);
}
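A probe-path sketch for the new getter (driver names hypothetical):

static int foo_probe(struct platform_device *pdev)
{
	struct reset_control *rst;

	/* line is deasserted here, re-asserted and put on detach */
	rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return dev_err_probe(&pdev->dev, PTR_ERR(rst),
				     "cannot get deasserted reset\n");

	return 0;
}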
/**
@@ -580,7 +647,8 @@ static inline int __must_check
devm_reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -599,7 +667,7 @@ static inline struct reset_control *
__must_check devm_reset_control_get_exclusive_released(struct device *dev,
const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, false, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -619,7 +687,8 @@ static inline int __must_check
devm_reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -638,7 +707,7 @@ static inline struct reset_control *
__must_check devm_reset_control_get_optional_exclusive_released(struct device *dev,
const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, true, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED);
}
/**
@@ -658,7 +727,8 @@ static inline int __must_check
devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED);
}
/**
@@ -673,7 +743,26 @@ devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int
static inline struct reset_control *devm_reset_control_get_shared(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, true, false, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_SHARED);
+}
+
+/**
+ * devm_reset_control_get_shared_deasserted - resource managed
+ * reset_control_get_shared() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_shared() + reset_control_deassert(). For reset
+ * controllers returned from this function, reset_control_assert() +
+ * reset_control_put() is called automatically on driver detach.
+ *
+ * See devm_reset_control_get_shared() for more information.
+ */
+static inline struct reset_control * __must_check
+devm_reset_control_get_shared_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_SHARED_DEASSERTED);
}
/**
@@ -693,7 +782,29 @@ static inline int __must_check
devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_SHARED);
+}
+
+/**
+ * devm_reset_control_bulk_get_shared_deasserted - resource managed
+ * reset_control_bulk_get_shared() +
+ * reset_control_bulk_deassert()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_shared() + reset_control_bulk_deassert(). For
+ * reset controllers returned from this function, reset_control_bulk_assert() +
+ * reset_control_bulk_put() are called automatically on driver detach.
+ *
+ * See devm_reset_control_bulk_get_shared() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_shared_deasserted(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_SHARED_DEASSERTED);
}
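A bulk sketch (ids hypothetical). Since the devm release path is assumed to operate on the caller's array, it lives in long-lived driver data rather than on the stack:

struct foo_priv {
	struct reset_control_bulk_data resets[2];
};

static int foo_get_resets(struct device *dev, struct foo_priv *priv)
{
	priv->resets[0].id = "bus";
	priv->resets[1].id = "core";

	return devm_reset_control_bulk_get_shared_deasserted(dev, 2,
							     priv->resets);
}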
/**
@@ -711,7 +822,26 @@ devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *devm_reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, true, true);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
+}
+
+/**
+ * devm_reset_control_get_optional_exclusive_deasserted - resource managed
+ * reset_control_get_optional_exclusive() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_optional_exclusive() + reset_control_deassert().
+ * For reset controllers returned from this function, reset_control_assert() +
+ * reset_control_put() is called automatically on driver detach.
+ *
+ * See devm_reset_control_get_optional_exclusive() for more information.
+ */
+static inline struct reset_control *
+devm_reset_control_get_optional_exclusive_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED);
}
/**
@@ -731,7 +861,8 @@ static inline int __must_check
devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -749,7 +880,26 @@ devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs
static inline struct reset_control *devm_reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, true, true, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED);
+}
+
+/**
+ * devm_reset_control_get_optional_shared_deasserted - resource managed
+ * reset_control_get_optional_shared() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_optional_shared() + reset_control_deassert(). For
+ * reset controllers returned from this function, reset_control_assert() +
+ * reset_control_put() is called automatically on driver detach.
+ *
+ * See devm_reset_control_get_optional_shared() for more information.
+ */
+static inline struct reset_control *
+devm_reset_control_get_optional_shared_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED);
}
/**
@@ -769,7 +919,7 @@ static inline int __must_check
devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_SHARED);
}
/**
@@ -787,7 +937,7 @@ devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *
devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
{
- return __devm_reset_control_get(dev, NULL, index, false, false, true);
+ return __devm_reset_control_get(dev, NULL, index, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -803,7 +953,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
static inline struct reset_control *
devm_reset_control_get_shared_by_index(struct device *dev, int index)
{
- return __devm_reset_control_get(dev, NULL, index, true, false, false);
+ return __devm_reset_control_get(dev, NULL, index, RESET_CONTROL_SHARED);
}
/*
@@ -851,54 +1001,54 @@ static inline struct reset_control *devm_reset_control_get_by_index(
static inline struct reset_control *
devm_reset_control_array_get_exclusive(struct device *dev)
{
- return devm_reset_control_array_get(dev, false, false);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_EXCLUSIVE);
}
static inline struct reset_control *
devm_reset_control_array_get_shared(struct device *dev)
{
- return devm_reset_control_array_get(dev, true, false);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_SHARED);
}
static inline struct reset_control *
devm_reset_control_array_get_optional_exclusive(struct device *dev)
{
- return devm_reset_control_array_get(dev, false, true);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
static inline struct reset_control *
devm_reset_control_array_get_optional_shared(struct device *dev)
{
- return devm_reset_control_array_get(dev, true, true);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_OPTIONAL_SHARED);
}
static inline struct reset_control *
of_reset_control_array_get_exclusive(struct device_node *node)
{
- return of_reset_control_array_get(node, false, false, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE);
}
static inline struct reset_control *
of_reset_control_array_get_exclusive_released(struct device_node *node)
{
- return of_reset_control_array_get(node, false, false, false);
+ return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
static inline struct reset_control *
of_reset_control_array_get_shared(struct device_node *node)
{
- return of_reset_control_array_get(node, true, false, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_SHARED);
}
static inline struct reset_control *
of_reset_control_array_get_optional_exclusive(struct device_node *node)
{
- return of_reset_control_array_get(node, false, true, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
static inline struct reset_control *
of_reset_control_array_get_optional_shared(struct device_node *node)
{
- return of_reset_control_array_get(node, true, true, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_SHARED);
}
#endif
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index a7da7dfc06a2..14b88f551920 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -7,7 +7,6 @@
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/refcount.h>
-#include <linux/cleanup.h>
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -47,13 +46,15 @@ extern int rtnl_is_locked(void);
extern int rtnl_lock_killable(void);
extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
-DEFINE_LOCK_GUARD_0(rtnl, rtnl_lock(), rtnl_unlock())
-
extern wait_queue_head_t netdev_unregistering_wq;
extern atomic_t dev_unreg_count;
extern struct rw_semaphore pernet_ops_rwsem;
extern struct rw_semaphore net_rwsem;
+#define ASSERT_RTNL() \
+ WARN_ONCE(!rtnl_is_locked(), \
+ "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
+
#ifdef CONFIG_PROVE_LOCKING
extern bool lockdep_rtnl_is_held(void);
#else
@@ -95,6 +96,61 @@ static inline bool lockdep_rtnl_is_held(void)
#define rcu_replace_pointer_rtnl(rp, p) \
rcu_replace_pointer(rp, p, lockdep_rtnl_is_held())
+#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
+void __rtnl_net_lock(struct net *net);
+void __rtnl_net_unlock(struct net *net);
+void rtnl_net_lock(struct net *net);
+void rtnl_net_unlock(struct net *net);
+int rtnl_net_trylock(struct net *net);
+int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b);
+
+bool rtnl_net_is_locked(struct net *net);
+
+#define ASSERT_RTNL_NET(net) \
+ WARN_ONCE(!rtnl_net_is_locked(net), \
+ "RTNL_NET: assertion failed at %s (%d)\n", \
+ __FILE__, __LINE__)
+
+bool lockdep_rtnl_net_is_held(struct net *net);
+
+#define rcu_dereference_rtnl_net(net, p) \
+ rcu_dereference_check(p, lockdep_rtnl_net_is_held(net))
+#define rtnl_net_dereference(net, p) \
+ rcu_dereference_protected(p, lockdep_rtnl_net_is_held(net))
+#define rcu_replace_pointer_rtnl_net(net, rp, p) \
+ rcu_replace_pointer(rp, p, lockdep_rtnl_net_is_held(net))
+#else
+static inline void __rtnl_net_lock(struct net *net) {}
+static inline void __rtnl_net_unlock(struct net *net) {}
+
+static inline void rtnl_net_lock(struct net *net)
+{
+ rtnl_lock();
+}
+
+static inline void rtnl_net_unlock(struct net *net)
+{
+ rtnl_unlock();
+}
+
+static inline int rtnl_net_trylock(struct net *net)
+{
+ return rtnl_trylock();
+}
+
+static inline void ASSERT_RTNL_NET(struct net *net)
+{
+ ASSERT_RTNL();
+}
+
+#define rcu_dereference_rtnl_net(net, p) \
+ rcu_dereference_rtnl(p)
+#define rtnl_net_dereference(net, p) \
+ rtnl_dereference(p)
+#define rcu_replace_pointer_rtnl_net(net, rp, p) \
+ rcu_replace_pointer_rtnl(rp, p)
+#endif
+
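A sketch of the intended conversion: callers take the per-netns lock, and with CONFIG_DEBUG_NET_SMALL_RTNL=n this compiles down to the plain rtnl_lock()/rtnl_unlock() pair, so call sites can convert unconditionally:

static void foo_update_netns_state(struct net *net)
{
	rtnl_net_lock(net);
	ASSERT_RTNL_NET(net);
	/* ... mutate state scoped to @net ... */
	rtnl_net_unlock(net);
}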
static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
return rtnl_dereference(dev->ingress_queue);
@@ -122,10 +178,6 @@ void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
-#define ASSERT_RTNL() \
- WARN_ONCE(!rtnl_is_locked(), \
- "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
-
extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index 8544ff05e594..7d81fc6918ee 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -24,13 +24,13 @@ do { \
__rt_rwlock_init(rwl, #rwl, &__key); \
} while (0)
-extern void rt_read_lock(rwlock_t *rwlock);
+extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock);
extern int rt_read_trylock(rwlock_t *rwlock);
-extern void rt_read_unlock(rwlock_t *rwlock);
-extern void rt_write_lock(rwlock_t *rwlock);
-extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass);
+extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock);
+extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock);
+extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock);
extern int rt_write_trylock(rwlock_t *rwlock);
-extern void rt_write_unlock(rwlock_t *rwlock);
+extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock);
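With the annotations in place, sparse can track the context change across a balanced pair, so an error path that leaves the lock held now warns; a trivial balanced user:

static void foo_counter_inc(rwlock_t *lock, unsigned long *counter)
{
	rt_write_lock(lock);		/* __acquires(lock) */
	(*counter)++;
	rt_write_unlock(lock);		/* __releases(lock) */
}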
static __always_inline void read_lock(rwlock_t *rwlock)
{
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bb343136ddd0..f0e9e00d3cf5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1001,7 +1001,7 @@ struct task_struct {
#ifdef CONFIG_ARCH_HAS_CPU_PASID
unsigned pasid_activated:1;
#endif
-#ifdef CONFIG_CPU_SUP_INTEL
+#ifdef CONFIG_X86_BUS_LOCK_DETECT
unsigned reported_split_lock:1;
#endif
#ifdef CONFIG_TASK_DELAY_ACCT
@@ -1441,6 +1441,7 @@ struct task_struct {
/* Timestamp for last schedule: */
unsigned long long ftrace_timestamp;
+ unsigned long long ftrace_sleeptime;
/*
* Number of functions that haven't been traced
@@ -1898,7 +1899,7 @@ extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_THREAD_INFO_IN_TASK
# define task_thread_info(task) (&(task)->thread_info)
-#elif !defined(__HAVE_THREAD_FUNCTIONS)
+#else
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
@@ -2002,7 +2003,8 @@ static inline void set_tsk_need_resched(struct task_struct *tsk)
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
- clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+ atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY,
+ (atomic_long_t *)&task_thread_info(tsk)->flags);
}
static inline int test_tsk_need_resched(struct task_struct *tsk)
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index 1ddbde64a31b..1d70a9867fb1 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -199,17 +199,18 @@ struct sched_ext_entity {
#ifdef CONFIG_EXT_GROUP_SCHED
struct cgroup *cgrp_moving_from;
#endif
- /* must be the last field, see init_scx_entity() */
struct list_head tasks_node;
};
void sched_ext_free(struct task_struct *p);
void print_scx_info(const char *log_lvl, struct task_struct *p);
+void scx_softlockup(u32 dur_s);
#else /* !CONFIG_SCHED_CLASS_EXT */
static inline void sched_ext_free(struct task_struct *p) {}
static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
+static inline void scx_softlockup(u32 dur_s) {}
#endif /* CONFIG_SCHED_CLASS_EXT */
#endif /* _LINUX_SCHED_EXT_H */
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index c8ed09ac29ac..d5d03d919df8 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -138,6 +138,7 @@ struct signal_struct {
/* POSIX.1b Interval Timers */
unsigned int next_posix_timer_id;
struct hlist_head posix_timers;
+ struct hlist_head ignored_posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
@@ -338,9 +339,6 @@ extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
-extern struct sigqueue *sigqueue_alloc(void);
-extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
static inline void clear_notify_signal(void)
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index bf10bdb487dd..cffad65bdc6a 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/magic.h>
#include <linux/refcount.h>
+#include <linux/kasan.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
@@ -33,7 +34,7 @@ static __always_inline unsigned long *end_of_stack(const struct task_struct *tas
#endif
}
-#elif !defined(__HAVE_THREAD_FUNCTIONS)
+#else
#define task_stack_page(task) ((void *)(task)->stack)
@@ -89,6 +90,7 @@ static inline int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(current);
+ obj = kasan_reset_tag(obj);
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 709ad84809e1..341980599c71 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -32,6 +32,11 @@ static inline int secure_computing(void)
}
#else
extern void secure_computing_strict(int this_syscall);
+static inline int __secure_computing(const struct seccomp_data *sd)
+{
+ secure_computing_strict(sd->nr);
+ return 0;
+}
#endif
extern long prctl_get_seccomp(void);
diff --git a/include/linux/security.h b/include/linux/security.h
index 2ec8f3014757..cbdba435b798 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -34,6 +34,10 @@
#include <linux/sockptr.h>
#include <linux/bpf.h>
#include <uapi/linux/lsm.h>
+#include <linux/lsm/selinux.h>
+#include <linux/lsm/smack.h>
+#include <linux/lsm/apparmor.h>
+#include <linux/lsm/bpf.h>
struct linux_binprm;
struct cred;
@@ -152,6 +156,16 @@ enum lockdown_reason {
LOCKDOWN_CONFIDENTIALITY_MAX,
};
+/*
+ * Data exported by the security modules
+ */
+struct lsm_prop {
+ struct lsm_prop_selinux selinux;
+ struct lsm_prop_smack smack;
+ struct lsm_prop_apparmor apparmor;
+ struct lsm_prop_bpf bpf;
+};
+
extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
extern u32 lsm_active_cnt;
extern const struct lsm_id *lsm_idlist[];
@@ -269,8 +283,32 @@ static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id)
return kernel_load_data_str[id];
}
+/**
+ * lsmprop_init - initialize a lsm_prop structure
+ * @prop: Pointer to the data to initialize
+ *
+ * Zero the per-module data for all modules.
+ */
+static inline void lsmprop_init(struct lsm_prop *prop)
+{
+ memset(prop, 0, sizeof(*prop));
+}
+
#ifdef CONFIG_SECURITY
+/**
+ * lsmprop_is_set - report if there is a value in the lsm_prop
+ * @prop: Pointer to the exported LSM data
+ *
+ * Returns true if there is a value set, false otherwise
+ */
+static inline bool lsmprop_is_set(struct lsm_prop *prop)
+{
+ const struct lsm_prop empty = {};
+
+ return !!memcmp(prop, &empty, sizeof(*prop));
+}
+
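A sketch of the secid-to-lsm_prop conversion pattern served by the getlsmprop hooks declared below:

static void foo_collect_subject(void)
{
	struct lsm_prop prop;

	security_current_getlsmprop_subj(&prop);
	if (!lsmprop_is_set(&prop))
		return;		/* no module recorded anything */
	/* ... hand &prop to e.g. security_lsmprop_to_secctx() ... */
}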
int call_blocking_lsm_notifier(enum lsm_event event, void *data);
int register_blocking_lsm_notifier(struct notifier_block *nb);
int unregister_blocking_lsm_notifier(struct notifier_block *nb);
@@ -408,7 +446,7 @@ int security_inode_getsecurity(struct mnt_idmap *idmap,
void **buffer, bool alloc);
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
-void security_inode_getsecid(struct inode *inode, u32 *secid);
+void security_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop);
int security_inode_copy_up(struct dentry *src, struct cred **new);
int security_inode_copy_up_xattr(struct dentry *src, const char *name);
int security_inode_setintegrity(const struct inode *inode,
@@ -444,6 +482,7 @@ void security_cred_free(struct cred *cred);
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
void security_transfer_creds(struct cred *new, const struct cred *old);
void security_cred_getsecid(const struct cred *c, u32 *secid);
+void security_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
@@ -463,8 +502,8 @@ int security_task_fix_setgroups(struct cred *new, const struct cred *old);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
int security_task_getsid(struct task_struct *p);
-void security_current_getsecid_subj(u32 *secid);
-void security_task_getsecid_obj(struct task_struct *p, u32 *secid);
+void security_current_getlsmprop_subj(struct lsm_prop *prop);
+void security_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop);
int security_task_setnice(struct task_struct *p, int nice);
int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
@@ -482,7 +521,7 @@ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
void security_task_to_inode(struct task_struct *p, struct inode *inode);
int security_create_user_ns(const struct cred *cred);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
-void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
+void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, struct lsm_prop *prop);
int security_msg_msg_alloc(struct msg_msg *msg);
void security_msg_msg_free(struct msg_msg *msg);
int security_msg_queue_alloc(struct kern_ipc_perm *msq);
@@ -515,6 +554,7 @@ int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
+int security_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata, u32 *seclen);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
void security_release_secctx(char *secdata, u32 seclen);
void security_inode_invalidate_secctx(struct inode *inode);
@@ -531,6 +571,17 @@ int security_bdev_setintegrity(struct block_device *bdev,
size_t size);
#else /* CONFIG_SECURITY */
+/**
+ * lsmprop_is_set - report if there is a value in the lsm_prop
+ * @prop: Pointer to the exported LSM data
+ *
+ * Returns true if there is a value set, false otherwise
+ */
+static inline bool lsmprop_is_set(struct lsm_prop *prop)
+{
+ return false;
+}
+
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
return 0;
@@ -1020,9 +1071,10 @@ static inline int security_inode_listsecurity(struct inode *inode, char *buffer,
return 0;
}
-static inline void security_inode_getsecid(struct inode *inode, u32 *secid)
+static inline void security_inode_getlsmprop(struct inode *inode,
+ struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
@@ -1172,6 +1224,10 @@ static inline void security_cred_getsecid(const struct cred *c, u32 *secid)
*secid = 0;
}
+static inline void security_cred_getlsmprop(const struct cred *c,
+ struct lsm_prop *prop)
+{ }
+
static inline int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
@@ -1249,14 +1305,15 @@ static inline int security_task_getsid(struct task_struct *p)
return 0;
}
-static inline void security_current_getsecid_subj(u32 *secid)
+static inline void security_current_getlsmprop_subj(struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
-static inline void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
+static inline void security_task_getlsmprop_obj(struct task_struct *p,
+ struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
static inline int security_task_setnice(struct task_struct *p, int nice)
@@ -1332,9 +1389,10 @@ static inline int security_ipc_permission(struct kern_ipc_perm *ipcp,
return 0;
}
-static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
+static inline void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp,
+ struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
static inline int security_msg_msg_alloc(struct msg_msg *msg)
@@ -1468,7 +1526,14 @@ static inline int security_ismaclabel(const char *name)
return 0;
}
-static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+static inline int security_secid_to_secctx(u32 secid, char **secdata,
+ u32 *seclen)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_lsmprop_to_secctx(struct lsm_prop *prop,
+ char **secdata, u32 *seclen)
{
return -EOPNOTSUPP;
}
@@ -2095,7 +2160,8 @@ static inline void security_key_post_create_or_update(struct key *keyring,
int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
gfp_t gfp);
int security_audit_rule_known(struct audit_krule *krule);
-int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
+int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op,
+ void *lsmrule);
void security_audit_rule_free(void *lsmrule);
#else
@@ -2111,8 +2177,8 @@ static inline int security_audit_rule_known(struct audit_krule *krule)
return 0;
}
-static inline int security_audit_rule_match(u32 secid, u32 field, u32 op,
- void *lsmrule)
+static inline int security_audit_rule_match(struct lsm_prop *prop, u32 field,
+ u32 op, void *lsmrule)
{
return 0;
}
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 2ac50822554e..80f33a93f944 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -52,6 +52,7 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_GET_GEOMETRY:
case IOC_OPAL_DISCOVERY:
case IOC_OPAL_REVERT_LSP:
+ case IOC_OPAL_SET_SID_PW:
return true;
}
return false;
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index fffeb754880f..5298765d6ca4 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -622,6 +622,23 @@ static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *
}
/**
+ * read_seqcount_latch() - pick even/odd latch data copy
+ * @s: Pointer to seqcount_latch_t
+ *
+ * See write_seqcount_latch() for details and a full reader/writer usage
+ * example.
+ *
+ * Return: sequence counter raw value. Use the lowest bit as an index for
+ * picking which data copy to read. The full counter must then be checked
+ * with read_seqcount_latch_retry().
+ */
+static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s)
+{
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
+ return raw_read_seqcount_latch(s);
+}
+
+/**
* raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
* @s: Pointer to seqcount_latch_t
* @start: count, from raw_read_seqcount_latch()
@@ -636,8 +653,33 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
}
/**
+ * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * @s: Pointer to seqcount_latch_t
+ * @start: count, from read_seqcount_latch()
+ *
+ * Return: true if a read section retry is required, else false
+ */
+static __always_inline int
+read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+{
+ kcsan_atomic_next(0);
+ return raw_read_seqcount_latch_retry(s, start);
+}
+
+/**
* raw_write_seqcount_latch() - redirect latch readers to even/odd copy
* @s: Pointer to seqcount_latch_t
+ */
+static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s)
+{
+ smp_wmb(); /* prior stores before incrementing "sequence" */
+ s->seqcount.sequence++;
+ smp_wmb(); /* increment "sequence" before following stores */
+}
+
+/**
+ * write_seqcount_latch_begin() - redirect latch readers to odd copy
+ * @s: Pointer to seqcount_latch_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
@@ -665,17 +707,11 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
*
* void latch_modify(struct latch_struct *latch, ...)
* {
- * smp_wmb(); // Ensure that the last data[1] update is visible
- * latch->seq.sequence++;
- * smp_wmb(); // Ensure that the seqcount update is visible
- *
+ * write_seqcount_latch_begin(&latch->seq);
* modify(latch->data[0], ...);
- *
- * smp_wmb(); // Ensure that the data[0] update is visible
- * latch->seq.sequence++;
- * smp_wmb(); // Ensure that the seqcount update is visible
- *
+ * write_seqcount_latch(&latch->seq);
* modify(latch->data[1], ...);
+ * write_seqcount_latch_end(&latch->seq);
* }
*
* The query will have a form like::
@@ -686,13 +722,13 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
* unsigned seq, idx;
*
* do {
- * seq = raw_read_seqcount_latch(&latch->seq);
+ * seq = read_seqcount_latch(&latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
*
* // This includes needed smp_rmb()
- * } while (raw_read_seqcount_latch_retry(&latch->seq, seq));
+ * } while (read_seqcount_latch_retry(&latch->seq, seq));
*
* return entry;
* }
@@ -716,11 +752,31 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
* When data is a dynamic data structure; one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
-static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
+static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s)
{
- smp_wmb(); /* prior stores before incrementing "sequence" */
- s->seqcount.sequence++;
- smp_wmb(); /* increment "sequence" before following stores */
+ kcsan_nestable_atomic_begin();
+ raw_write_seqcount_latch(s);
+}
+
+/**
+ * write_seqcount_latch() - redirect latch readers to even copy
+ * @s: Pointer to seqcount_latch_t
+ */
+static __always_inline void write_seqcount_latch(seqcount_latch_t *s)
+{
+ raw_write_seqcount_latch(s);
+}
+
+/**
+ * write_seqcount_latch_end() - end a seqcount_latch_t write section
+ * @s: Pointer to seqcount_latch_t
+ *
+ * Marks the end of a seqcount_latch_t writer section, after all copies of the
+ * latch-protected data have been updated.
+ */
+static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
+{
+ kcsan_nestable_atomic_end();
}
#define __SEQLOCK_UNLOCKED(lockname) \
@@ -754,11 +810,7 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- unsigned ret = read_seqcount_begin(&sl->seqcount);
-
- kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
- kcsan_flat_atomic_begin();
- return ret;
+ return read_seqcount_begin(&sl->seqcount);
}
/**
@@ -774,12 +826,6 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
- /*
- * Assume not nested: read_seqretry() may be called multiple times when
- * completing read critical section.
- */
- kcsan_flat_atomic_end();
-
return read_seqcount_retry(&sl->seqcount, start);
}
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 4ab65874a850..743b4afaad4c 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -505,7 +505,11 @@ struct uart_port {
* The remaining bits are serial-core specific and not modifiable by
* userspace.
*/
+#ifdef CONFIG_HAS_IOPORT
#define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ )
+#else
+#define UPF_FOURPORT 0
+#endif
#define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ )
#define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ )
#define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ )
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 515a9a6a3c6f..018da28c01e7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -42,10 +42,10 @@ struct shmem_inode_info {
struct inode vfs_inode;
};
-#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
+#define SHMEM_FL_USER_VISIBLE (FS_FL_USER_VISIBLE | FS_CASEFOLD_FL)
#define SHMEM_FL_USER_MODIFIABLE \
- (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
-#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL)
+ (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)
+#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)
struct shmem_quota_limits {
qsize_t usrquota_bhardlimit; /* Default user quota block hard limit */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 39f1d16f3628..58009fa66102 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -31,6 +31,7 @@
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <linux/llist.h>
+#include <linux/page_frag_cache.h>
#include <net/flow.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
@@ -2681,6 +2682,12 @@ static inline void skb_assert_len(struct sk_buff *skb)
#endif /* CONFIG_DEBUG_NET */
}
+#if defined(CONFIG_FAIL_SKB_REALLOC)
+void skb_might_realloc(struct sk_buff *skb);
+#else
+static inline void skb_might_realloc(struct sk_buff *skb) {}
+#endif
+
/*
* Add data to an sk_buff
*/
@@ -2781,6 +2788,7 @@ static inline enum skb_drop_reason
pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+ skb_might_realloc(skb);
if (likely(len <= skb_headlen(skb)))
return SKB_NOT_DROPPED_YET;
@@ -2909,9 +2917,19 @@ static inline void skb_reset_inner_headers(struct sk_buff *skb)
skb->inner_transport_header = skb->transport_header;
}
+static inline int skb_mac_header_was_set(const struct sk_buff *skb)
+{
+ return skb->mac_header != (typeof(skb->mac_header))~0U;
+}
+
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
- skb->mac_len = skb->network_header - skb->mac_header;
+ if (!skb_mac_header_was_set(skb)) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ skb->mac_len = 0;
+ } else {
+ skb->mac_len = skb->network_header - skb->mac_header;
+ }
}
static inline unsigned char *skb_inner_transport_header(const struct sk_buff
@@ -2927,7 +2945,10 @@ static inline int skb_inner_transport_offset(const struct sk_buff *skb)
static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
- skb->inner_transport_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_transport_header))offset);
+ skb->inner_transport_header = offset;
}
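The pattern repeated below stores the freshly computed long offset only after checking that it survives the cast to the narrower header field type; a stand-alone restatement:

/* DEBUG_NET_WARN_ON_ONCE() fires only when @offset does not
 * round-trip through the (typically 16-bit) header field. */
static inline void foo_store_transport_offset(struct sk_buff *skb, long offset)
{
	DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset);
	skb->transport_header = offset;
}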
static inline void skb_set_inner_transport_header(struct sk_buff *skb,
@@ -2944,7 +2965,10 @@ static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
- skb->inner_network_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_network_header))offset);
+ skb->inner_network_header = offset;
}
static inline void skb_set_inner_network_header(struct sk_buff *skb,
@@ -2966,7 +2990,10 @@ static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
- skb->inner_mac_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_mac_header))offset);
+ skb->inner_mac_header = offset;
}
static inline void skb_set_inner_mac_header(struct sk_buff *skb,
@@ -2988,7 +3015,10 @@ static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
static inline void skb_reset_transport_header(struct sk_buff *skb)
{
- skb->transport_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset);
+ skb->transport_header = offset;
}
static inline void skb_set_transport_header(struct sk_buff *skb,
@@ -3005,7 +3035,10 @@ static inline unsigned char *skb_network_header(const struct sk_buff *skb)
static inline void skb_reset_network_header(struct sk_buff *skb)
{
- skb->network_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->network_header))offset);
+ skb->network_header = offset;
}
static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
@@ -3014,11 +3047,6 @@ static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
skb->network_header += offset;
}
-static inline int skb_mac_header_was_set(const struct sk_buff *skb)
-{
- return skb->mac_header != (typeof(skb->mac_header))~0U;
-}
-
static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
@@ -3043,7 +3071,10 @@ static inline void skb_unset_mac_header(struct sk_buff *skb)
static inline void skb_reset_mac_header(struct sk_buff *skb)
{
- skb->mac_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->mac_header))offset);
+ skb->mac_header = offset;
}
static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
@@ -3130,9 +3161,15 @@ static inline int skb_inner_network_offset(const struct sk_buff *skb)
return skb_inner_network_header(skb) - skb->data;
}
+static inline enum skb_drop_reason
+pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len)
+{
+ return pskb_may_pull_reason(skb, skb_network_offset(skb) + len);
+}
+
static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
- return pskb_may_pull(skb, skb_network_offset(skb) + len);
+ return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
}
/*
@@ -3210,6 +3247,7 @@ static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
+ skb_might_realloc(skb);
return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
@@ -3964,6 +4002,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
+ skb_might_realloc(skb);
if (likely(len >= skb->len))
return 0;
return pskb_trim_rcsum_slow(skb, len);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index b35e2db7eb0e..0268ea7abf8b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -448,6 +448,7 @@ void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
+DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
/**
* ksize - Report actual allocation size of associated object
diff --git a/include/linux/soc/mediatek/dvfsrc.h b/include/linux/soc/mediatek/dvfsrc.h
new file mode 100644
index 000000000000..1498b3ed396b
--- /dev/null
+++ b/include/linux/soc/mediatek/dvfsrc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2021 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MEDIATEK_DVFSRC_H
+#define __MEDIATEK_DVFSRC_H
+
+enum mtk_dvfsrc_cmd {
+ MTK_DVFSRC_CMD_BW,
+ MTK_DVFSRC_CMD_HRT_BW,
+ MTK_DVFSRC_CMD_PEAK_BW,
+ MTK_DVFSRC_CMD_OPP,
+ MTK_DVFSRC_CMD_VCORE_LEVEL,
+ MTK_DVFSRC_CMD_VSCP_LEVEL,
+ MTK_DVFSRC_CMD_MAX,
+};
+
+#if IS_ENABLED(CONFIG_MTK_DVFSRC)
+
+int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data);
+int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data);
+
+#else
+
+static inline int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data)
+{ return -ENODEV; }
+
+static inline int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data)
+{ return -ENODEV; }
+
+#endif /* CONFIG_MTK_DVFSRC */
+
+#endif
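[Usage sketch — not part of the diff] A consumer driver holding a pointer to the DVFSRC device might issue a request and read the resulting state back. The command semantics and the level value below are assumptions:

#include <linux/soc/mediatek/dvfsrc.h>

static int example_vcore_request(const struct device *dvfsrc_dev)
{
	int level, ret;

	ret = mtk_dvfsrc_send_request(dvfsrc_dev, MTK_DVFSRC_CMD_VCORE_LEVEL, 2);
	if (ret)
		return ret;	/* -ENODEV when CONFIG_MTK_DVFSRC is off */
	return mtk_dvfsrc_query_info(dvfsrc_dev, MTK_DVFSRC_CMD_VCORE_LEVEL,
				     &level);
}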
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index 6c6cccc848f4..9956e18c5ffa 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -434,6 +434,11 @@
#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \
BIT(7) | BIT(8))
+#define MT6735_TOP_AXI_PROT_EN_CONN (BIT(2) | BIT(8))
+#define MT6735_TOP_AXI_PROT_EN_MD1 (BIT(24) | BIT(25) | \
+ BIT(26) | BIT(27) | \
+ BIT(28))
+
#define INFRA_TOPAXI_PROTECTEN 0x0220
#define INFRA_TOPAXI_PROTECTSTA1 0x0228
#define INFRA_TOPAXI_PROTECTEN_SET 0x0260
diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h
index 0761128b4354..abe24a73ee19 100644
--- a/include/linux/soc/mediatek/mtk_sip_svc.h
+++ b/include/linux/soc/mediatek/mtk_sip_svc.h
@@ -22,6 +22,9 @@
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \
ARM_SMCCC_OWNER_SIP, fn_id)
+/* DVFSRC SMC calls */
+#define MTK_SIP_DVFSRC_VCOREFS_CONTROL MTK_SIP_SMC_CMD(0x506)
+
/* IOMMU related SMC call */
#define MTK_SIP_KERNEL_IOMMU_CONTROL MTK_SIP_SMC_CMD(0x514)
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 9e9f528b1370..8e5d78fb4847 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -54,7 +54,19 @@
#define LLCC_CAMEXP4 52
#define LLCC_DISP_WB 53
#define LLCC_DISP_1 54
+#define LLCC_VIEYE 57
+#define LLCC_VIDPTH 58
+#define LLCC_GPUMV 59
+#define LLCC_EVA_LEFT 60
+#define LLCC_EVA_RIGHT 61
+#define LLCC_EVAGAIN 62
+#define LLCC_VIPTH 63
#define LLCC_VIDVSP 64
+#define LLCC_DISP_LEFT 65
+#define LLCC_DISP_RIGHT 66
+#define LLCC_EVCS_LEFT 67
+#define LLCC_EVCS_RIGHT 68
+#define LLCC_SPAD 69
/**
* struct llcc_slice_desc - Cache slice descriptor
@@ -125,6 +137,7 @@ struct llcc_edac_reg_offset {
* @num_banks: Number of llcc banks
* @bitmap: Bit map to track the active slice ids
* @ecc_irq: interrupt for llcc cache error detection and reporting
+ * @ecc_irq_configured: 'True' if firmware has already configured the irq propagation
* @version: Indicates the LLCC version
*/
struct llcc_drv_data {
@@ -139,6 +152,7 @@ struct llcc_drv_data {
u32 num_banks;
unsigned long *bitmap;
int ecc_irq;
+ bool ecc_irq_configured;
u32 version;
};
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index bd0d11af76c5..fd104b666836 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -195,6 +195,35 @@ struct ti_sci_clk_ops {
u64 *current_freq);
};
+/* TISCI LPM IO isolation control values */
+#define TISCI_MSG_VALUE_IO_ENABLE 1
+#define TISCI_MSG_VALUE_IO_DISABLE 0
+
+/* TISCI LPM constraint state values */
+#define TISCI_MSG_CONSTRAINT_SET 1
+#define TISCI_MSG_CONSTRAINT_CLR 0
+
+/**
+ * struct ti_sci_pm_ops - Low Power Mode (LPM) control operations
+ * @lpm_wake_reason: Get the wake up source that woke the SoC from LPM
+ * - source: The wake up source that woke the SoC from LPM.
+ * - timestamp: Timestamp at which the SoC woke.
+ * @set_device_constraint: Set LPM constraint on behalf of a device
+ * - id: Device Identifier
+ * - state: The desired state of device constraint: set or clear.
+ * @set_latency_constraint: Set LPM resume latency constraint
+ * - latency: maximum acceptable latency to wake up from low power mode
+ * - state: The desired state of latency constraint: set or clear.
+ */
+struct ti_sci_pm_ops {
+ int (*lpm_wake_reason)(const struct ti_sci_handle *handle,
+ u32 *source, u64 *timestamp, u8 *pin, u8 *mode);
+ int (*set_device_constraint)(const struct ti_sci_handle *handle,
+ u32 id, u8 state);
+ int (*set_latency_constraint)(const struct ti_sci_handle *handle,
+ u16 latency, u8 state);
+};
+
/**
* struct ti_sci_resource_desc - Description of TI SCI resource instance range.
* @start: Start index of the first resource range.
@@ -539,6 +568,7 @@ struct ti_sci_ops {
struct ti_sci_core_ops core_ops;
struct ti_sci_dev_ops dev_ops;
struct ti_sci_clk_ops clk_ops;
+ struct ti_sci_pm_ops pm_ops;
struct ti_sci_rm_core_ops rm_core_ops;
struct ti_sci_rm_irq_ops rm_irq_ops;
struct ti_sci_rm_ringacc_ops rm_ring_ops;
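[Usage sketch — not part of the diff] A client driver could set a resume-latency constraint through the new pm_ops before entering suspend. A minimal sketch; the latency value and its unit are assumptions, consult the TISCI spec for the real ones:

#include <linux/soc/ti/ti_sci_protocol.h>

static int example_latency_constraint(const struct ti_sci_handle *handle)
{
	const struct ti_sci_pm_ops *pm = &handle->ops.pm_ops;

	/* 100 in whatever unit the firmware expects (assumed here) */
	return pm->set_latency_constraint(handle, 100,
					  TISCI_MSG_CONSTRAINT_SET);
}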
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
index fc5a206c4043..195debe2b1db 100644
--- a/include/linux/sockptr.h
+++ b/include/linux/sockptr.h
@@ -77,7 +77,9 @@ static inline int copy_safe_from_sockptr(void *dst, size_t ksize,
{
if (optlen < ksize)
return -EINVAL;
- return copy_from_sockptr(dst, optval, ksize);
+ if (copy_from_sockptr(dst, optval, ksize))
+ return -EFAULT;
+ return 0;
}
static inline int copy_struct_from_sockptr(void *dst, size_t ksize,
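[Usage sketch — not part of the diff] With this fix, callers can propagate the return value directly and get -EFAULT rather than a positive byte count. The option struct and handler below are illustrative:

#include <linux/sockptr.h>

struct example_opt { u32 mode; };	/* hypothetical option payload */

static int example_setsockopt(sockptr_t optval, unsigned int optlen)
{
	struct example_opt opt;
	int err;

	err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
	if (err)	/* -EINVAL if optlen is too short, -EFAULT on fault */
		return err;
	/* ... apply opt ... */
	return 0;
}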
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 4b95663163e0..8497f4747e24 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -824,21 +824,6 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
extern struct spi_controller *__spi_alloc_controller(struct device *host,
unsigned int size, bool slave);
-static inline struct spi_controller *spi_alloc_master(struct device *host,
- unsigned int size)
-{
- return __spi_alloc_controller(host, size, false);
-}
-
-static inline struct spi_controller *spi_alloc_slave(struct device *host,
- unsigned int size)
-{
- if (!IS_ENABLED(CONFIG_SPI_SLAVE))
- return NULL;
-
- return __spi_alloc_controller(host, size, true);
-}
-
static inline struct spi_controller *spi_alloc_host(struct device *dev,
unsigned int size)
{
@@ -858,21 +843,6 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
unsigned int size,
bool slave);
-static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
- unsigned int size)
-{
- return __devm_spi_alloc_controller(dev, size, false);
-}
-
-static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
- unsigned int size)
-{
- if (!IS_ENABLED(CONFIG_SPI_SLAVE))
- return NULL;
-
- return __devm_spi_alloc_controller(dev, size, true);
-}
-
static inline struct spi_controller *devm_spi_alloc_host(struct device *dev,
unsigned int size)
{
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 61c49b16f69a..f6499c37157d 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -16,26 +16,25 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
}
#endif
-#define spin_lock_init(slock) \
+#define __spin_lock_init(slock, name, key, percpu) \
do { \
- static struct lock_class_key __key; \
- \
rt_mutex_base_init(&(slock)->lock); \
- __rt_spin_lock_init(slock, #slock, &__key, false); \
+ __rt_spin_lock_init(slock, name, key, percpu); \
} while (0)
-#define local_spin_lock_init(slock) \
+#define _spin_lock_init(slock, percpu) \
do { \
static struct lock_class_key __key; \
- \
- rt_mutex_base_init(&(slock)->lock); \
- __rt_spin_lock_init(slock, #slock, &__key, true); \
+ __spin_lock_init(slock, #slock, &__key, percpu); \
} while (0)
-extern void rt_spin_lock(spinlock_t *lock);
-extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
-extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
-extern void rt_spin_unlock(spinlock_t *lock);
+#define spin_lock_init(slock) _spin_lock_init(slock, false)
+#define local_spin_lock_init(slock) _spin_lock_init(slock, true)
+
+extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
+extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock);
+extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
+extern void rt_spin_unlock(spinlock_t *lock) __releases(lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
extern int rt_spin_trylock_bh(spinlock_t *lock);
extern int rt_spin_trylock(spinlock_t *lock);
@@ -132,7 +131,7 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
#define spin_trylock_irq(lock) \
__cond_lock(lock, rt_spin_trylock(lock))
-#define __spin_trylock_irqsave(lock, flags) \
+#define spin_trylock_irqsave(lock, flags) \
({ \
int __locked; \
\
@@ -142,9 +141,6 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
__locked; \
})
-#define spin_trylock_irqsave(lock, flags) \
- __cond_lock(lock, __spin_trylock_irqsave(lock, flags))
-
#define spin_is_contended(lock) (((void)(lock), 0))
static inline int spin_is_locked(spinlock_t *lock)
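[Usage sketch — not part of the diff] After this change spin_trylock_irqsave() is defined directly on PREEMPT_RT, but callers use it exactly as on !RT kernels:

#include <linux/spinlock.h>

static bool example_try(spinlock_t *lock)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(lock, flags))
		return false;
	/* ... critical section ... */
	spin_unlock_irqrestore(lock, flags);
	return true;
}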
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 835bbb2d1f88..08339eb8a01c 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -56,6 +56,13 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+#ifdef CONFIG_TINY_SRCU
+#define __srcu_read_lock_lite __srcu_read_lock
+#define __srcu_read_unlock_lite __srcu_read_unlock
+#else // #ifdef CONFIG_TINY_SRCU
+int __srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp);
+void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) __releases(ssp);
+#endif // #else // #ifdef CONFIG_TINY_SRCU
void synchronize_srcu(struct srcu_struct *ssp);
#define SRCU_GET_STATE_COMPLETED 0x1
@@ -176,17 +183,6 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-#define SRCU_NMI_UNKNOWN 0x0
-#define SRCU_NMI_UNSAFE 0x1
-#define SRCU_NMI_SAFE 0x2
-
-#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU)
-void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe);
-#else
-static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
- bool nmi_safe) { }
-#endif
-
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
@@ -236,33 +232,67 @@ static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
* a mutex that is held elsewhere while calling synchronize_srcu() or
* synchronize_srcu_expedited().
*
- * Note that srcu_read_lock() and the matching srcu_read_unlock() must
- * occur in the same context, for example, it is illegal to invoke
- * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
- * was invoked in process context.
+ * The return value from srcu_read_lock() must be passed unaltered
+ * to the matching srcu_read_unlock(). Note that srcu_read_lock() and
+ * the matching srcu_read_unlock() must occur in the same context, for
+ * example, it is illegal to invoke srcu_read_unlock() in an irq handler
+ * if the matching srcu_read_lock() was invoked in process context. Or,
+ * for that matter to invoke srcu_read_unlock() from one task and the
+ * matching srcu_read_lock() from another.
*/
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
retval = __srcu_read_lock(ssp);
srcu_lock_acquire(&ssp->dep_map);
return retval;
}
/**
+ * srcu_read_lock_lite - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section, but for a light-weight
+ * smp_mb()-free reader. See srcu_read_lock() for more information.
+ *
+ * If srcu_read_lock_lite() is ever used on an srcu_struct structure,
+ * then none of the other flavors may be used, whether before, during,
+ * or after. Note that grace-period auto-expediting is disabled for _lite
+ * srcu_struct structures because auto-expedited grace periods invoke
+ * synchronize_rcu_expedited(), IPIs and all.
+ *
+ * Note that srcu_read_lock_lite() can be invoked only from those contexts
+ * where RCU is watching, that is, from contexts where it would be legal
+ * to invoke rcu_read_lock(). Otherwise, lockdep will complain.
+ */
+static inline int srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp)
+{
+ int retval;
+
+ srcu_check_read_flavor_lite(ssp);
+ retval = __srcu_read_lock_lite(ssp);
+ rcu_try_lock_acquire(&ssp->dep_map);
+ return retval;
+}
+
+/**
* srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
* @ssp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section, but in an NMI-safe manner.
* See srcu_read_lock() for more information.
+ *
+ * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure,
+ * then none of the other flavors may be used, whether before, during,
+ * or after.
*/
static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- srcu_check_nmi_safety(ssp, true);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
retval = __srcu_read_lock_nmisafe(ssp);
rcu_try_lock_acquire(&ssp->dep_map);
return retval;
@@ -274,7 +304,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
retval = __srcu_read_lock(ssp);
return retval;
}
@@ -303,7 +333,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
{
WARN_ON_ONCE(in_nmi());
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
return __srcu_read_lock(ssp);
}
@@ -318,12 +348,28 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
srcu_lock_release(&ssp->dep_map);
__srcu_read_unlock(ssp, idx);
}
/**
+ * srcu_read_unlock_lite - unregister an old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock().
+ *
+ * Exit a light-weight SRCU read-side critical section.
+ */
+static inline void srcu_read_unlock_lite(struct srcu_struct *ssp, int idx)
+ __releases(ssp)
+{
+ WARN_ON_ONCE(idx & ~0x1);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE);
+ srcu_lock_release(&ssp->dep_map);
+ __srcu_read_unlock_lite(ssp, idx);
+}
+
+/**
* srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock().
@@ -334,7 +380,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
- srcu_check_nmi_safety(ssp, true);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
rcu_lock_release(&ssp->dep_map);
__srcu_read_unlock_nmisafe(ssp, idx);
}
@@ -343,7 +389,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
}
@@ -360,7 +406,7 @@ static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
{
WARN_ON_ONCE(idx & ~0x1);
WARN_ON_ONCE(in_nmi());
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
}
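[Usage sketch — not part of the diff] A reader pairing for the new _lite flavor; per the comments above, an srcu_struct that ever uses _lite readers must use them exclusively:

#include <linux/srcu.h>

static void example_lite_reader(struct srcu_struct *ssp)
{
	int idx;

	idx = srcu_read_lock_lite(ssp);	/* RCU must be watching here */
	/* ... read-side critical section, no smp_mb() on this path ... */
	srcu_read_unlock_lite(ssp, idx);
}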
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 4d96bbdb45f0..1321da803274 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -81,6 +81,9 @@ static inline void srcu_barrier(struct srcu_struct *ssp)
synchronize_srcu(ssp);
}
+#define srcu_check_read_flavor(ssp, read_flavor) do { } while (0)
+#define srcu_check_read_flavor_lite(ssp) do { } while (0)
+
/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
char *tt, char *tf)
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index ed57598394de..490aeecc6bb4 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -25,7 +25,7 @@ struct srcu_data {
/* Read-side state. */
atomic_long_t srcu_lock_count[2]; /* Locks per CPU. */
atomic_long_t srcu_unlock_count[2]; /* Unlocks per CPU. */
- int srcu_nmi_safety; /* NMI-safe srcu_struct structure? */
+ int srcu_reader_flavor; /* Reader flavor for srcu_struct structure? */
/* Update-side state. */
spinlock_t __private lock ____cacheline_internodealigned_in_smp;
@@ -43,6 +43,11 @@ struct srcu_data {
struct srcu_struct *ssp;
};
+/* Values for ->srcu_reader_flavor. */
+#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock().
+#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe().
+#define SRCU_READ_FLAVOR_LITE 0x4 // srcu_read_lock_lite().
+
/*
* Node in SRCU combining tree, similar in function to rcu_data.
*/
@@ -204,4 +209,64 @@ void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct. Returns an index that must be passed to the matching
+ * srcu_read_unlock_lite().
+ *
+ * Note that this_cpu_inc() is an RCU read-side critical section either
+ * because it disables interrupts, because it is a single instruction,
+ * or because it is a read-modify-write atomic operation, depending on
+ * the whims of the architecture.
+ */
+static inline int __srcu_read_lock_lite(struct srcu_struct *ssp)
+{
+ int idx;
+
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_lite().");
+ idx = READ_ONCE(ssp->srcu_idx) & 0x1;
+ this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter); /* Y */
+ barrier(); /* Avoid leaking the critical section. */
+ return idx;
+}
+
+/*
+ * Removes the count for the old reader from the appropriate
+ * per-CPU element of the srcu_struct. Note that this may well be a
+ * different CPU than that which was incremented by the corresponding
+ * srcu_read_lock_lite(), but it must be within the same task.
+ *
+ * Note that this_cpu_inc() is an RCU read-side critical section either
+ * because it disables interrupts, because it is a single instruction,
+ * or because it is a read-modify-write atomic operation, depending on
+ * the whims of the architecture.
+ */
+static inline void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx)
+{
+ barrier(); /* Avoid leaking the critical section. */
+ this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter); /* Z */
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_lite().");
+}
+
+void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
+
+// Record _lite() usage even for CONFIG_PROVE_RCU=n kernels.
+static inline void srcu_check_read_flavor_lite(struct srcu_struct *ssp)
+{
+ struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
+
+ if (likely(READ_ONCE(sdp->srcu_reader_flavor) & SRCU_READ_FLAVOR_LITE))
+ return;
+
+ // Note that the cmpxchg() in srcu_check_read_flavor() is fully ordered.
+ __srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE);
+}
+
+// Record non-_lite() usage only for CONFIG_PROVE_RCU=y kernels.
+static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor)
+{
+ if (IS_ENABLED(CONFIG_PROVE_RCU))
+ __srcu_check_read_flavor(ssp, read_flavor);
+}
+
#endif
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 5758104921e6..c6333204d451 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -77,6 +77,7 @@ struct cachestat_range;
struct cachestat;
struct statmount;
struct mnt_id_req;
+struct xattr_args;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -338,23 +339,35 @@ asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op,
void __user *arg, unsigned int nr_args);
asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
const void __user *value, size_t size, int flags);
+asmlinkage long sys_setxattrat(int dfd, const char __user *path, unsigned int at_flags,
+ const char __user *name,
+ const struct xattr_args __user *args, size_t size);
asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name,
const void __user *value, size_t size, int flags);
asmlinkage long sys_fsetxattr(int fd, const char __user *name,
const void __user *value, size_t size, int flags);
asmlinkage long sys_getxattr(const char __user *path, const char __user *name,
void __user *value, size_t size);
+asmlinkage long sys_getxattrat(int dfd, const char __user *path, unsigned int at_flags,
+ const char __user *name,
+ struct xattr_args __user *args, size_t size);
asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name,
void __user *value, size_t size);
asmlinkage long sys_fgetxattr(int fd, const char __user *name,
void __user *value, size_t size);
asmlinkage long sys_listxattr(const char __user *path, char __user *list,
size_t size);
+asmlinkage long sys_listxattrat(int dfd, const char __user *path,
+ unsigned int at_flags,
+ char __user *list, size_t size);
asmlinkage long sys_llistxattr(const char __user *path, char __user *list,
size_t size);
asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size);
asmlinkage long sys_removexattr(const char __user *path,
const char __user *name);
+asmlinkage long sys_removexattrat(int dfd, const char __user *path,
+ unsigned int at_flags,
+ const char __user *name);
asmlinkage long sys_lremovexattr(const char __user *path,
const char __user *name);
asmlinkage long sys_fremovexattr(int fd, const char __user *name);
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index bef5f06a91de..07cbab516942 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -60,12 +60,19 @@ struct efifb_dmi_info {
void sysfb_disable(struct device *dev);
+bool sysfb_handles_screen_info(void);
+
#else /* CONFIG_SYSFB */
static inline void sysfb_disable(struct device *dev)
{
}
+static inline bool sysfb_handles_screen_info(void)
+{
+ return false;
+}
+
#endif /* CONFIG_SYSFB */
#ifdef CONFIG_EFI
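[Usage sketch — not part of the diff] A driver that would otherwise build its own device from the global screen_info can defer to sysfb when sysfb owns it. The surrounding probe logic is an assumption:

#include <linux/sysfb.h>

static int example_probe(void)
{
	/*
	 * If sysfb already creates a platform device for the firmware
	 * framebuffer, don't set up a duplicate one by hand.
	 */
	if (sysfb_handles_screen_info())
		return -ENODEV;
	/* ... driver-private setup from screen_info ... */
	return 0;
}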
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 6a5e08b937b3..f88daaa76d83 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -200,7 +200,6 @@ struct tcp_sock {
/* TX read-mostly hotpath cache lines */
__cacheline_group_begin(tcp_sock_read_tx);
- /* timestamp of last sent data packet (for restart window) */
u32 max_window; /* Maximal window ever seen from peer */
u32 rcv_ssthresh; /* Current window clamp */
u32 reordering; /* Packet reordering metric. */
@@ -263,7 +262,7 @@ struct tcp_sock {
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
u32 pushed_seq; /* Last pushed seq, required to talk to windows */
- u32 lsndtime;
+ u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 mdev_us; /* medium deviation */
u32 rtt_seq; /* sequence number to update rttvar */
u64 tcp_wstamp_ns; /* departure time for next sent data packet */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 25ea8fe2313e..754802478b96 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -56,6 +56,9 @@ enum thermal_notify_event {
THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbind from the thermal zone */
THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */
THERMAL_TZ_RESUME, /* Thermal zone is resuming after system sleep */
+ THERMAL_TZ_ADD_THRESHOLD, /* Threshold added */
+ THERMAL_TZ_DEL_THRESHOLD, /* Threshold deleted */
+ THERMAL_TZ_FLUSH_THRESHOLDS, /* All thresholds deleted */
};
/**
@@ -137,6 +140,9 @@ struct thermal_cooling_device {
#endif
};
+DEFINE_GUARD(cooling_dev, struct thermal_cooling_device *, mutex_lock(&_T->lock),
+ mutex_unlock(&_T->lock))
+
/* Structure to define Thermal Zone parameters */
struct thermal_zone_params {
const char *governor_name;
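[Usage sketch — not part of the diff] The new guard scopes cdev->lock with cleanup.h semantics; the function is hypothetical:

#include <linux/thermal.h>

static void example_update_cdev(struct thermal_cooling_device *cdev)
{
	guard(cooling_dev)(cdev);	/* mutex_lock(&cdev->lock) */

	/* ... touch state protected by cdev->lock ... */
}	/* mutex_unlock(&cdev->lock) on every return path */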
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 9ea0b28068f4..cf2446c9c30d 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -59,6 +59,14 @@ enum syscall_work_bit {
#include <asm/thread_info.h>
+#ifndef TIF_NEED_RESCHED_LAZY
+#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
+#error Inconsistent PREEMPT_LAZY
+#endif
+#define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
+#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
+#endif
+
#ifdef __KERNEL__
#ifndef arch_set_restart_data
@@ -179,22 +187,27 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti
#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
-static __always_inline bool tif_need_resched(void)
+static __always_inline bool tif_test_bit(int bit)
{
- return arch_test_bit(TIF_NEED_RESCHED,
+ return arch_test_bit(bit,
(unsigned long *)(&current_thread_info()->flags));
}
#else
-static __always_inline bool tif_need_resched(void)
+static __always_inline bool tif_test_bit(int bit)
{
- return test_bit(TIF_NEED_RESCHED,
+ return test_bit(bit,
(unsigned long *)(&current_thread_info()->flags));
}
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
+static __always_inline bool tif_need_resched(void)
+{
+ return tif_test_bit(TIF_NEED_RESCHED);
+}
+
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
const void * const stackend,
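[Usage sketch — not part of the diff] With the fallback defines above, generic code can test the lazy flag uniformly whether or not the architecture implements it; the helper name is an assumption:

#include <linux/thread_info.h>

static __always_inline bool example_need_resched_lazy(void)
{
	/* falls back to TIF_NEED_RESCHED without CONFIG_ARCH_HAS_PREEMPT_LAZY */
	return tif_test_bit(TIF_NEED_RESCHED_LAZY);
}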
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 99c9c5a7252a..b8ddc8e631a3 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -20,12 +20,10 @@ extern void __init tick_init(void);
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
-extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
-static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU)
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 902c20ef495a..e39d4d563b19 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -26,7 +26,7 @@
* occupies a single 64byte cache line.
*
* The struct is separate from struct timekeeper as it is also used
- * for a fast NMI safe accessors.
+ * for the fast NMI safe accessors.
*
* @base_real is for the fast NMI safe accessor to allow reading clock
* realtime from any context.
@@ -44,33 +44,38 @@ struct tk_read_base {
/**
* struct timekeeper - Structure holding internal timekeeping values.
- * @tkr_mono: The readout base structure for CLOCK_MONOTONIC
- * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW
- * @xtime_sec: Current CLOCK_REALTIME time in seconds
- * @ktime_sec: Current CLOCK_MONOTONIC time in seconds
- * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
- * @offs_real: Offset clock monotonic -> clock realtime
- * @offs_boot: Offset clock monotonic -> clock boottime
- * @offs_tai: Offset clock monotonic -> clock tai
- * @tai_offset: The current UTC to TAI offset in seconds
- * @clock_was_set_seq: The sequence number of clock was set events
- * @cs_was_changed_seq: The sequence number of clocksource change events
- * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
- * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
- * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
- * @cycle_interval: Number of clock cycles in one NTP interval
- * @xtime_interval: Number of clock shifted nano seconds in one NTP
- * interval.
- * @xtime_remainder: Shifted nano seconds left over when rounding
- * @cycle_interval
- * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
- * @ntp_error: Difference between accumulated time and NTP time in ntp
- * shifted nano seconds.
- * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
- * ntp shifted nano seconds.
- * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING)
- * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING)
- * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING)
+ * @tkr_mono: The readout base structure for CLOCK_MONOTONIC
+ * @xtime_sec: Current CLOCK_REALTIME time in seconds
+ * @ktime_sec: Current CLOCK_MONOTONIC time in seconds
+ * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
+ * @offs_real: Offset clock monotonic -> clock realtime
+ * @offs_boot: Offset clock monotonic -> clock boottime
+ * @offs_tai: Offset clock monotonic -> clock tai
+ * @tai_offset: The current UTC to TAI offset in seconds
+ * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW
+ * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
+ * @clock_was_set_seq: The sequence number of clock was set events
+ * @cs_was_changed_seq: The sequence number of clocksource change events
+ * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
+ * @cycle_interval: Number of clock cycles in one NTP interval
+ * @xtime_interval: Number of clock shifted nano seconds in one NTP
+ * interval.
+ * @xtime_remainder: Shifted nano seconds left over when rounding
+ * @cycle_interval
+ * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
+ * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
+ * @ntp_tick: The ntp_tick_length() value currently being
+ * used. This cached copy ensures we consistently
+ * apply the tick length for an entire tick, as
+ * ntp_tick_length may change mid-tick, and we don't
+ * want to apply that new value to the tick in
+ * progress.
+ * @ntp_error: Difference between accumulated time and NTP time in ntp
+ * shifted nano seconds.
+ * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
+ * ntp shifted nano seconds.
+ * @ntp_err_mult: Multiplication factor for scaled math conversion
+ * @skip_second_overflow: Flag used to avoid updating NTP twice with same second
*
* Note: For timespec(64) based interfaces wall_to_monotonic is what
* we need to add to xtime (or xtime corrected for sub jiffy times)
@@ -88,10 +93,28 @@ struct tk_read_base {
*
* @monotonic_to_boottime is a timespec64 representation of @offs_boot to
* accelerate the VDSO update for CLOCK_BOOTTIME.
+ *
+ * The cacheline ordering of the structure is optimized for in kernel usage of
+ * the ktime_get() and ktime_get_ts64() family of time accessors. Struct
+ * timekeeper is prepended in the core timekeeping code with a sequence count,
+ * which results in the following cacheline layout:
+ *
+ * 0: seqcount, tkr_mono
+ * 1: xtime_sec ... tai_offset
+ * 2: tkr_raw, raw_sec
+ * 3,4: Internal variables
+ *
+ * Cacheline 0,1 contain the data which is used for accessing
+ * CLOCK_MONOTONIC/REALTIME/BOOTTIME/TAI, while cacheline 2 contains the
+ * data for accessing CLOCK_MONOTONIC_RAW. Cacheline 3,4 are internal
+ * variables which are only accessed during timekeeper updates once per
+ * tick.
*/
struct timekeeper {
+ /* Cacheline 0 (together with prepended seqcount of timekeeper core): */
struct tk_read_base tkr_mono;
- struct tk_read_base tkr_raw;
+
+ /* Cacheline 1: */
u64 xtime_sec;
unsigned long ktime_sec;
struct timespec64 wall_to_monotonic;
@@ -99,43 +122,28 @@ struct timekeeper {
ktime_t offs_boot;
ktime_t offs_tai;
s32 tai_offset;
+
+ /* Cacheline 2: */
+ struct tk_read_base tkr_raw;
+ u64 raw_sec;
+
+ /* Cacheline 3 and 4 (timekeeping internal variables): */
unsigned int clock_was_set_seq;
u8 cs_was_changed_seq;
- ktime_t next_leap_ktime;
- u64 raw_sec;
+
struct timespec64 monotonic_to_boot;
- /* The following members are for timekeeping internal use */
u64 cycle_interval;
u64 xtime_interval;
s64 xtime_remainder;
u64 raw_interval;
- /* The ntp_tick_length() value currently being used.
- * This cached copy ensures we consistently apply the tick
- * length for an entire tick, as ntp_tick_length may change
- * mid-tick, and we don't want to apply that new value to
- * the tick in progress.
- */
+
+ ktime_t next_leap_ktime;
u64 ntp_tick;
- /* Difference between accumulated time and NTP time in ntp
- * shifted nano seconds. */
s64 ntp_error;
u32 ntp_error_shift;
u32 ntp_err_mult;
- /* Flag used to avoid updating NTP twice with same second */
u32 skip_second_overflow;
-#ifdef CONFIG_DEBUG_TIMEKEEPING
- long last_warning;
- /*
- * These simple flag variables are managed
- * without locks, which is racy, but they are
- * ok since we don't really care about being
- * super precise about how many events were
- * seen, just that a problem was observed.
- */
- int underflow_seen;
- int overflow_seen;
-#endif
};
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index fc12a9ba2c88..0e035f675efe 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -45,6 +45,11 @@ extern void ktime_get_real_ts64(struct timespec64 *tv);
extern void ktime_get_coarse_ts64(struct timespec64 *ts);
extern void ktime_get_coarse_real_ts64(struct timespec64 *ts);
+/* Multigrain timestamp interfaces */
+extern void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts);
+extern void ktime_get_real_ts64_mg(struct timespec64 *ts);
+extern unsigned long timekeeping_get_mg_floor_swaps(void);
+
void getboottime64(struct timespec64 *ts);
/*
@@ -275,6 +280,7 @@ struct ktime_timestamps {
* counter value
* @cycles: Clocksource counter value to produce the system times
* @real: Realtime system time
+ * @boot: Boot time
* @raw: Monotonic raw system time
* @cs_id: Clocksource ID
* @clock_was_set_seq: The sequence number of clock-was-set events
@@ -283,6 +289,7 @@ struct ktime_timestamps {
struct system_time_snapshot {
u64 cycles;
ktime_t real;
+ ktime_t boot;
ktime_t raw;
enum clocksource_ids cs_id;
unsigned int clock_was_set_seq;
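[Usage sketch — not part of the diff] The multigrain pair lets timestamp code take a cheap coarse stamp normally and a fine-grained one only when ordering demands it; as the helper names suggest, the fine accessor maintains a floor the coarse one never dips below. The policy flag here is an assumption:

#include <linux/timekeeping.h>

static void example_mg_stamp(struct timespec64 *ts, bool need_fine)
{
	if (need_fine)
		ktime_get_real_ts64_mg(ts);	/* fine stamp, raises the floor */
	else
		ktime_get_coarse_real_ts64_mg(ts); /* coarse, never below floor */
}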
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 3871b06bd302..4ee32eff3f22 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -139,14 +139,6 @@ unsigned long random_get_entropy_fallback(void);
#define MAXSEC 2048 /* max interval between updates (s) */
#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
-/*
- * kernel variables
- * Note: maximum error = NTP sync distance = dispersion + delay / 2;
- * estimated error = NTP dispersion.
- */
-extern unsigned long tick_usec; /* USER_HZ period (usec) */
-extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
-
/* Required to safely shift negative values */
#define shift_right(x, s) ({ \
__typeof__(x) __x = (x); \
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 587b96b4418e..20a40ade8030 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -421,6 +421,7 @@ void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value);
u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset);
u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset);
u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset);
+void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle);
/*
* Check if TPM device is in the firmware upgrade mode.
@@ -505,6 +506,8 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
u8 attributes, u8 *passphrase,
int passphraselen);
+void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase, int passphraselen);
static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
struct tpm_buf *buf,
u8 attributes,
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 7d68a5cc5881..891368e82558 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -157,7 +157,7 @@ struct tcg_algorithm_info {
* Return: size of the event on success, 0 on failure
*/
-static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+static __always_inline u32 __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
struct tcg_pcr_event *event_header,
bool do_mapping)
{
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 43844510d5d0..e9c702c1908d 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -403,6 +403,103 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
return 0;
}
+/**
+ * copy_struct_to_user: copy a struct to userspace
+ * @dst: Destination address, in userspace. This buffer must be @usize
+ * bytes long.
+ * @usize: (Alleged) size of @dst struct.
+ * @src: Source address, in kernel space.
+ * @ksize: Size of @src struct.
+ * @ignored_trailing: Set to %true if there was a non-zero byte in @src that
+ * userspace cannot see because it is using a smaller struct.
+ *
+ * Copies a struct from kernel space to userspace, in a way that guarantees
+ * backwards-compatibility for struct syscall arguments (as long as future
+ * struct extensions are made such that all new fields are *appended* to the
+ * old struct, and zeroed-out new fields have the same meaning as the old
+ * struct).
+ *
+ * Some syscalls may wish to make sure that userspace knows about everything in
+ * the struct, and if there is a non-zero value that userspce doesn't know
+ * about, they want to return an error (such as -EMSGSIZE) or have some other
+ * fallback (such as adding a "you're missing some information" flag). If
+ * @ignored_trailing is non-%NULL, it will be set to %true if there was a
+ * non-zero byte that could not be copied to userspace (i.e. was past @usize).
+ *
+ * While unconditionally returning an error in this case is the simplest
+ * solution, for maximum backward compatibility you should try to only return
+ * -EMSGSIZE if the user explicitly requested the data that couldn't be copied.
+ * Note that structure sizes can change due to header changes and simple
+ * recompilations without code changes(!), so if you care about
+ * @ignored_trailing you probably want to make sure that any new field data is
+ * associated with a flag. Otherwise you might assume that a program knows
+ * about data it does not.
+ *
+ * @ksize is just sizeof(*src), and @usize should've been passed by userspace.
+ * The recommended usage is something like the following:
+ *
+ * SYSCALL_DEFINE2(foobar, struct foo __user *, uarg, size_t, usize)
+ * {
+ * int err;
+ * bool ignored_trailing;
+ * struct foo karg = {};
+ *
+ * if (usize > PAGE_SIZE)
+ * return -E2BIG;
+ * if (usize < FOO_SIZE_VER0)
+ * return -EINVAL;
+ *
+ * // ... modify karg somehow ...
+ *
+ * err = copy_struct_to_user(uarg, usize, &karg, sizeof(karg),
+ * &ignored_trailing);
+ * if (err)
+ * return err;
+ * if (ignored_trailing)
+ * return -EMSGSIZE;
+ *
+ * // ...
+ * }
+ *
+ * There are three cases to consider:
+ * * If @usize == @ksize, then it's copied verbatim.
+ * * If @usize < @ksize, then the kernel is trying to pass userspace a newer
+ * struct than it supports. Thus we only copy the interoperable portions
+ * (@usize) and ignore the rest (but @ignored_trailing is set to %true if
+ * any of the trailing (@ksize - @usize) bytes are non-zero).
+ * * If @usize > @ksize, then the kernel is trying to pass userspace an older
+ * struct than userspace supports. In order to make sure the
+ * unknown-to-the-kernel fields don't contain garbage values, we zero the
+ * trailing (@usize - @ksize) bytes.
+ *
+ * Returns (in all cases, some data may have been copied):
+ * * -EFAULT: access to userspace failed.
+ */
+static __always_inline __must_check int
+copy_struct_to_user(void __user *dst, size_t usize, const void *src,
+ size_t ksize, bool *ignored_trailing)
+{
+ size_t size = min(ksize, usize);
+ size_t rest = max(ksize, usize) - size;
+
+ /* Double check if ksize is larger than a known object size. */
+ if (WARN_ON_ONCE(ksize > __builtin_object_size(src, 1)))
+ return -E2BIG;
+
+ /* Deal with trailing bytes. */
+ if (usize > ksize) {
+ if (clear_user(dst + size, rest))
+ return -EFAULT;
+ }
+ if (ignored_trailing)
+ *ignored_trailing = ksize < usize &&
+ memchr_inv(src + size, 0, rest) != NULL;
+ /* Copy the interoperable parts of the struct. */
+ if (copy_to_user(dst, src, size))
+ return -EFAULT;
+ return 0;
+}
+
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 3eb3f2b9a2a0..0807e21cfec9 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -56,6 +56,12 @@ struct udp_sock {
int pending; /* Any pending frames ? */
__u8 encap_type; /* Is this an Encapsulation socket? */
+#if !IS_ENABLED(CONFIG_BASE_SMALL)
+ /* For UDP 4-tuple hash */
+ __u16 udp_lrpa_hash;
+ struct hlist_nulls_node udp_lrpa_node;
+#endif
+
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -206,6 +212,11 @@ static inline void udp_allow_gso(struct sock *sk)
#define udp_portaddr_for_each_entry_rcu(__sk, list) \
hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
+#if !IS_ENABLED(CONFIG_BASE_SMALL)
+#define udp_lrpa_for_each_entry_rcu(__up, node, list) \
+ hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
+#endif
+
#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
#endif /* _LINUX_UDP_H */
diff --git a/include/linux/unicode.h b/include/linux/unicode.h
index 4d39e6e11a95..5e6b212a2aed 100644
--- a/include/linux/unicode.h
+++ b/include/linux/unicode.h
@@ -16,6 +16,8 @@ struct utf8data_table;
((unsigned int)(MIN) << UNICODE_MIN_SHIFT) | \
((unsigned int)(REV)))
+#define UTF8_LATEST UNICODE_AGE(12, 1, 0)
+
static inline u8 unicode_major(unsigned int age)
{
return (age >> UNICODE_MAJ_SHIFT) & 0xff;
@@ -76,4 +78,6 @@ int utf8_casefold_hash(const struct unicode_map *um, const void *salt,
struct unicode_map *utf8_load(unsigned int version);
void utf8_unload(struct unicode_map *um);
+int utf8_parse_version(char *version);
+
#endif /* _LINUX_UNICODE_H */
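[Usage sketch — not part of the diff] Loading the newest supported Unicode version via the new define; the wrapper function is illustrative:

#include <linux/err.h>
#include <linux/unicode.h>

static int example_load_casefold(struct unicode_map **um)
{
	*um = utf8_load(UTF8_LATEST);	/* currently Unicode 12.1.0 */
	if (IS_ERR(*um))
		return PTR_ERR(*um);
	return 0;
}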
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 2b294bf1881f..e0a4c2082245 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -15,6 +15,7 @@
#include <linux/rbtree.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/timer.h>
struct uprobe;
struct vm_area_struct;
@@ -23,8 +24,17 @@ struct inode;
struct notifier_block;
struct page;
+/*
+ * Allowed return values from uprobe consumer's handler callback
+ * with following meaning:
+ *
+ * UPROBE_HANDLER_REMOVE
+ * - Remove the uprobe breakpoint from current->mm.
+ * UPROBE_HANDLER_IGNORE
+ * - Ignore ret_handler callback for this consumer.
+ */
#define UPROBE_HANDLER_REMOVE 1
-#define UPROBE_HANDLER_MASK 1
+#define UPROBE_HANDLER_IGNORE 2
#define MAX_URETPROBE_DEPTH 64
@@ -37,13 +47,15 @@ struct uprobe_consumer {
* for the current process. If filter() is omitted or returns true,
* UPROBE_HANDLER_REMOVE is effectively ignored.
*/
- int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
+ int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data);
int (*ret_handler)(struct uprobe_consumer *self,
unsigned long func,
- struct pt_regs *regs);
+ struct pt_regs *regs, __u64 *data);
bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);
struct list_head cons_node;
+
+ __u64 id; /* set when uprobe_consumer is registered */
};
#ifdef CONFIG_UPROBES
@@ -56,12 +68,62 @@ enum uprobe_task_state {
UTASK_SSTEP_TRAPPED,
};
+/* The state of hybrid-lifetime uprobe inside struct return_instance */
+enum hprobe_state {
+ HPROBE_LEASED, /* uretprobes_srcu-protected uprobe */
+ HPROBE_STABLE, /* refcounted uprobe */
+ HPROBE_GONE, /* NULL uprobe, SRCU expired, refcount failed */
+ HPROBE_CONSUMED, /* uprobe "consumed" by uretprobe handler */
+};
+
+/*
+ * Hybrid lifetime uprobe. Represents a uprobe instance that could be either
+ * SRCU protected (with SRCU protection eventually potentially timing out),
+ * refcounted using uprobe->ref, or there could be no valid uprobe (NULL).
+ *
+ * hprobe's internal state is setup such that background timer thread can
+ * atomically "downgrade" temporarily RCU-protected uprobe into refcounted one
+ * (or no uprobe, if refcounting failed).
+ *
+ * *stable* pointer always points to the uprobe (or could be NULL if there
+ * was no valid underlying uprobe to begin with).
+ *
+ * *leased* pointer is the key to achieving race-free atomic lifetime state
+ * transition and can have three possible states:
+ * - either the same non-NULL value as *stable*, in which case uprobe is
+ * SRCU-protected;
+ * - NULL, in which case uprobe (if there is any) is refcounted;
+ * - special __UPROBE_DEAD value, which represents an uprobe that was SRCU
+ * protected initially, but SRCU period timed out and we attempted to
+ * convert it to refcounted, but refcount_inc_not_zero() failed, because
+ * uprobe effectively went away (the last consumer unsubscribed). In this
+ * case it's important to know that *stable* pointer (which still has
+ * non-NULL uprobe pointer) shouldn't be used, because lifetime of
+ * underlying uprobe is not guaranteed anymore. __UPROBE_DEAD is just an
+ * internal marker and is handled transparently by hprobe_fetch() helper.
+ *
+ * When uprobe is SRCU-protected, we also record srcu_idx value, necessary for
+ * SRCU unlocking.
+ *
+ * See hprobe_expire() and hprobe_fetch() for the details of the race-free
+ * uprobe state transitions. It all hinges on atomic xchg() over the *leased*
+ * pointer. *stable* pointer, once initially set, is not modified concurrently.
+ */
+struct hprobe {
+ enum hprobe_state state;
+ int srcu_idx;
+ struct uprobe *uprobe;
+};
+
/*
* uprobe_task: Metadata of a task while it singlesteps.
*/
struct uprobe_task {
enum uprobe_task_state state;
+ unsigned int depth;
+ struct return_instance *return_instances;
+
union {
struct {
struct arch_uprobe_task autask;
@@ -75,23 +137,30 @@ struct uprobe_task {
};
struct uprobe *active_uprobe;
+ struct timer_list ri_timer;
unsigned long xol_vaddr;
struct arch_uprobe *auprobe;
+};
- struct return_instance *return_instances;
- unsigned int depth;
+struct return_consumer {
+ __u64 cookie;
+ __u64 id;
};
struct return_instance {
- struct uprobe *uprobe;
+ struct hprobe hprobe;
unsigned long func;
unsigned long stack; /* stack pointer */
unsigned long orig_ret_vaddr; /* original return address */
bool chained; /* true, if instance is nested */
+ int consumers_cnt;
struct return_instance *next; /* keep as stack */
-};
+ struct rcu_head rcu;
+
+ struct return_consumer consumers[] __counted_by(consumers_cnt);
+} ____cacheline_aligned;
enum rp_check {
RP_CHECK_CALL,
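[Usage sketch — not part of the diff] A consumer using the new per-instance cookie that the entry handler can hand to the return handler; the handler bodies are illustrative:

#include <linux/uprobes.h>

static int example_handler(struct uprobe_consumer *self,
			   struct pt_regs *regs, __u64 *data)
{
	*data = 42;	/* cookie handed back to the ret_handler below */
	return 0;	/* or UPROBE_HANDLER_IGNORE to skip the ret_handler */
}

static int example_ret_handler(struct uprobe_consumer *self, unsigned long func,
			       struct pt_regs *regs, __u64 *data)
{
	/* *data == 42 here, unless the entry handler returned IGNORE */
	return 0;
}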
diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h
index 88d96095bcb1..bce95153e5a6 100644
--- a/include/linux/usb/uvc.h
+++ b/include/linux/usb/uvc.h
@@ -118,6 +118,9 @@
#define UVC_GUID_FORMAT_Y12I \
{ 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y16I \
+ { 'Y', '1', '6', 'I', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_Z16 \
{ 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
@@ -140,6 +143,9 @@
#define UVC_GUID_FORMAT_D3DFMT_L8 \
{0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_D3DFMT_R5G6B5 \
+ {0x7b, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
+ 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \
{0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 3625096d5f85..7183e5aca282 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -141,7 +141,8 @@ static inline long get_rlimit_value(struct ucounts *ucounts, enum rlimit_type ty
long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
-long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type);
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type,
+ bool override_rlimit);
void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type);
bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long max);
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 306137a15d07..338e0f5efb4b 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -56,6 +56,17 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
void *ctx,
gfp_t gfp);
+int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
+ struct scatterlist *sg, unsigned int num,
+ void *data,
+ void *ctx,
+ gfp_t gfp);
+
+int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
+ struct scatterlist *sg, unsigned int num,
+ void *data,
+ gfp_t gfp);
+
int virtqueue_add_sgs(struct virtqueue *vq,
struct scatterlist *sgs[],
unsigned int out_sgs,
@@ -82,8 +93,6 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
-int virtqueue_set_dma_premapped(struct virtqueue *_vq);
-
bool virtqueue_poll(struct virtqueue *vq, unsigned);
bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index aed952d04132..f70d0958095c 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -134,6 +134,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_SWAP
SWAP_RA,
SWAP_RA_HIT,
+ SWPIN_ZERO,
+ SWPOUT_ZERO,
#ifdef CONFIG_KSM
KSM_SWPIN_COPY,
#endif
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index 919d999a8c1d..b6eeb8cb6070 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -28,45 +28,21 @@
#ifndef VT_BUF_HAVE_MEMSETW
static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
{
-#ifdef VT_BUF_HAVE_RW
- count /= 2;
- while (count--)
- scr_writew(c, s++);
-#else
memset16(s, c, count / 2);
-#endif
}
#endif
#ifndef VT_BUF_HAVE_MEMCPYW
static inline void scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
{
-#ifdef VT_BUF_HAVE_RW
- count /= 2;
- while (count--)
- scr_writew(scr_readw(s++), d++);
-#else
memcpy(d, s, count);
-#endif
}
#endif
#ifndef VT_BUF_HAVE_MEMMOVEW
static inline void scr_memmovew(u16 *d, const u16 *s, unsigned int count)
{
-#ifdef VT_BUF_HAVE_RW
- if (d < s)
- scr_memcpyw(d, s, count);
- else {
- count /= 2;
- d += count;
- s += count;
- while (count--)
- scr_writew(scr_readw(--s), --d);
- }
-#else
memmove(d, s, count);
-#endif
}
#endif
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 8aa3372f21a0..6d90ad974408 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -221,6 +221,7 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head);
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
+#define wake_up_sync(x) __wake_up_sync(x, TASK_NORMAL)
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
@@ -541,8 +542,8 @@ do { \
int __ret = 0; \
struct hrtimer_sleeper __t; \
\
- hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
- HRTIMER_MODE_REL); \
+ hrtimer_setup_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
+ HRTIMER_MODE_REL); \
if ((timeout) != KTIME_MAX) { \
hrtimer_set_expires_range_ns(&__t.timer, timeout, \
current->timer_slack_ns); \
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 7725b7579b78..9e29d79fc790 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -8,7 +8,7 @@
#include <linux/wait.h>
struct wait_bit_key {
- void *flags;
+ unsigned long *flags;
int bit_nr;
unsigned long timeout;
};
@@ -23,14 +23,14 @@ struct wait_bit_queue_entry {
typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
-void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
+void __wake_up_bit(struct wait_queue_head *wq_head, unsigned long *word, int bit);
int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
-void wake_up_bit(void *word, int bit);
-int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
-int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
-int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
-struct wait_queue_head *bit_waitqueue(void *word, int bit);
+void wake_up_bit(unsigned long *word, int bit);
+int out_of_line_wait_on_bit(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode);
+int out_of_line_wait_on_bit_timeout(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
+int out_of_line_wait_on_bit_lock(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode);
+struct wait_queue_head *bit_waitqueue(unsigned long *word, int bit);
extern void __init wait_bit_init(void);
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
@@ -49,23 +49,24 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync
extern int bit_wait(struct wait_bit_key *key, int mode);
extern int bit_wait_io(struct wait_bit_key *key, int mode);
extern int bit_wait_timeout(struct wait_bit_key *key, int mode);
-extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode);
/**
* wait_on_bit - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * @word: the address containing the bit being waited on
+ * @bit: the bit at that address being waited on
* @mode: the task state to sleep in
*
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that waits on a bit.
- * For instance, if one were to have waiters on a bitflag, one would
- * call wait_on_bit() in threads waiting for the bit to clear.
- * One uses wait_on_bit() where one is waiting for the bit to clear,
- * but has no intention of setting it.
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
+ * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
+ * to be cleared. The clearing of the bit must be signalled with
+ * wake_up_bit(), often as clear_and_wake_up_bit().
+ *
+ * The process will wait on a waitqueue selected by hash from a shared
+ * pool. It will only be woken on a wake_up for the target bit, even
+ * if other processes on the same queue are waiting for other bits.
+ *
+ * Returned value will be zero if the bit was cleared in which case the
+ * call has ACQUIRE semantics, or %-EINTR if the process received a
+ * signal and the mode permitted wake up on that signal.
*/
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
@@ -80,17 +81,20 @@ wait_on_bit(unsigned long *word, int bit, unsigned mode)
/**
* wait_on_bit_io - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * @word: the address containing the bit being waited on
+ * @bit: the bit at that address being waited on
* @mode: the task state to sleep in
*
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared. This is similar to wait_on_bit(), but calls
- * io_schedule() instead of schedule() for the actual waiting.
+ * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
+ * to be cleared. The clearing of the bit must be signalled with
+ * wake_up_bit(), often as clear_and_wake_up_bit().
*
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
+ * This is similar to wait_on_bit(), but calls io_schedule() instead of
+ * schedule() for the actual waiting.
+ *
+ * Returned value will be zero if the bit was cleared in which case the
+ * call has ACQUIRE semantics, or %-EINTR if the process received a
+ * signal and the mode permitted wake up on that signal.
*/
static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
@@ -104,19 +108,24 @@ wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
}
/**
- * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * wait_on_bit_timeout - wait for a bit to be cleared or a timeout to elapse
+ * @word: the address containing the bit being waited on
+ * @bit: the bit at that address being waited on
* @mode: the task state to sleep in
* @timeout: timeout, in jiffies
*
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared. This is similar to wait_on_bit(), except also takes a
- * timeout parameter.
+ * Wait for the given bit in an unsigned long or bitmap (see
+ * DECLARE_BITMAP()) to be cleared, or for a timeout to expire. The
+ * clearing of the bit must be signalled with wake_up_bit(), often as
+ * clear_and_wake_up_bit().
+ *
+ * This is similar to wait_on_bit(), except it also takes a timeout
+ * parameter.
*
- * Returned value will be zero if the bit was cleared before the
- * @timeout elapsed, or non-zero if the @timeout elapsed or process
- * received a signal and the mode permitted wakeup on that signal.
+ * Returned value will be zero if the bit was cleared in which case the
+ * call has ACQUIRE semantics, or %-EINTR if the process received a
+ * signal and the mode permitted wake up on that signal, or %-EAGAIN if the
+ * timeout elapsed.
*/
static inline int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
@@ -132,19 +141,21 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
/**
* wait_on_bit_action - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * @word: the address containing the bit being waited on
+ * @bit: the bit at that address being waited on
* @action: the function used to sleep, which may take special actions
* @mode: the task state to sleep in
*
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared, and allow the waiting action to be specified.
- * This is like wait_on_bit() but allows fine control of how the waiting
- * is done.
+ * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
+ * to be cleared. The clearing of the bit must be signalled with
+ * wake_up_bit(), often as clear_and_wake_up_bit().
+ *
+ * This is similar to wait_on_bit(), but calls @action() instead of
+ * schedule() for the actual waiting.
*
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
+ * Returned value will be zero if the bit was cleared in which case the
+ * call has ACQUIRE semantics, or the error code returned by @action if
+ * that call returned non-zero.
*/
static inline int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
@@ -157,23 +168,22 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
}
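A sketch of a custom @action, modelled on the stock bit_wait() declared above (the one-second sleep policy is a hypothetical example):

	/* Like bit_wait(), but cap each sleep at one second. */
	static int my_bit_wait(struct wait_bit_key *key, int mode)
	{
		schedule_timeout(HZ);
		if (signal_pending_state(mode, current))
			return -EINTR;
		return 0;
	}

	err = wait_on_bit_action(&dev->flags, MY_BUSY, my_bit_wait,
				 TASK_INTERRUPTIBLE);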
/**
- * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * wait_on_bit_lock - wait for a bit to be cleared, then set it
+ * @word: the address containing the bit being waited on
+ * @bit: the bit of the word being waited on and set
* @mode: the task state to sleep in
*
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that waits on a bit
- * when one intends to set it, for instance, trying to lock bitflags.
- * For instance, if one were to have waiters trying to set bitflag
- * and waiting for it to clear before setting it, one would call
- * wait_on_bit() in threads waiting to be able to set the bit.
- * One uses wait_on_bit_lock() where one is waiting for the bit to
- * clear with the intention of setting it, and when done, clearing it.
+ * Wait for the given bit in an unsigned long or bitmap (see
+ * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be
+ * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As
+ * soon as it is clear, atomically set it and return.
*
- * Returns zero if the bit was (eventually) found to be clear and was
- * set. Returns non-zero if a signal was delivered to the process and
- * the @mode allows that signal to wake the process.
+ * This is similar to wait_on_bit(), but sets the bit before returning.
+ *
+ * Returned value will be zero if the bit was successfully set in which
+ * case the call has the same memory sequencing semantics as
+ * test_and_set_bit(), or %-EINTR if the process received a signal and
+ * the mode permitted wake up on that signal.
*/
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
@@ -185,15 +195,18 @@ wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
}
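A sketch of the lock-like pattern this implies (structure and bit names hypothetical):

	/* Take the bit-lock: wait for MY_LOCK to clear, then set it. */
	if (wait_on_bit_lock(&dev->flags, MY_LOCK, TASK_KILLABLE))
		return -EINTR;
	update_dev_state(dev);			/* we own the bit here */
	clear_and_wake_up_bit(MY_LOCK, &dev->flags);	/* "unlock" */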
/**
- * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * wait_on_bit_lock_io - wait for a bit to be cleared, then set it
+ * @word: the address containing the bit being waited on
+ * @bit: the bit of the word being waited on and set
* @mode: the task state to sleep in
*
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared and then to atomically set it. This is similar
- * to wait_on_bit(), but calls io_schedule() instead of schedule()
- * for the actual waiting.
+ * Wait for the given bit in an unsigned long or bitmap (see
+ * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be
+ * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As
+ * soon as it is clear, atomically set it and return.
+ *
+ * This is similar to wait_on_bit_lock(), but calls io_schedule() instead
+ * of schedule().
*
* Returns zero if the bit was (eventually) found to be clear and was
* set. Returns non-zero if a signal was delivered to the process and
@@ -209,21 +222,19 @@ wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
}
/**
- * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * wait_on_bit_lock_action - wait for a bit to be cleared, then set it
+ * @word: the address containing the bit being waited on
+ * @bit: the bit of the word being waited on and set
* @action: the function used to sleep, which may take special actions
* @mode: the task state to sleep in
*
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared and then to set it, and allow the waiting action
- * to be specified.
- * This is like wait_on_bit() but allows fine control of how the waiting
- * is done.
+ * This is similar to wait_on_bit_lock(), but calls @action() instead of
+ * schedule() for the actual waiting.
*
- * Returns zero if the bit was (eventually) found to be clear and was
- * set. Returns non-zero if a signal was delivered to the process and
- * the @mode allows that signal to wake the process.
+ * Returned value will be zero if the bit was successfully set in which
+ * case the call has the same memory sequencing semantics as
+ * test_and_set_bit(), or the error code returned by @action if that
+ * call returned non-zero.
*/
static inline int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
@@ -269,7 +280,26 @@ __out: __ret; \
#define __wait_var_event(var, condition) \
___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
schedule())
+#define __wait_var_event_io(var, condition) \
+ ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ io_schedule())
+/**
+ * wait_var_event - wait for a variable to be updated and notified
+ * @var: the address of variable being waited on
+ * @condition: the condition to wait for
+ *
+ * Wait for a @condition to be true, only re-checking when a wake up is
+ * received for the given @var (an arbitrary kernel address which need
+ * not be directly related to the given condition, but usually is).
+ *
+ * The process will wait on a waitqueue selected by hash from a shared
+ * pool. It will only be woken on a wake_up for the given address.
+ *
+ * The condition should normally use smp_load_acquire() or a similarly
+ * ordered access to ensure that any changes to memory made before the
+ * condition became true will be visible after the wait completes.
+ */
#define wait_var_event(var, condition) \
do { \
might_sleep(); \
@@ -278,10 +308,56 @@ do { \
__wait_var_event(var, condition); \
} while (0)
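For illustration, a waiter/waker pair following the acquire/release advice above (field and value names hypothetical; store_release_wake_up() is documented further down in this patch):

	/* Waiter: re-checked only on a wake up for &dev->state. */
	wait_var_event(&dev->state,
		       smp_load_acquire(&dev->state) == MY_STATE_READY);

	/* Waker: publish the new state, then wake &dev->state waiters. */
	store_release_wake_up(&dev->state, MY_STATE_READY);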
+/**
+ * wait_var_event_io - wait for a variable to be updated and notified
+ * @var: the address of variable being waited on
+ * @condition: the condition to wait for
+ *
+ * Wait for an IO related @condition to be true, only re-checking when a
+ * wake up is received for the given @var (an arbitrary kernel address
+ * which need not be directly related to the given condition, but
+ * usually is).
+ *
+ * The process will wait on a waitqueue selected by hash from a shared
+ * pool. It will only be woken on a wake_up for the given address.
+ *
+ * This is similar to wait_var_event(), but calls io_schedule() instead
+ * of schedule().
+ *
+ * The condition should normally use smp_load_acquire() or a similarly
+ * ordered access to ensure that any changes to memory made before the
+ * condition became true will be visible after the wait completes.
+ */
+#define wait_var_event_io(var, condition) \
+do { \
+ might_sleep(); \
+ if (condition) \
+ break; \
+ __wait_var_event_io(var, condition); \
+} while (0)
+
#define __wait_var_event_killable(var, condition) \
___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \
schedule())
+/**
+ * wait_var_event_killable - wait for a variable to be updated and notified
+ * @var: the address of variable being waited on
+ * @condition: the condition to wait for
+ *
+ * Wait for a @condition to be true or a fatal signal to be received,
+ * only re-checking the condition when a wake up is received for the given
+ * @var (an arbitrary kernel address which need not be directly related
+ * to the given condition, but usually is).
+ *
+ * This is similar to wait_var_event() but returns a value which is
+ * 0 if the condition became true, or %-ERESTARTSYS if a fatal signal
+ * was received.
+ *
+ * The condition should normally use smp_load_acquire() or a similarly
+ * ordered access to ensure that any changes to memory made before the
+ * condition became true will be visible after the wait completes.
+ */
#define wait_var_event_killable(var, condition) \
({ \
int __ret = 0; \
@@ -296,6 +372,26 @@ do { \
TASK_UNINTERRUPTIBLE, 0, timeout, \
__ret = schedule_timeout(__ret))
+/**
+ * wait_var_event_timeout - wait for a variable to be updated or a timeout to expire
+ * @var: the address of variable being waited on
+ * @condition: the condition to wait for
+ * @timeout: maximum time to wait in jiffies
+ *
+ * Wait for a @condition to be true or a timeout to expire, only
+ * re-checking the condition when a wake up is received for the given
+ * @var (an arbitrary kernel address which need not be directly related
+ * to the given condition, but usually is).
+ *
+ * This is similar to wait_var_event() but returns a value which is 0 if
+ * the timeout expired and the condition was still false, or the
+ * remaining time left in the timeout (but at least 1) if the condition
+ * was found to be true.
+ *
+ * The condition should normally use smp_load_acquire() or a similarly
+ * ordered access to ensure that any changes to memory made before the
+ * condition became true will be visible after the wait completes.
+ */
#define wait_var_event_timeout(var, condition, timeout) \
({ \
long __ret = timeout; \
@@ -309,6 +405,23 @@ do { \
___wait_var_event(var, condition, TASK_INTERRUPTIBLE, 0, 0, \
schedule())
+/**
+ * wait_var_event_interruptible - wait for a variable to be updated and notified
+ * @var: the address of variable being waited on
+ * @condition: the condition to wait for
+ *
+ * Wait for a @condition to be true or a signal to be received, only
+ * re-checking the condition when a wake up is received for the given
+ * @var (an arbitrary kernel address which need not be directly related
+ * to the given condition, but usually is).
+ *
+ * This is similar to wait_var_event() but returns a value which is 0 if
+ * the condition became true, or %-ERESTARTSYS if a signal was received.
+ *
+ * The condition should normally use smp_load_acquire() or a similarly
+ * ordered access to ensure that any changes to memory made before the
+ * condition became true will be visible after the wait completes.
+ */
#define wait_var_event_interruptible(var, condition) \
({ \
int __ret = 0; \
@@ -319,15 +432,122 @@ do { \
})
/**
- * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
+ * wait_var_event_any_lock - wait for a variable to be updated under a lock
+ * @var: the address of the variable being waited on
+ * @condition: condition to wait for
+ * @lock: the object that is locked to protect updates to the variable
+ * @type: prefix on lock and unlock operations
+ * @state: waiting state, %TASK_UNINTERRUPTIBLE etc.
+ *
+ * Wait for a condition which can only be reliably tested while holding
+ * a lock. The variables assessed in the condition will normally be updated
+ * under the same lock, and the wake up should be signalled with
+ * wake_up_var_locked() under the same lock.
+ *
+ * This is similar to wait_var_event(), but assumes a lock is held
+ * while calling this function and while updating the variable.
*
+ * This must be called while the given lock is held and the lock will be
+ * dropped when schedule() is called to wait for a wake up, and will be
+ * reacquired before testing the condition again. The functions used to
+ * unlock and lock the object are constructed by appending _unlock and _lock
+ * to @type.
+ *
+ * Return %-ERESTARTSYS if a signal arrives which is allowed to interrupt
+ * the wait according to @state.
+ */
+#define wait_var_event_any_lock(var, condition, lock, type, state) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __ret = ___wait_var_event(var, condition, state, 0, 0, \
+ type ## _unlock(lock); \
+ schedule(); \
+ type ## _lock(lock)); \
+ __ret; \
+})
+
+/**
+ * wait_var_event_spinlock - wait for a variable to be updated under a spinlock
+ * @var: the address of the variable being waited on
+ * @condition: condition to wait for
+ * @lock: the spinlock which protects updates to the variable
+ *
+ * Wait for a condition which can only be reliably tested while holding
+ * a spinlock. The variables assessed in the condition will normally be updated
+ * under the same spinlock, and the wake up should be signalled with
+ * wake_up_var_locked() under the same spinlock.
+ *
+ * This is similar to wait_var_event(), but assumes a spinlock is held
+ * while calling this function and while updating the variable.
+ *
+ * This must be called while the given lock is held and the lock will be
+ * dropped when schedule() is called to wait for a wake up, and will be
+ * reacquired before testing the condition again.
+ */
+#define wait_var_event_spinlock(var, condition, lock) \
+ wait_var_event_any_lock(var, condition, lock, spin, TASK_UNINTERRUPTIBLE)
+
+/**
+ * wait_var_event_mutex - wait for a variable to be updated under a mutex
+ * @var: the address of the variable being waited on
+ * @condition: condition to wait for
+ * @lock: the mutex which protects updates to the variable
+ *
+ * Wait for a condition which can only be reliably tested while holding
+ * a mutex. The variables assessed in the condition will normally be
+ * updated under the same mutex, and the wake up should be signalled
+ * with wake_up_var_locked() under the same mutex.
+ *
+ * This is similar to wait_var_event(), but assumes a mutex is held
+ * while calling this function and while updating the variable.
+ *
+ * This must be called while the given mutex is held and the mutex will be
+ * dropped when schedule() is called to wait for a wake up, and will be
+ * reacquired before testing the condition again.
+ */
+#define wait_var_event_mutex(var, condition, lock) \
+ wait_var_event_any_lock(var, condition, lock, mutex, TASK_UNINTERRUPTIBLE)
+
+/**
+ * wake_up_var_protected - wake up waiters for a variable asserting that it is safe
+ * @var: the address of the variable being waited on
+ * @cond: the condition which affirms this is safe
+ *
+ * When waking waiters which use wait_var_event_any_lock() the waker must be
+ * holding the relevant lock to avoid races. This version of wake_up_var()
+ * asserts that the relevant lock is held and so no barrier is needed.
+ * The @cond is only tested when CONFIG_LOCKDEP is enabled.
+ */
+#define wake_up_var_protected(var, cond) \
+do { \
+ lockdep_assert(cond); \
+ wake_up_var(var); \
+} while (0)
+
+/**
+ * wake_up_var_locked - wake up waiters for a variable while holding a spinlock or mutex
+ * @var: the address of the variable being waited on
+ * @lock: the spinlock or mutex that protects the variable
+ *
+ * Send a wake up for the given variable which should be waited for with
+ * wait_var_event_spinlock() or wait_var_event_mutex(). Unlike wake_up_var(),
+ * no extra barriers are needed as the locking provides sufficient sequencing.
+ */
+#define wake_up_var_locked(var, lock) \
+ wake_up_var_protected(var, lockdep_is_held(lock))
+
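A sketch of the locked variants in use (the pending counter and its spinlock are hypothetical):

	/* Waiter: dev->lock protects dev->pending. */
	spin_lock(&dev->lock);
	wait_var_event_spinlock(&dev->pending, dev->pending == 0, &dev->lock);
	spin_unlock(&dev->lock);

	/* Waker: update and wake under the same lock, no extra barriers. */
	spin_lock(&dev->lock);
	if (--dev->pending == 0)
		wake_up_var_locked(&dev->pending, &dev->lock);
	spin_unlock(&dev->lock);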
+/**
+ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
* @bit: the bit of the word being waited on
- * @word: the word being waited on, a kernel virtual address
+ * @word: the address containing the bit being waited on
*
- * You can use this helper if bitflags are manipulated atomically rather than
- * non-atomically under a lock.
+ * The designated bit is cleared and any tasks waiting in wait_on_bit()
+ * or similar will be woken. This call has RELEASE semantics so that
+ * any changes to memory made before this call are guaranteed to be visible
+ * after the corresponding wait_on_bit() completes.
*/
-static inline void clear_and_wake_up_bit(int bit, void *word)
+static inline void clear_and_wake_up_bit(int bit, unsigned long *word)
{
clear_bit_unlock(bit, word);
/* See wake_up_bit() for which memory barrier you need to use. */
@@ -335,4 +555,64 @@ static inline void clear_and_wake_up_bit(int bit, void *word)
wake_up_bit(word, bit);
}
+/**
+ * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit
+ * @bit: the bit of the word being waited on
+ * @word: the address of memory containing that bit
+ *
+ * If the bit is set and can be atomically cleared, any tasks waiting in
+ * wait_on_bit() or similar will be woken. This call has the same
+ * complete ordering semantics as test_and_clear_bit(). Any changes to
+ * memory made before this call are guaranteed to be visible after the
+ * corresponding wait_on_bit() completes.
+ *
+ * Returns %true if the bit was successfully cleared and the wake up was sent.
+ */
+static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word)
+{
+ if (!test_and_clear_bit(bit, word))
+ return false;
+ /* no extra barrier required */
+ wake_up_bit(word, bit);
+ return true;
+}
+
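A sketch of the clear-exactly-once handoff this enables (flag and helper names hypothetical):

	/* Only the caller that actually clears MY_PENDING completes the
	 * request; a concurrent canceller that cleared it first wins.
	 */
	if (!test_and_clear_wake_up_bit(MY_PENDING, &req->flags))
		return;
	complete_request(req);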
+/**
+ * atomic_dec_and_wake_up - decrement an atomic_t and if zero, wake up waiters
+ * @var: the variable to dec and test
+ *
+ * Decrements the atomic variable and if it reaches zero, sends a wake_up to any
+ * processes waiting on the variable.
+ *
+ * This function has the same complete ordering semantics as atomic_dec_and_test().
+ *
+ * Returns %true if the variable reached zero and the wake up was sent.
+ */
+static inline bool atomic_dec_and_wake_up(atomic_t *var)
+{
+ if (!atomic_dec_and_test(var))
+ return false;
+ /* No extra barrier required */
+ wake_up_var(var);
+ return true;
+}
+
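A sketch of the drain-style pattern this supports (the user count is hypothetical):

	/* Each finished user drops a reference and wakes the drainer. */
	atomic_dec_and_wake_up(&dev->users);

	/* Drainer: the full ordering of atomic_dec_and_test() makes a
	 * plain atomic_read() in the condition sufficient here.
	 */
	wait_var_event(&dev->users, atomic_read(&dev->users) == 0);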
+/**
+ * store_release_wake_up - update a variable and send a wake_up
+ * @var: the address of the variable to be updated and woken
+ * @val: the value to store in the variable.
+ *
+ * Store the given value in the variable and send a wake up to any tasks
+ * waiting on the variable. All necessary barriers are included to ensure
+ * the task calling wait_var_event() sees the new value and all values
+ * written to memory before this call.
+ */
+#define store_release_wake_up(var, val) \
+do { \
+ smp_store_release(var, val); \
+ smp_mb(); \
+ wake_up_var(var); \
+} while (0)
+
#endif /* _LINUX_WAIT_BIT_H */
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index e6e34d74dda0..03e5d3fe226d 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -21,8 +21,7 @@ struct compat_iw_point {
__u16 length;
__u16 flags;
};
-#endif
-#ifdef CONFIG_COMPAT
+
struct __compat_iw_event {
__u16 len; /* Real length of this stuff */
__u16 cmd; /* Wireless IOCTL */
@@ -49,5 +48,5 @@ struct __compat_iw_event {
#define IW_EV_COMPAT_POINT_LEN \
(IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \
IW_EV_COMPAT_POINT_OFF)
-#endif
+#endif /* CONFIG_COMPAT */
#endif /* _LINUX_WIRELESS_H */
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 3275470b5531..10751c8e5e6a 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -34,7 +34,7 @@ struct wmi_device {
*
* Cast a struct device to a struct wmi_device.
*/
-#define to_wmi_device(device) container_of(device, struct wmi_device, dev)
+#define to_wmi_device(device) container_of_const(device, struct wmi_device, dev)
extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev,
u8 instance, u32 method_id,
@@ -56,6 +56,7 @@ u8 wmidev_instance_count(struct wmi_device *wdev);
* @no_singleton: Driver can be instantiated multiple times
* @probe: Callback for device binding
* @remove: Callback for device unbinding
+ * @shutdown: Callback for device shutdown
* @notify: Callback for receiving WMI events
*
* This represents WMI drivers which handle WMI devices.
@@ -68,9 +69,18 @@ struct wmi_driver {
int (*probe)(struct wmi_device *wdev, const void *context);
void (*remove)(struct wmi_device *wdev);
+ void (*shutdown)(struct wmi_device *wdev);
void (*notify)(struct wmi_device *device, union acpi_object *data);
};
+/**
+ * to_wmi_driver() - Helper macro to cast a driver to a wmi_driver
+ * @drv: driver struct
+ *
+ * Cast a struct device_driver to a struct wmi_driver.
+ */
+#define to_wmi_driver(drv) container_of_const(drv, struct wmi_driver, driver)
+
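A sketch of a driver wiring up the new @shutdown callback (all names hypothetical):

	static void my_wmi_shutdown(struct wmi_device *wdev)
	{
		/* Quiesce firmware notifications before power-off. */
	}

	static struct wmi_driver my_wmi_driver = {
		.driver = { .name = "my-wmi-driver" },
		.probe = my_wmi_probe,
		.remove = my_wmi_remove,
		.shutdown = my_wmi_shutdown,
		.notify = my_wmi_notify,
	};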
extern int __must_check __wmi_driver_register(struct wmi_driver *driver,
struct module *owner);
extern void wmi_driver_unregister(struct wmi_driver *driver);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 59c2695e12e7..b0dc957c3e56 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -412,7 +412,7 @@ enum wq_flags {
};
enum wq_consts {
- WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
+ WQ_MAX_ACTIVE = 2048, /* I like 2048, better ideas? */
WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index d6db822e4bb3..d11b903c2edb 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -213,11 +213,8 @@ static inline void wait_on_inode(struct inode *inode)
#include <linux/bio.h>
void __inode_attach_wb(struct inode *inode, struct folio *folio);
-void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
- struct inode *inode)
- __releases(&inode->i_lock);
void wbc_detach_inode(struct writeback_control *wbc);
-void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
+void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
size_t bytes);
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
enum wb_reason reason, struct wb_completion *done);
@@ -254,22 +251,8 @@ static inline void inode_detach_wb(struct inode *inode)
}
}
-/**
- * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
- * @wbc: writeback_control of interest
- * @inode: target inode
- *
- * This function is to be used by __filemap_fdatawrite_range(), which is an
- * alternative entry point into writeback code, and first ensures @inode is
- * associated with a bdi_writeback and attaches it to @wbc.
- */
-static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
- struct inode *inode)
-{
- spin_lock(&inode->i_lock);
- inode_attach_wb(inode, NULL);
- wbc_attach_and_unlock_inode(wbc, inode);
-}
+void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+ struct inode *inode);
/**
 * wbc_init_bio - writeback specific initialization of bio
@@ -303,13 +286,6 @@ static inline void inode_detach_wb(struct inode *inode)
{
}
-static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
- struct inode *inode)
- __releases(&inode->i_lock)
-{
- spin_unlock(&inode->i_lock);
-}
-
static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
struct inode *inode)
{
@@ -324,7 +300,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
}
static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
- struct page *page, size_t bytes)
+ struct folio *folio, size_t bytes)
{
}
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index bb763085479a..a401a2f31a77 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -65,6 +65,16 @@ struct ww_acquire_ctx {
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
+ /**
+ * @first_lock_dep_map: fake lockdep_map for first locked ww_mutex.
+ *
+ * lockdep requires the lockdep_map for the first locked ww_mutex
+ * in a ww transaction to remain in memory until all ww_mutexes of
+ * the transaction have been unlocked. Ensure this by keeping a
+ * fake locked ww_mutex lockdep map between ww_acquire_init() and
+ * ww_acquire_fini().
+ */
+ struct lockdep_map first_lock_dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
unsigned int deadlock_inject_interval;
@@ -146,7 +156,10 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
&ww_class->acquire_key, 0);
+ lockdep_init_map(&ctx->first_lock_dep_map, ww_class->mutex_name,
+ &ww_class->mutex_key, 0);
mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
+ mutex_acquire_nest(&ctx->first_lock_dep_map, 0, 0, &ctx->dep_map, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
ctx->deadlock_inject_interval = 1;
@@ -185,6 +198,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
mutex_release(&ctx->dep_map, _THIS_IP_);
#endif
#ifdef DEBUG_WW_MUTEXES
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index 170fdee6339c..79c781875c09 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -17,6 +17,8 @@
* @WWAN_PORT_FIREHOSE: XML based command protocol
* @WWAN_PORT_XMMRPC: Control protocol for Intel XMM modems
* @WWAN_PORT_FASTBOOT: Fastboot protocol control
+ * @WWAN_PORT_ADB: ADB protocol control
+ * @WWAN_PORT_MIPC: MTK MIPC diagnostic interface
*
* @WWAN_PORT_MAX: Highest supported port types
* @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type
@@ -30,6 +32,8 @@ enum wwan_port_type {
WWAN_PORT_FIREHOSE,
WWAN_PORT_XMMRPC,
WWAN_PORT_FASTBOOT,
+ WWAN_PORT_ADB,
+ WWAN_PORT_MIPC,
/* Add new port types above this line */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d20051865800..86b0d47984a1 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -19,6 +19,10 @@
#include <linux/user_namespace.h>
#include <uapi/linux/xattr.h>
+/* List of all xattr_args "versions". */
+#define XATTR_ARGS_SIZE_VER0 16 /* sizeof first published struct */
+#define XATTR_ARGS_SIZE_LATEST XATTR_ARGS_SIZE_VER0
+
struct inode;
struct dentry;
diff --git a/include/media/i2c/mt9p031.h b/include/media/i2c/mt9p031.h
deleted file mode 100644
index f933cd0be8e5..000000000000
--- a/include/media/i2c/mt9p031.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef MT9P031_H
-#define MT9P031_H
-
-struct v4l2_subdev;
-
-/*
- * struct mt9p031_platform_data - MT9P031 platform data
- * @ext_freq: Input clock frequency
- * @target_freq: Pixel clock frequency
- */
-struct mt9p031_platform_data {
- unsigned int pixclk_pol:1;
- int ext_freq;
- int target_freq;
-};
-
-#endif
diff --git a/include/media/i2c/ths7303.h b/include/media/i2c/ths7303.h
index fee2818c558d..fc937025cdb4 100644
--- a/include/media/i2c/ths7303.h
+++ b/include/media/i2c/ths7303.h
@@ -5,7 +5,7 @@
* Copyright 2013 Cisco Systems, Inc. and/or its affiliates.
*
* Contributors:
- * Hans Verkuil <hans.verkuil@cisco.com>
+ * Hans Verkuil <hansverk@cisco.com>
* Lad, Prabhakar <prabhakar.lad@ti.com>
* Martin Bugge <marbugge@cisco.com>
*/
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 0393b23129eb..64cf590b1134 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -1143,10 +1143,10 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph);
/**
* media_pipeline_start - Mark a pipeline as streaming
- * @pad: Starting pad
+ * @origin: Starting pad
* @pipe: Media pipeline to be assigned to all pads in the pipeline.
*
- * Mark all pads connected to a given pad through enabled links, either
+ * Mark all pads connected to pad @origin through enabled links, either
* directly or indirectly, as streaming. The given pipeline object is assigned
* to every pad in the pipeline and stored in the media_pad pipe field.
*
@@ -1155,17 +1155,17 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph);
* pipeline pointer must be identical for all nested calls to
* media_pipeline_start().
*/
-__must_check int media_pipeline_start(struct media_pad *pad,
+__must_check int media_pipeline_start(struct media_pad *origin,
struct media_pipeline *pipe);
/**
* __media_pipeline_start - Mark a pipeline as streaming
*
- * @pad: Starting pad
+ * @origin: Starting pad
* @pipe: Media pipeline to be assigned to all pads in the pipeline.
*
 * .. note:: This is the non-locking version of media_pipeline_start()
*/
-__must_check int __media_pipeline_start(struct media_pad *pad,
+__must_check int __media_pipeline_start(struct media_pad *origin,
struct media_pipeline *pipe);
/**
diff --git a/include/media/media-request.h b/include/media/media-request.h
index 3cd25a2717ce..d4ac557678a7 100644
--- a/include/media/media-request.h
+++ b/include/media/media-request.h
@@ -5,7 +5,7 @@
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
* Copyright (C) 2018 Intel Corporation
*
- * Author: Hans Verkuil <hans.verkuil@cisco.com>
+ * Author: Hans Verkuil <hansverk@cisco.com>
* Author: Sakari Ailus <sakari.ailus@linux.intel.com>
*/
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index d82dfdbf6e58..1b6222fab24e 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -62,6 +62,7 @@ struct v4l2_ioctl_callbacks;
struct video_device;
struct v4l2_device;
struct v4l2_ctrl_handler;
+struct dentry;
/**
* enum v4l2_video_device_flags - Flags used by &struct video_device
@@ -539,6 +540,20 @@ static inline int video_is_registered(struct video_device *vdev)
return test_bit(V4L2_FL_REGISTERED, &vdev->flags);
}
+/**
+ * v4l2_debugfs_root - returns the dentry of the top-level "v4l2" debugfs dir
+ *
+ * If this directory does not yet exist, then it will be created.
+ */
+#ifdef CONFIG_DEBUG_FS
+struct dentry *v4l2_debugfs_root(void);
+#else
+static inline struct dentry *v4l2_debugfs_root(void)
+{
+ return NULL;
+}
+#endif
+
#if defined(CONFIG_MEDIA_CONTROLLER)
/**
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 8fa963326bf6..ff07dc6b103c 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -8,6 +8,7 @@
#ifndef __V4L2_DV_TIMINGS_H
#define __V4L2_DV_TIMINGS_H
+#include <linux/debugfs.h>
#include <linux/videodev2.h>
/**
@@ -146,15 +147,18 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
* @polarities: the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
* @interlaced: if this flag is true, it indicates interlaced format
+ * @cap: the v4l2_dv_timings_cap capabilities.
* @fmt: the resulting timings.
*
* This function will attempt to detect if the given values correspond to a
* valid CVT format. If so, then it will return true, and fmt will be filled
* in with the found CVT timings.
*/
-bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
- unsigned active_width, u32 polarities, bool interlaced,
- struct v4l2_dv_timings *fmt);
+bool v4l2_detect_cvt(unsigned int frame_height, unsigned int hfreq,
+ unsigned int vsync, unsigned int active_width,
+ u32 polarities, bool interlaced,
+ const struct v4l2_dv_timings_cap *cap,
+ struct v4l2_dv_timings *fmt);
/**
* v4l2_detect_gtf - detect if the given timings follow the GTF standard
@@ -170,15 +174,18 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
* image height, so it has to be passed explicitly. Usually
* the native screen aspect ratio is used for this. If it
* is not filled in correctly, then 16:9 will be assumed.
+ * @cap: the v4l2_dv_timings_cap capabilities.
* @fmt: the resulting timings.
*
* This function will attempt to detect if the given values correspond to a
* valid GTF format. If so, then it will return true, and fmt will be filled
* in with the found GTF timings.
*/
-bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, bool interlaced, struct v4l2_fract aspect,
- struct v4l2_dv_timings *fmt);
+bool v4l2_detect_gtf(unsigned int frame_height, unsigned int hfreq,
+ unsigned int vsync, u32 polarities, bool interlaced,
+ struct v4l2_fract aspect,
+ const struct v4l2_dv_timings_cap *cap,
+ struct v4l2_dv_timings *fmt);
/**
* v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
@@ -251,4 +258,51 @@ void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input);
int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
+/* Add support for exporting InfoFrames to debugfs */
+
+/*
+ * HDMI InfoFrames start with a 3 byte header, then a checksum,
+ * followed by the actual IF payload.
+ *
+ * The payload length is limited to 30 bytes according to the HDMI spec,
+ * but since the length is encoded in 5 bits, it can be 31 bytes theoretically.
+ * So set the max length as 31 + 3 (header) + 1 (checksum) = 35.
+ */
+#define V4L2_DEBUGFS_IF_MAX_LEN (35)
+
+#define V4L2_DEBUGFS_IF_AVI BIT(0)
+#define V4L2_DEBUGFS_IF_AUDIO BIT(1)
+#define V4L2_DEBUGFS_IF_SPD BIT(2)
+#define V4L2_DEBUGFS_IF_HDMI BIT(3)
+
+typedef ssize_t (*v4l2_debugfs_if_read_t)(u32 type, void *priv,
+ struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos);
+
+struct v4l2_debugfs_if {
+ struct dentry *if_dir;
+ void *priv;
+
+ v4l2_debugfs_if_read_t if_read;
+};
+
+#ifdef CONFIG_DEBUG_FS
+struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types,
+ void *priv,
+ v4l2_debugfs_if_read_t if_read);
+void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes);
+#else
+static inline
+struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types,
+ void *priv,
+ v4l2_debugfs_if_read_t if_read)
+{
+ return NULL;
+}
+
+static inline void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes)
+{
+}
+#endif
+
#endif
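A sketch of how a driver might hook up the InfoFrame debugfs interface (the state layout and buffer fields are hypothetical):

	static ssize_t my_if_read(u32 type, void *priv, struct file *filp,
				  char __user *ubuf, size_t count, loff_t *ppos)
	{
		struct my_state *state = priv;

		if (type != V4L2_DEBUGFS_IF_AVI)
			return 0;
		return simple_read_from_buffer(ubuf, count, ppos,
					       state->avi_buf, state->avi_len);
	}

	/* at probe time: */
	state->infoframes = v4l2_debugfs_if_alloc(state->debugfs_dir,
						  V4L2_DEBUGFS_IF_AVI, state,
						  my_if_read);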
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 8daa0929865c..2f2200875b03 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -450,8 +450,9 @@ enum v4l2_subdev_pre_streamon_flags {
* already started or stopped subdev. Also see call_s_stream wrapper in
* v4l2-subdev.c.
*
- * New drivers should instead implement &v4l2_subdev_pad_ops.enable_streams
- * and &v4l2_subdev_pad_ops.disable_streams operations, and use
+ * This callback is DEPRECATED. New drivers should instead implement
+ * &v4l2_subdev_pad_ops.enable_streams and
+ * &v4l2_subdev_pad_ops.disable_streams operations, and use
* v4l2_subdev_s_stream_helper for the &v4l2_subdev_video_ops.s_stream
* operation to support legacy users.
*
@@ -833,11 +834,19 @@ struct v4l2_subdev_state {
* v4l2_subdev_init_finalize() at initialization time). Do not call
* directly, use v4l2_subdev_enable_streams() instead.
*
+ * Drivers that support only a single stream without setting the
+ * V4L2_SUBDEV_CAP_STREAMS sub-device capability flag can ignore the mask
+ * argument.
+ *
* @disable_streams: Disable the streams defined in streams_mask on the given
* source pad. Subdevs that implement this operation must use the active
* state management provided by the subdev core (enabled through a call to
* v4l2_subdev_init_finalize() at initialization time). Do not call
* directly, use v4l2_subdev_disable_streams() instead.
+ *
+ * Drivers that support only a single stream without setting the
+ * V4L2_SUBDEV_CAP_STREAMS sub-device capability flag can ignore the mask
+ * argument.
*/
struct v4l2_subdev_pad_ops {
int (*enum_mbus_code)(struct v4l2_subdev *sd,
@@ -1676,6 +1685,8 @@ int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
* function implements a best-effort compatibility by calling the .s_stream()
* operation, limited to subdevs that have a single source pad.
*
+ * Drivers that are not stream-aware shall set @streams_mask to BIT_ULL(0).
+ *
* Return:
* * 0: Success
* * -EALREADY: One of the streams in streams_mask is already enabled
@@ -1706,6 +1717,8 @@ int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
* function implements a best-effort compatibility by calling the .s_stream()
* operation, limited to subdevs that have a single source pad.
*
+ * Drivers that are not stream-aware shall set @streams_mask to BIT_ULL(0).
+ *
* Return:
* * 0: Success
* * -EALREADY: One of the streams in streams_mask is not enabled
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 77ee0c657e2c..404df8557f6a 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -219,7 +219,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
int ref, bool terse);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
-int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
static inline void tcf_action_update_bstats(struct tc_action *a,
struct sk_buff *skb)
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index bab1e3d7452a..6203bd8663b7 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
- Copyright 2023 NXP
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -29,6 +29,7 @@
#define HCI_MAX_ACL_SIZE 1024
#define HCI_MAX_SCO_SIZE 255
#define HCI_MAX_ISO_SIZE 251
+#define HCI_MAX_ISO_BIS 31
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
@@ -67,6 +68,7 @@
#define HCI_I2C 8
#define HCI_SMD 9
#define HCI_VIRTIO 10
+#define HCI_IPC 11
/* HCI device quirks */
enum {
@@ -300,6 +302,20 @@ enum {
*/
HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
+ /*
+ * When this quirk is set, the HCI_OP_LE_EXT_CREATE_CONN command is
+ * disabled. This is required for the Actions Semiconductor ATS2851
+ * based controllers, which erroneously claims to support it.
+ */
+ HCI_QUIRK_BROKEN_EXT_CREATE_CONN,
+
+ /*
+ * When this quirk is set, the command WRITE_AUTH_PAYLOAD_TIMEOUT is
+ * skipped. This is required for the Actions Semiconductor ATS2851
+ * based controllers, due to a race condition in pairing process.
+ */
+ HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
+
/* When this quirk is set, MSFT extension monitor tracking by
* address filter is supported. Since tracking quantity of each
* pattern is limited, this feature supports tracking multiple
@@ -683,6 +699,7 @@ enum {
#define HCI_RSSI_INVALID 127
#define HCI_SYNC_HANDLE_INVALID 0xffff
+#define HCI_SID_INVALID 0xff
#define HCI_ROLE_MASTER 0x00
#define HCI_ROLE_SLAVE 0x01
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 88265d37aa72..ea798f07c5a2 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -668,6 +668,7 @@ struct hci_conn {
__u8 adv_instance;
__u16 handle;
__u16 sync_handle;
+ __u8 sid;
__u16 state;
__u16 mtu;
__u8 mode;
@@ -710,6 +711,9 @@ struct hci_conn {
__s8 tx_power;
__s8 max_tx_power;
struct bt_iso_qos iso_qos;
+ __u8 num_bis;
+ __u8 bis[HCI_MAX_ISO_BIS];
+
unsigned long flags;
enum conn_reasons conn_reason;
@@ -945,8 +949,10 @@ enum {
HCI_CONN_PER_ADV,
HCI_CONN_BIG_CREATED,
HCI_CONN_CREATE_CIS,
+ HCI_CONN_CREATE_BIG_SYNC,
HCI_CONN_BIG_SYNC,
HCI_CONN_BIG_SYNC_FAILED,
+ HCI_CONN_CREATE_PA_SYNC,
HCI_CONN_PA_SYNC,
HCI_CONN_PA_SYNC_FAILED,
};
@@ -1099,6 +1105,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
return NULL;
}
+static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
+ __u8 sid,
+ bdaddr_t *dst,
+ __u8 dst_type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK || bacmp(&c->dst, dst) ||
+ c->dst_type != dst_type || c->sid != sid)
+ continue;
+
+ rcu_read_unlock();
+ return c;
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
static inline struct hci_conn *
hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
bdaddr_t *ba,
@@ -1255,7 +1285,17 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK)
+ if (c->type != ISO_LINK)
+ continue;
+
+ /* An ISO_LINK hcon with BDADDR_ANY as destination
+ * address is a Broadcast connection. A Broadcast
+ * slave connection is associated with a PA train,
+ * so the sync_handle can be used to differentiate
+ * from unicast.
+ */
+ if (bacmp(&c->dst, BDADDR_ANY) &&
+ c->sync_handle == HCI_SYNC_HANDLE_INVALID)
continue;
if (handle == c->iso_qos.bcast.big) {
@@ -1270,6 +1310,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
}
static inline struct hci_conn *
+hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev,
+ __u8 handle, __u8 num_bis)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK)
+ continue;
+
+ if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -1328,6 +1392,13 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
if (c->type != ISO_LINK)
continue;
+ /* Ignore the listen hcon, we are looking
+ * for the child hcon that was created as
+ * a result of the PA sync established event.
+ */
+ if (c->state == BT_LISTEN)
+ continue;
+
if (c->sync_handle == sync_handle) {
rcu_read_unlock();
return c;
@@ -1445,6 +1516,8 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
bool hci_iso_setup_path(struct hci_conn *conn);
int hci_le_create_cis_pending(struct hci_dev *hdev);
+int hci_pa_create_sync_pending(struct hci_dev *hdev);
+int hci_le_big_create_sync_pending(struct hci_dev *hdev);
int hci_conn_check_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -1871,8 +1944,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
!test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks))
/* Use ext create connection if command is supported */
-#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
-
+#define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \
+ !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &(dev)->quirks))
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
@@ -1885,8 +1958,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
* C24: Mandatory if the LE Controller supports Connection State and either
* LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
*/
-#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
- ext_adv_capable(dev))
+#define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \
+ ext_adv_capable(dev)) && \
+ !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, \
+ &(dev)->quirks))
/* Periodic advertising support */
#define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index d382679efd2b..affac861efdc 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -878,6 +878,16 @@ struct mgmt_cp_mesh_send_cancel {
} __packed;
#define MGMT_MESH_SEND_CANCEL_SIZE 1
+#define MGMT_OP_HCI_CMD_SYNC 0x005B
+struct mgmt_cp_hci_cmd_sync {
+ __le16 opcode;
+ __u8 event;
+ __u8 timeout;
+ __le16 params_len;
+ __u8 params[];
+} __packed;
+#define MGMT_HCI_CMD_SYNC_SIZE 6
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 473a0147769e..18687ccf0638 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -161,5 +161,7 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond);
#if IS_ENABLED(CONFIG_IPV6)
void bond_option_ns_ip6_targets_clear(struct bonding *bond);
#endif
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave);
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave);
#endif /* _NET_BOND_OPTIONS_H */
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index f03040baaefd..c858270141bc 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -52,6 +52,9 @@ void napi_busy_loop_rcu(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, bool prefer_busy_poll, u16 budget);
+void napi_suspend_irqs(unsigned int napi_id);
+void napi_resume_irqs(unsigned int napi_id);
+
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
{
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index 5ee7b322e18b..a000dc45f966 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -40,7 +40,6 @@ void cfsrvl_init(struct cfsrvl *service,
struct dev_info *dev_info,
bool supports_flowctrl);
bool cfsrvl_ready(struct cfsrvl *service, int *err);
-u8 cfsrvl_getphyid(struct cflayer *layer);
static inline void cfsrvl_get(struct cflayer *layr)
{
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 941dc62f3027..27acf1292a5c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1460,7 +1460,6 @@ struct cfg80211_unsol_bcast_probe_resp {
* @crypto: crypto settings
* @privacy: the BSS uses privacy
* @auth_type: Authentication type (algorithm)
- * @smps_mode: SMPS mode
* @inactivity_timeout: time in seconds to determine station's inactivity.
* @p2p_ctwindow: P2P CT Window
* @p2p_opp_ps: P2P opportunistic PS
@@ -1498,7 +1497,6 @@ struct cfg80211_ap_settings {
struct cfg80211_crypto_settings crypto;
bool privacy;
enum nl80211_auth_type auth_type;
- enum nl80211_smps_mode smps_mode;
int inactivity_timeout;
u8 p2p_ctwindow;
bool p2p_opp_ps;
@@ -2269,6 +2267,7 @@ static inline int cfg80211_get_station(struct net_device *dev,
* @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering
* @MONITOR_FLAG_COOK_FRAMES: report frames after processing
* @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address
+ * @MONITOR_FLAG_SKIP_TX: do not pass locally transmitted frames
*/
enum monitor_flags {
MONITOR_FLAG_CHANGED = BIT(__NL80211_MNTR_FLAG_INVALID),
@@ -2278,6 +2277,7 @@ enum monitor_flags {
MONITOR_FLAG_OTHER_BSS = BIT(NL80211_MNTR_FLAG_OTHER_BSS),
MONITOR_FLAG_COOK_FRAMES = BIT(NL80211_MNTR_FLAG_COOK_FRAMES),
MONITOR_FLAG_ACTIVE = BIT(NL80211_MNTR_FLAG_ACTIVE),
+ MONITOR_FLAG_SKIP_TX = BIT(NL80211_MNTR_FLAG_SKIP_TX),
};
/**
@@ -4696,6 +4696,7 @@ struct cfg80211_ops {
struct ieee80211_channel *chan);
int (*set_monitor_channel)(struct wiphy *wiphy,
+ struct net_device *dev,
struct cfg80211_chan_def *chandef);
int (*scan)(struct wiphy *wiphy,
@@ -5436,6 +5437,8 @@ struct wiphy_radio_freq_range {
* @iface_combinations: Valid interface combinations array, should not
* list single interface types.
* @n_iface_combinations: number of entries in @iface_combinations array.
+ *
+ * @antenna_mask: bitmask of antennas connected to this radio.
*/
struct wiphy_radio {
const struct wiphy_radio_freq_range *freq_range;
@@ -5443,6 +5446,8 @@ struct wiphy_radio {
const struct ieee80211_iface_combination *iface_combinations;
int n_iface_combinations;
+
+ u32 antenna_mask;
};
#define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff
@@ -6267,6 +6272,7 @@ enum ieee80211_ap_reg_power {
* entered.
* @links.cac_time_ms: CAC time in ms
* @valid_links: bitmap describing what elements of @links are valid
+ * @radio_mask: Bitmask of radios that this interface is allowed to operate on.
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -6379,6 +6385,8 @@ struct wireless_dev {
unsigned int cac_time_ms;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
u16 valid_links;
+
+ u32 radio_mask;
};
static inline const u8 *wdev_address(struct wireless_dev *wdev)
@@ -6565,6 +6573,17 @@ bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio,
const struct cfg80211_chan_def *chandef);
/**
+ * cfg80211_wdev_channel_allowed - Check if the wdev may use the channel
+ *
+ * @wdev: the wireless device
+ * @chan: channel to check
+ *
+ * Return: whether or not the wdev may use the channel
+ */
+bool cfg80211_wdev_channel_allowed(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan);
+
+/**
* ieee80211_get_response_rate - get basic rate for a given rate
*
* @sband: the band to look for rates in
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 1338cb92c8e7..243f972267b8 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -151,6 +151,12 @@ static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
*csum = csum_add(csum_sub(*csum, old), new);
}
+static inline unsigned short csum_from32to16(unsigned int sum)
+{
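+	/* End-around-carry fold: add the two 16-bit halves so any carry
+	 * out of the low half lands in the high half, then return the
+	 * high half as the folded 16-bit sum.
+	 */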
+ sum += (sum >> 16) | (sum << 16);
+ return (unsigned short)(sum >> 16);
+}
+
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, bool pseudohdr);
diff --git a/include/net/devlink.h b/include/net/devlink.h
index db5eff6cb60f..fbb9a2668e24 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -1779,12 +1779,6 @@ int devl_resource_register(struct devlink *devlink,
u64 resource_id,
u64 parent_resource_id,
const struct devlink_resource_size_params *size_params);
-int devlink_resource_register(struct devlink *devlink,
- const char *resource_name,
- u64 resource_size,
- u64 resource_id,
- u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params);
void devl_resources_unregister(struct devlink *devlink);
void devlink_resources_unregister(struct devlink *devlink);
int devl_resource_size_get(struct devlink *devlink,
@@ -1797,15 +1791,8 @@ void devl_resource_occ_get_register(struct devlink *devlink,
u64 resource_id,
devlink_resource_occ_get_t *occ_get,
void *occ_get_priv);
-void devlink_resource_occ_get_register(struct devlink *devlink,
- u64 resource_id,
- devlink_resource_occ_get_t *occ_get,
- void *occ_get_priv);
void devl_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id);
-
-void devlink_resource_occ_get_unregister(struct devlink *devlink,
- u64 resource_id);
int devl_params_register(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 4748680e8c88..6c5a1ea209a2 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -76,6 +76,10 @@
FN(INVALID_PROTO) \
FN(IP_INADDRERRORS) \
FN(IP_INNOROUTES) \
+ FN(IP_LOCAL_SOURCE) \
+ FN(IP_INVALID_SOURCE) \
+ FN(IP_LOCALNET) \
+ FN(IP_INVALID_DEST) \
FN(PKT_TOO_BIG) \
FN(DUP_FRAG) \
FN(FRAG_REASM_TIMEOUT) \
@@ -92,6 +96,15 @@
FN(PACKET_SOCK_ERROR) \
FN(TC_CHAIN_NOTFOUND) \
FN(TC_RECLASSIFY_LOOP) \
+ FN(VXLAN_INVALID_HDR) \
+ FN(VXLAN_VNI_NOT_FOUND) \
+ FN(MAC_INVALID_SOURCE) \
+ FN(VXLAN_ENTRY_EXISTS) \
+ FN(VXLAN_NO_REMOTE) \
+ FN(IP_TUNNEL_ECN) \
+ FN(TUNNEL_TXINFO) \
+ FN(LOCAL_MAC) \
+ FN(ARP_PVLAN_DISABLE) \
FNe(MAX)
/**
@@ -365,6 +378,21 @@ enum skb_drop_reason {
* IPSTATS_MIB_INADDRERRORS
*/
SKB_DROP_REASON_IP_INNOROUTES,
+ /** @SKB_DROP_REASON_IP_LOCAL_SOURCE: the source ip is local */
+ SKB_DROP_REASON_IP_LOCAL_SOURCE,
+ /**
+ * @SKB_DROP_REASON_IP_INVALID_SOURCE: the source ip is invalid:
+ * 1) source ip is multicast or limited broadcast
+ * 2) source ip is zero and not IGMP
+ */
+ SKB_DROP_REASON_IP_INVALID_SOURCE,
+ /** @SKB_DROP_REASON_IP_LOCALNET: source or dest ip is local net */
+ SKB_DROP_REASON_IP_LOCALNET,
+ /**
+ * @SKB_DROP_REASON_IP_INVALID_DEST: the dest ip is invalid:
+ * 1) dest ip is 0
+ */
+ SKB_DROP_REASON_IP_INVALID_DEST,
/**
* @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the
* MTU)
@@ -419,6 +447,44 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_TC_RECLASSIFY_LOOP,
/**
+ * @SKB_DROP_REASON_VXLAN_INVALID_HDR: VXLAN header is invalid. E.g.:
+ * 1) reserved fields are not zero
+ * 2) "I" flag is not set
+ */
+ SKB_DROP_REASON_VXLAN_INVALID_HDR,
+ /** @SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND: no VXLAN device found for VNI */
+ SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND,
+ /** @SKB_DROP_REASON_MAC_INVALID_SOURCE: source mac is invalid */
+ SKB_DROP_REASON_MAC_INVALID_SOURCE,
+ /**
+ * @SKB_DROP_REASON_VXLAN_ENTRY_EXISTS: trying to migrate a static
+ * entry or an entry pointing to a nexthop.
+ */
+ SKB_DROP_REASON_VXLAN_ENTRY_EXISTS,
+ /** @SKB_DROP_REASON_VXLAN_NO_REMOTE: no remote found for xmit */
+ SKB_DROP_REASON_VXLAN_NO_REMOTE,
+ /**
+ * @SKB_DROP_REASON_IP_TUNNEL_ECN: skb is dropped according to
+ * RFC 6040 4.2, see __INET_ECN_decapsulate() for detail.
+ */
+ SKB_DROP_REASON_IP_TUNNEL_ECN,
+ /**
+ * @SKB_DROP_REASON_TUNNEL_TXINFO: packet without necessary metadata
+ * reached a device which is in "external" mode.
+ */
+ SKB_DROP_REASON_TUNNEL_TXINFO,
+ /**
+ * @SKB_DROP_REASON_LOCAL_MAC: the source MAC address is equal to
+ * the MAC address of the local netdev.
+ */
+ SKB_DROP_REASON_LOCAL_MAC,
+ /**
+ * @SKB_DROP_REASON_ARP_PVLAN_DISABLE: packet which is not IP is
+ * forwarded to the in_dev, and the proxy_arp_pvlan is not
+ * enabled.
+ */
+ SKB_DROP_REASON_ARP_PVLAN_DISABLE,
+ /**
* @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
* shouldn't be used as a real 'reason' - only for tracing code gen
*/
diff --git a/include/net/dsa.h b/include/net/dsa.h
index d7a6c2930277..72ae65e7246a 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -885,21 +885,6 @@ struct dsa_switch_ops {
*/
void (*phylink_get_caps)(struct dsa_switch *ds, int port,
struct phylink_config *config);
- struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
- int port,
- phy_interface_t iface);
- void (*phylink_mac_config)(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
- void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
- void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
struct phylink_link_state *state);
/*
diff --git a/include/net/eee.h b/include/net/eee.h
index 84837aba3cd9..cfab1b8bc46a 100644
--- a/include/net/eee.h
+++ b/include/net/eee.h
@@ -13,10 +13,7 @@ struct eee_config {
static inline bool eeecfg_mac_can_tx_lpi(const struct eee_config *eeecfg)
{
/* eee_enabled is the master on/off */
- if (!eeecfg->eee_enabled || !eeecfg->tx_lpi_enabled)
- return false;
-
- return true;
+ return eeecfg->eee_enabled && eeecfg->tx_lpi_enabled;
}
static inline void eeecfg_to_eee(struct ethtool_keee *eee,
diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h
index 6d59221ff05a..48aad6128fea 100644
--- a/include/net/fib_notifier.h
+++ b/include/net/fib_notifier.h
@@ -28,7 +28,7 @@ enum fib_event_type {
struct fib_notifier_ops {
int family;
struct list_head list;
- unsigned int (*fib_seq_read)(struct net *net);
+ unsigned int (*fib_seq_read)(const struct net *net);
int (*fib_dump)(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
struct module *owner;
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index d17855c52ef9..04383d90a1e3 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -176,7 +176,7 @@ int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table);
bool fib_rule_matchall(const struct fib_rule *rule);
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
struct netlink_ext_ack *extack);
-unsigned int fib_rules_seq_read(struct net *net, int family);
+unsigned int fib_rules_seq_read(const struct net *net, int family);
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 292cd8f4b762..596ab9791e4d 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -685,6 +685,7 @@ struct flow_cls_common_offload {
u32 chain_index;
__be16 protocol;
u32 prio;
+ bool skip_sw;
struct netlink_ext_ack *extack;
};
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index c1d91f1d20f6..d096cc6352de 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -124,7 +124,8 @@ struct genl_family {
* @genlhdr: generic netlink message header
* @attrs: netlink attributes
* @_net: network namespace
- * @user_ptr: user pointers
+ * @ctx: storage space for use by the family
+ * @user_ptr: user pointers (deprecated, use ctx instead)
* @extack: extended ACK report struct
*/
struct genl_info {
@@ -135,7 +136,10 @@ struct genl_info {
struct genlmsghdr * genlhdr;
struct nlattr ** attrs;
possible_net_t _net;
- void * user_ptr[2];
+ union {
+ u8 ctx[NETLINK_CTX_SIZE];
+ void * user_ptr[2];
+ };
struct netlink_ext_ack *extack;
};
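
A hedged sketch of the intended migration: a family keeps per-request state in info->ctx instead of the deprecated user_ptr[]. The context struct and attribute index are hypothetical; the only real constraint is fitting within NETLINK_CTX_SIZE bytes (the nla_get_u32_default() helper is added later in this same series):

struct example_req_ctx {
	u32 ifindex;
	u8 flags;
};

static int example_pre_doit(const struct genl_split_ops *ops,
			    struct sk_buff *skb, struct genl_info *info)
{
	struct example_req_ctx *ctx = (void *)info->ctx;

	BUILD_BUG_ON(sizeof(struct example_req_ctx) > NETLINK_CTX_SIZE);
	ctx->ifindex = nla_get_u32_default(info->attrs[1], 0);
	return 0;
}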
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index c0deaafebfdc..3c82fad904d4 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -197,12 +197,12 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
struct inet_connection_sock *icsk = inet_csk(sk);
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
- icsk->icsk_pending = 0;
+ smp_store_release(&icsk->icsk_pending, 0);
#ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
} else if (what == ICSK_TIME_DACK) {
- icsk->icsk_ack.pending = 0;
+ smp_store_release(&icsk->icsk_ack.pending, 0);
icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_delack_timer);
@@ -229,11 +229,12 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
- icsk->icsk_pending = what;
+ smp_store_release(&icsk->icsk_pending, what);
icsk->icsk_timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
} else if (what == ICSK_TIME_DACK) {
- icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
+ smp_store_release(&icsk->icsk_ack.pending,
+ icsk->icsk_ack.pending | ICSK_ACK_TIMER);
icsk->icsk_ack.timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
} else {
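
These smp_store_release() calls pair with lockless readers of the timer state; a minimal sketch of such a reader (function name hypothetical):

static bool example_retrans_timer_armed(const struct sock *sk)
{
	/* paired with the smp_store_release() writers above */
	return smp_load_acquire(&inet_csk(sk)->icsk_pending) ==
	       ICSK_TIME_RETRANS;
}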
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 394c3b66065e..56d8bc5593d3 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -174,6 +174,7 @@ struct inet_cork {
__s16 tos;
char priority;
__u16 gso_size;
+ u32 ts_opt_id;
u64 transmit_time;
u32 mark;
};
@@ -241,7 +242,8 @@ struct inet_sock {
struct inet_cork_full cork;
};
-#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
+#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
+#define IPCORK_TS_OPT_ID 2 /* ts_opt_id field is valid, overriding sk_tskey */
enum {
INET_FLAGS_PKTINFO = 0,
@@ -319,8 +321,10 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
static inline struct sock *sk_to_full_sk(struct sock *sk)
{
#ifdef CONFIG_INET
- if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
+ if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV)
sk = inet_reqsk(sk)->rsk_listener;
+ if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT)
+ sk = NULL;
#endif
return sk;
}
@@ -329,8 +333,10 @@ static inline struct sock *sk_to_full_sk(struct sock *sk)
static inline const struct sock *sk_const_to_full_sk(const struct sock *sk)
{
#ifdef CONFIG_INET
- if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
+ if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV)
sk = ((const struct request_sock *)sk)->rsk_listener;
+ if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT)
+ sk = NULL;
#endif
return sk;
}
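
Since timewait sockets now map to NULL instead of leaking through as non-full sockets, callers must handle the NULL case explicitly; an illustrative (hypothetical) caller:

static u32 example_get_mark(struct sock *sk)
{
	struct sock *fsk = sk_to_full_sk(sk);

	return fsk ? READ_ONCE(fsk->sk_mark) : 0;
}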
diff --git a/include/net/ip.h b/include/net/ip.h
index d92d3bc3ec0e..0e548c1f2a0e 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -288,7 +288,8 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
-void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk,
+ struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
@@ -424,6 +425,11 @@ int ip_decrease_ttl(struct iphdr *iph)
return --iph->ttl;
}
+static inline dscp_t ip4h_dscp(const struct iphdr *ip4h)
+{
+ return inet_dsfield_to_dscp(ip4h->tos);
+}
+
static inline int ip_mtu_locked(const struct dst_entry *dst)
{
const struct rtable *rt = dst_rtable(dst);
@@ -684,6 +690,11 @@ static inline unsigned int ipv4_addr_hash(__be32 ip)
return (__force unsigned int) ip;
}
+static inline u32 __ipv4_addr_hash(const __be32 ip, const u32 initval)
+{
+ return jhash_1word((__force u32)ip, initval);
+}
+
static inline u32 ipv4_portaddr_hash(const struct net *net,
__be32 saddr,
unsigned int port)
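
A small usage sketch for the two new helpers (caller name hypothetical): derive the type-safe dscp_t once from the received header, and hash an address with a caller-provided seed:

static u32 example_flow_key(const struct sk_buff *skb, u32 seed)
{
	const struct iphdr *iph = ip_hdr(skb);
	dscp_t dscp = ip4h_dscp(iph);	/* type-safe, unlike a raw tos byte */

	return __ipv4_addr_hash(iph->saddr, seed) ^
	       inet_dscp_to_dsfield(dscp);
}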
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 6cb867ce4878..7c87873ae211 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -394,7 +394,7 @@ struct fib6_table {
struct fib6_node tb6_root;
struct inet_peer_base tb6_peers;
unsigned int flags;
- unsigned int fib_seq;
+ unsigned int fib_seq; /* writes protected by rtnl_mutex */
struct hlist_head tb6_gc_hlist; /* GC candidates */
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -563,7 +563,7 @@ int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
int __net_init fib6_notifier_init(struct net *net);
void __net_exit fib6_notifier_exit(struct net *net);
-unsigned int fib6_tables_seq_read(struct net *net);
+unsigned int fib6_tables_seq_read(const struct net *net);
int fib6_tables_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
@@ -632,7 +632,7 @@ void fib6_rules_cleanup(void);
bool fib6_rule_default(const struct fib_rule *rule);
int fib6_rules_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
-unsigned int fib6_rules_seq_read(struct net *net);
+unsigned int fib6_rules_seq_read(const struct net *net);
static inline bool fib6_rules_early_flow_dissect(struct net *net,
struct sk_buff *skb,
@@ -676,7 +676,7 @@ static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb,
{
return 0;
}
-static inline unsigned int fib6_rules_seq_read(struct net *net)
+static inline unsigned int fib6_rules_seq_read(const struct net *net)
{
return 0;
}
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 967e4dc555fa..a113c11ab56b 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -347,7 +347,7 @@ static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb,
return 0;
}
-static inline unsigned int fib4_rules_seq_read(struct net *net)
+static inline unsigned int fib4_rules_seq_read(const struct net *net)
{
return 0;
}
@@ -411,7 +411,7 @@ static inline bool fib4_has_custom_rules(const struct net *net)
bool fib4_rule_default(const struct fib_rule *rule);
int fib4_rules_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
-unsigned int fib4_rules_seq_read(struct net *net);
+unsigned int fib4_rules_seq_read(const struct net *net);
static inline bool fib4_rules_early_flow_dissect(struct net *net,
struct sk_buff *skb,
@@ -449,8 +449,21 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
__be32 fib_compute_spec_dst(struct sk_buff *skb);
bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
- u8 tos, int oif, struct net_device *dev,
+ dscp_t dscp, int oif, struct net_device *dev,
struct in_device *idev, u32 *itag);
+
+static inline enum skb_drop_reason
+fib_validate_source_reason(struct sk_buff *skb, __be32 src, __be32 dst,
+ dscp_t dscp, int oif, struct net_device *dev,
+ struct in_device *idev, u32 *itag)
+{
+ int err = fib_validate_source(skb, src, dst, dscp, oif, dev, idev,
+ itag);
+ if (err < 0)
+ return -err;
+ return SKB_NOT_DROPPED_YET;
+}
+
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
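
An illustrative caller of the new wrapper, combining it with ip4h_dscp() from above (names hypothetical); note that the negative errno from fib_validate_source() is already translated into a drop reason by the wrapper:

static enum skb_drop_reason
example_input_validate(struct sk_buff *skb, struct net_device *dev,
		       struct in_device *idev)
{
	const struct iphdr *iph = ip_hdr(skb);
	u32 itag = 0;

	return fib_validate_source_reason(skb, iph->saddr, iph->daddr,
					  ip4h_dscp(iph), 0, dev, idev,
					  &itag);
}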
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 6a070478254d..1aa31bdb2b31 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -439,7 +439,8 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);
-static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+static inline enum skb_drop_reason
+pskb_inet_may_pull_reason(struct sk_buff *skb)
{
int nhlen;
@@ -456,16 +457,22 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
nhlen = 0;
}
- return pskb_network_may_pull(skb, nhlen);
+ return pskb_network_may_pull_reason(skb, nhlen);
+}
+
+static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+{
+ return pskb_inet_may_pull_reason(skb) == SKB_NOT_DROPPED_YET;
}
/* Variant of pskb_inet_may_pull().
*/
-static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
- bool inner_proto_inherit)
+static inline enum skb_drop_reason
+skb_vlan_inet_prepare(struct sk_buff *skb, bool inner_proto_inherit)
{
int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
__be16 type = skb->protocol;
+ enum skb_drop_reason reason;
/* Essentially this is skb_protocol(skb, true)
* And we get MAC len.
@@ -486,11 +493,13 @@ static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
* a base network header in skb->head.
*/
- if (!pskb_may_pull(skb, maclen + nhlen))
- return false;
+ reason = pskb_may_pull_reason(skb, maclen + nhlen);
+ if (reason)
+ return reason;
skb_set_network_header(skb, maclen);
- return true;
+
+ return SKB_NOT_DROPPED_YET;
}
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
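
Both helpers now return an enum skb_drop_reason, where 0 (SKB_NOT_DROPPED_YET) keeps the old boolean "OK" meaning; a hypothetical tunnel xmit snippet:

static netdev_tx_t example_tunnel_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	enum skb_drop_reason reason;

	reason = skb_vlan_inet_prepare(skb, false);
	if (reason) {
		kfree_skb_reason(skb, reason);
		return NETDEV_TX_OK;
	}
	/* ... build and push the tunnel header ... */
	return NETDEV_TX_OK;
}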
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index 7af1082ea9a0..b80e474cb0aa 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -279,8 +279,6 @@
#define IW_DESCR_FLAG_RESTRICT 0x0004 /* GET : request is ROOT only */
/* SET : Omit payload from generated iwevent */
#define IW_DESCR_FLAG_NOMAX 0x0008 /* GET : no limit on request size */
-/* Driver level flags */
-#define IW_DESCR_FLAG_WAIT 0x0100 /* Wait for driver event */
/****************************** TYPES ******************************/
@@ -373,11 +371,10 @@ struct iw_handler_def {
*/
struct iw_ioctl_description {
__u8 header_type; /* NULL, iw_point or other */
- __u8 token_type; /* Future */
+ __u8 flags; /* Special handling of the request */
__u16 token_size; /* Granularity of payload */
__u16 min_tokens; /* Min acceptable token number */
__u16 max_tokens; /* Max acceptable token number */
- __u32 flags; /* Special handling of the request */
};
/* Need to think of short header translation table. Later. */
@@ -404,26 +401,6 @@ struct iw_spy_data {
u_char spy_thr_under[IW_MAX_SPY];
};
-/* --------------------- DEVICE WIRELESS DATA --------------------- */
-/*
- * This is all the wireless data specific to a device instance that
- * is managed by the core of Wireless Extensions or the 802.11 layer.
- * We only keep pointer to those structures, so that a driver is free
- * to share them between instances.
- * This structure should be initialised before registering the device.
- * Access to this data follow the same rules as any other struct net_device
- * data (i.e. valid as long as struct net_device exist, same locking rules).
- */
-/* Forward declaration */
-struct libipw_device;
-/* The struct */
-struct iw_public_data {
- /* Driver enhanced spy support */
- struct iw_spy_data * spy_data;
- /* Legacy structure managed by the ipw2x00-specific IEEE 802.11 layer */
- struct libipw_device * libipw;
-};
-
/**************************** PROTOTYPES ****************************/
/*
* Functions part of the Wireless Extensions (defined in net/wireless/wext-core.c).
@@ -443,22 +420,6 @@ static inline void wireless_nlevent_flush(void) {}
/* We may need a function to send a stream of events to user space.
* More on that later... */
-/* Standard handler for SIOCSIWSPY */
-int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Standard handler for SIOCGIWSPY */
-int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Standard handler for SIOCSIWTHRSPY */
-int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Standard handler for SIOCGIWTHRSPY */
-int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Driver call to update spy records */
-void wireless_spy_update(struct net_device *dev, unsigned char *address,
- struct iw_quality *wstats);
-
/************************* INLINE FUNCTIONS *************************/
/*
* Function that are so simple that it's more efficient inlining them
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 031c661aa14d..2d6141f28b53 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -78,7 +78,7 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
struct net_device *dev;
int rc = 0;
- if (likely(ifindex)) {
+ if (ifindex) {
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
diff --git a/include/net/lib80211.h b/include/net/lib80211.h
deleted file mode 100644
index fd0f15d87d80..000000000000
--- a/include/net/lib80211.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * lib80211.h -- common bits for IEEE802.11 wireless drivers
- *
- * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com>
- *
- * Some bits copied from old ieee80211 component, w/ original copyright
- * notices below:
- *
- * Original code based on Host AP (software wireless LAN access point) driver
- * for Intersil Prism2/2.5/3.
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <j@w1.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
- *
- * Adaption to a generic IEEE 802.11 stack by James Ketrenos
- * <jketreno@linux.intel.com>
- *
- * Copyright (c) 2004, Intel Corporation
- *
- */
-
-#ifndef LIB80211_H
-#define LIB80211_H
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/atomic.h>
-#include <linux/if.h>
-#include <linux/skbuff.h>
-#include <linux/ieee80211.h>
-#include <linux/timer.h>
-#include <linux/seq_file.h>
-
-#define NUM_WEP_KEYS 4
-
-enum {
- IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0),
-};
-
-struct module;
-
-struct lib80211_crypto_ops {
- const char *name;
- struct list_head list;
-
- /* init new crypto context (e.g., allocate private data space,
- * select IV, etc.); returns NULL on failure or pointer to allocated
- * private data on success */
- void *(*init) (int keyidx);
-
- /* deinitialize crypto context and free allocated private data */
- void (*deinit) (void *priv);
-
- /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
- * value from decrypt_mpdu is passed as the keyidx value for
- * decrypt_msdu. skb must have enough head and tail room for the
- * encryption; if not, error will be returned; these functions are
- * called for all MPDUs (i.e., fragments).
- */
- int (*encrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
- int (*decrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
-
- /* These functions are called for full MSDUs, i.e. full frames.
- * These can be NULL if full MSDU operations are not needed. */
- int (*encrypt_msdu) (struct sk_buff * skb, int hdr_len, void *priv);
- int (*decrypt_msdu) (struct sk_buff * skb, int keyidx, int hdr_len,
- void *priv);
-
- int (*set_key) (void *key, int len, u8 * seq, void *priv);
- int (*get_key) (void *key, int len, u8 * seq, void *priv);
-
- /* procfs handler for printing out key information and possible
- * statistics */
- void (*print_stats) (struct seq_file *m, void *priv);
-
- /* Crypto specific flag get/set for configuration settings */
- unsigned long (*get_flags) (void *priv);
- unsigned long (*set_flags) (unsigned long flags, void *priv);
-
- /* maximum number of bytes added by encryption; encrypt buf is
- * allocated with extra_prefix_len bytes, copy of in_buf, and
- * extra_postfix_len; encrypt need not use all this space, but
- * the result must start at the beginning of the buffer and correct
- * length must be returned */
- int extra_mpdu_prefix_len, extra_mpdu_postfix_len;
- int extra_msdu_prefix_len, extra_msdu_postfix_len;
-
- struct module *owner;
-};
-
-struct lib80211_crypt_data {
- struct list_head list; /* delayed deletion list */
- const struct lib80211_crypto_ops *ops;
- void *priv;
- atomic_t refcnt;
-};
-
-struct lib80211_crypt_info {
- char *name;
- /* Most clients will already have a lock,
- so just point to that. */
- spinlock_t *lock;
-
- struct lib80211_crypt_data *crypt[NUM_WEP_KEYS];
- int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
- struct list_head crypt_deinit_list;
- struct timer_list crypt_deinit_timer;
- int crypt_quiesced;
-};
-
-int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
- spinlock_t *lock);
-void lib80211_crypt_info_free(struct lib80211_crypt_info *info);
-int lib80211_register_crypto_ops(const struct lib80211_crypto_ops *ops);
-int lib80211_unregister_crypto_ops(const struct lib80211_crypto_ops *ops);
-const struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name);
-void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
- struct lib80211_crypt_data **crypt);
-
-#endif /* LIB80211_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 333e0fae6796..a97c9f85ae9a 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -213,7 +213,7 @@ struct ieee80211_low_level_stats {
* @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed
* @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
* this is used only with channel switching with CSA
- * @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed
+ * @IEEE80211_CHANCTX_CHANGE_MIN_DEF: The min chandef changed
* @IEEE80211_CHANCTX_CHANGE_AP: The AP channel definition changed, so (wider
* bandwidth) OFDMA settings need to be changed
* @IEEE80211_CHANCTX_CHANGE_PUNCTURING: The punctured channel(s) bitmap
@@ -224,7 +224,7 @@ enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1),
IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
- IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4),
+ IEEE80211_CHANCTX_CHANGE_MIN_DEF = BIT(4),
IEEE80211_CHANCTX_CHANGE_AP = BIT(5),
IEEE80211_CHANCTX_CHANGE_PUNCTURING = BIT(6),
};
@@ -740,6 +740,19 @@ struct ieee80211_parsed_tpe {
* @eht_80mhz_full_bw_ul_mumimo: in AP-mode, does this BSS support the
* reception of an EHT TB PPDU on an RU that spans the entire PPDU
* bandwidth
+ * @bss_param_ch_cnt: in BSS-mode, the BSS params change count. This
+ * information is the latest known value. It can come from this link's
+ * beacon or from a beacon sent by another link.
+ * @bss_param_ch_cnt_link_id: in BSS-mode, the link_id to which the beacon
+ * that updated &bss_param_ch_cnt belongs. E.g. if link 1 doesn't hear
+ * its beacons, and link 2 sent a beacon with an RNR element that updated
+ * link 1's BSS params change count, then, link 1's
+ * bss_param_ch_cnt_link_id will be 2. That means that link 1 knows that
+ * link 2 was the link that updated its bss_param_ch_cnt value.
+ * In case link 1 hears its beacon again, bss_param_ch_cnt_link_id will
+ * be updated to 1, even if bss_param_ch_cnt didn't change. This allows
+ * the link to know that it heard the latest value from its own beacon
+ * (as opposed to hearing its value from another link's beacon).
*/
struct ieee80211_bss_conf {
struct ieee80211_vif *vif;
@@ -834,6 +847,8 @@ struct ieee80211_bss_conf {
bool eht_su_beamformee;
bool eht_mu_beamformer;
bool eht_80mhz_full_bw_ul_mumimo;
+ u8 bss_param_ch_cnt;
+ u8 bss_param_ch_cnt_link_id;
};
/**
@@ -1448,8 +1463,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
* @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
* on this subframe
- * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
- * is stored in the @ampdu_delimiter_crc field)
* @RX_FLAG_MIC_STRIPPED: The mic was stripped of this packet. Decryption was
* done by the hardware
* @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
@@ -1521,7 +1534,7 @@ enum mac80211_rx_flags {
RX_FLAG_AMPDU_LAST_KNOWN = BIT(12),
RX_FLAG_AMPDU_IS_LAST = BIT(13),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14),
- RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15),
+ /* one free bit at 15 */
RX_FLAG_MACTIME = BIT(16) | BIT(17),
RX_FLAG_MACTIME_PLCP_START = 1 << 16,
RX_FLAG_MACTIME_START = 2 << 16,
@@ -1618,7 +1631,6 @@ enum mac80211_rx_encoding {
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
- * @ampdu_delimiter_crc: A-MPDU delimiter CRC
* @zero_length_psdu_type: radiotap type of the 0-length PSDU
* @link_valid: if the link which is identified by @link_id is valid. This flag
* is set only when connection is MLO.
@@ -1656,7 +1668,6 @@ struct ieee80211_rx_status {
s8 signal;
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
- u8 ampdu_delimiter_crc;
u8 zero_length_psdu_type;
u8 link_valid:1, link_id:4;
};
@@ -2683,6 +2694,11 @@ struct ieee80211_txq {
* a virtual monitor interface when monitor interfaces are the only
* active interfaces.
*
+ * @IEEE80211_HW_NO_VIRTUAL_MONITOR: The driver would like to be informed
+ * of each monitor interface, as well as its configured channel.
+ * This is useful for supporting multiple monitor interfaces on different
+ * channels.
+ *
* @IEEE80211_HW_NO_AUTO_VIF: The driver would like for no wlanX to
* be created. It is expected user-space will create vifs as
* desired (and thus have them named as desired).
@@ -2842,6 +2858,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
IEEE80211_HW_MFP_CAPABLE,
IEEE80211_HW_WANT_MONITOR_VIF,
+ IEEE80211_HW_NO_VIRTUAL_MONITOR,
IEEE80211_HW_NO_AUTO_VIF,
IEEE80211_HW_SW_CRYPTO_CONTROL,
IEEE80211_HW_SUPPORT_FAST_XMIT,
@@ -4075,8 +4092,8 @@ struct ieee80211_prep_tx_info {
* in @sta_state.
* The callback can sleep.
*
- * @sta_rc_update: Notifies the driver of changes to the bitrates that can be
- * used to transmit to the station. The changes are advertised with bits
+ * @link_sta_rc_update: Notifies the driver of changes to the bitrates that can
+ * be used to transmit to the station. The changes are advertised with bits
* from &enum ieee80211_rate_control_changed and the values are reflected
* in the station data. This callback should only be used when the driver
* uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since
@@ -4444,6 +4461,12 @@ struct ieee80211_prep_tx_info {
* if the requested TID-To-Link mapping can be accepted or not.
* If it's not accepted the driver may suggest a preferred mapping and
* modify @ttlm parameter with the suggested TID-to-Link mapping.
+ * @prep_add_interface: prepare for interface addition. This can be used by
+ * drivers to prepare for the addition of a new interface, e.g., allocate
+ * the needed resources etc. This callback doesn't guarantee that an
+ * interface with the specified type will be added, and thus drivers that
+ * implement this callback need to handle such cases. The type is the full
+ * &enum nl80211_iftype.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -4560,10 +4583,10 @@ struct ieee80211_ops {
void (*sta_pre_rcu_remove)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
- void (*sta_rc_update)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- u32 changed);
+ void (*link_sta_rc_update)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed);
void (*sta_rate_tbl_update)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -4828,6 +4851,8 @@ struct ieee80211_ops {
enum ieee80211_neg_ttlm_res
(*can_neg_ttlm)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_neg_ttlm *ttlm);
+ void (*prep_add_interface)(struct ieee80211_hw *hw,
+ enum nl80211_iftype type);
};
/**
@@ -7647,12 +7672,12 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
*
* - change_vif_links(0x11)
* - unassign_vif_chanctx(link_id=0)
+ * - assign_vif_chanctx(link_id=4)
* - change_sta_links(0x11) for each affected STA (the AP)
* (TDLS connections on now inactive links should be torn down)
* - remove group keys on the old link (link_id 0)
* - add new group keys (GTK/IGTK/BIGTK) on the new link (link_id 4)
* - change_sta_links(0x10) for each affected STA (the AP)
- * - assign_vif_chanctx(link_id=4)
* - change_vif_links(0x10)
*
* Return: 0 on success. An error code otherwise.
@@ -7681,6 +7706,33 @@ void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
*/
void ieee80211_send_teardown_neg_ttlm(struct ieee80211_vif *vif);
+/**
+ * ieee80211_chan_width_to_rx_bw - convert channel width to STA RX bandwidth
+ * @width: the channel width value to convert
+ * Return: the STA RX bandwidth value for the channel width
+ */
+static inline enum ieee80211_sta_rx_bandwidth
+ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
+{
+ switch (width) {
+ default:
+ WARN_ON_ONCE(1);
+ fallthrough;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return IEEE80211_STA_RX_BW_20;
+ case NL80211_CHAN_WIDTH_40:
+ return IEEE80211_STA_RX_BW_40;
+ case NL80211_CHAN_WIDTH_80:
+ return IEEE80211_STA_RX_BW_80;
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_80P80:
+ return IEEE80211_STA_RX_BW_160;
+ case NL80211_CHAN_WIDTH_320:
+ return IEEE80211_STA_RX_BW_320;
+ }
+}
+
/* for older drivers - let's not document these ... */
int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx);
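
A hedged driver-side sketch of the renamed hook: it now receives the per-link station (struct ieee80211_link_sta) rather than the aggregate struct ieee80211_sta; the helper function is hypothetical:

static void example_apply_bw(enum ieee80211_sta_rx_bandwidth bw)
{
	/* program the device's per-peer bandwidth limit here */
}

static void example_link_sta_rc_update(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_link_sta *link_sta,
				       u32 changed)
{
	if (changed & IEEE80211_RC_BW_CHANGED)
		example_apply_bw(link_sta->bandwidth);
}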
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index de47fa533b15..90f56656b572 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -267,7 +267,8 @@ struct gdma_event {
struct gdma_queue;
struct mana_eq {
- struct gdma_queue *eq;
+ struct gdma_queue *eq;
+ struct dentry *mana_eq_debugfs;
};
typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
@@ -365,6 +366,7 @@ struct gdma_irq_context {
struct gdma_context {
struct device *dev;
+ struct dentry *mana_pci_debugfs;
/* Per-vPort max number of queues */
unsigned int max_num_queues;
@@ -878,5 +880,7 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
u32 resp_len, void *resp);
int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
+void mana_register_debugfs(void);
+void mana_unregister_debugfs(void);
#endif /* _GDMA_H */
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index f2a5200d8a0f..0d00b24eacaf 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -43,7 +43,7 @@ enum TRI_STATE {
* size beyond this value gets rejected by __alloc_page() call.
*/
#define MAX_RX_BUFFERS_PER_QUEUE 8192
-#define DEF_RX_BUFFERS_PER_QUEUE 512
+#define DEF_RX_BUFFERS_PER_QUEUE 1024
#define MIN_RX_BUFFERS_PER_QUEUE 128
/* This max value for TX buffers is derived as the maximum allocatable
@@ -350,6 +350,7 @@ struct mana_rxq {
int xdp_rc; /* XDP redirect return code */
struct page_pool *page_pool;
+ struct dentry *mana_rx_debugfs;
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
@@ -363,6 +364,8 @@ struct mana_tx_qp {
struct mana_cq tx_cq;
mana_handle_t tx_object;
+
+ struct dentry *mana_tx_debugfs;
};
struct mana_ethtool_stats {
@@ -407,6 +410,7 @@ struct mana_context {
u16 num_ports;
struct mana_eq *eqs;
+ struct dentry *mana_eqs_debugfs;
struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};
@@ -468,6 +472,9 @@ struct mana_port_context {
bool port_st_save; /* Saved port state */
struct mana_ethtool_stats eth_stats;
+
+ /* Debugfs */
+ struct dentry *mana_port_debugfs;
};
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
@@ -494,6 +501,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
extern const struct ethtool_ops mana_ethtool_ops;
+extern struct dentry *mana_debugfs_root;
/* A CQ can be created not associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff
diff --git a/include/net/mctp.h b/include/net/mctp.h
index 28d59ae94ca3..1ecbff7116f6 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -298,4 +298,22 @@ void mctp_routes_exit(void);
int mctp_device_init(void);
void mctp_device_exit(void);
+/* MCTP IDs and Codes from DMTF specification
+ * "DSP0239 Management Component Transport Protocol (MCTP) IDs and Codes"
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0239_1.11.1.pdf
+ */
+enum mctp_phys_binding {
+ MCTP_PHYS_BINDING_UNSPEC = 0x00,
+ MCTP_PHYS_BINDING_SMBUS = 0x01,
+ MCTP_PHYS_BINDING_PCIE_VDM = 0x02,
+ MCTP_PHYS_BINDING_USB = 0x03,
+ MCTP_PHYS_BINDING_KCS = 0x04,
+ MCTP_PHYS_BINDING_SERIAL = 0x05,
+ MCTP_PHYS_BINDING_I3C = 0x06,
+ MCTP_PHYS_BINDING_MMBI = 0x07,
+ MCTP_PHYS_BINDING_PCC = 0x08,
+ MCTP_PHYS_BINDING_UCIE = 0x09,
+ MCTP_PHYS_BINDING_VENDOR = 0xFF,
+};
+
#endif /* __NET_MCTP_H */
diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h
index 5c0d04b5c12c..957d9ef924c5 100644
--- a/include/net/mctpdevice.h
+++ b/include/net/mctpdevice.h
@@ -22,6 +22,7 @@ struct mctp_dev {
refcount_t refs;
unsigned int net;
+ enum mctp_phys_binding binding;
const struct mctp_netdev_ops *ops;
@@ -44,7 +45,8 @@ struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev);
struct mctp_dev *__mctp_dev_get(const struct net_device *dev);
int mctp_register_netdev(struct net_device *dev,
- const struct mctp_netdev_ops *ops);
+ const struct mctp_netdev_ops *ops,
+ enum mctp_phys_binding binding);
void mctp_unregister_netdev(struct net_device *dev);
void mctp_dev_hold(struct mctp_dev *mdev);
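
An illustrative registration from a hypothetical I3C transport driver, passing the DSP0239 physical binding alongside the ops:

static const struct mctp_netdev_ops example_mctp_ops = {
	/* .release_flow = ..., if flow control is supported */
};

static int example_setup(struct net_device *ndev)
{
	return mctp_register_netdev(ndev, &example_mctp_ops,
				    MCTP_PHYS_BINDING_I3C);
}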
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index a44f262a7384..9a832cab5b1d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -29,6 +29,7 @@
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>
+#include <net/neighbour_tables.h>
/*
* NUD stands for "neighbor unreachability detection"
@@ -135,7 +136,8 @@ struct neigh_statistics {
#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
struct neighbour {
- struct neighbour __rcu *next;
+ struct hlist_node hash;
+ struct hlist_node dev_list;
struct neigh_table *tbl;
struct neigh_parms *parms;
unsigned long confirmed;
@@ -190,7 +192,7 @@ struct pneigh_entry {
#define NEIGH_NUM_HASH_RND 4
struct neigh_hash_table {
- struct neighbour __rcu **hash_buckets;
+ struct hlist_head *hash_heads;
unsigned int hash_shift;
__u32 hash_rnd[NEIGH_NUM_HASH_RND];
struct rcu_head rcu;
@@ -236,14 +238,6 @@ struct neigh_table {
struct pneigh_entry **phash_buckets;
};
-enum {
- NEIGH_ARP_TABLE = 0,
- NEIGH_ND_TABLE = 1,
- NEIGH_DN_TABLE = 2,
- NEIGH_NR_TABLES,
- NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
-};
-
static inline int neigh_parms_family(struct neigh_parms *p)
{
return p->tbl->family;
@@ -276,6 +270,12 @@ static inline void *neighbour_priv(const struct neighbour *n)
extern const struct nla_policy nda_policy[];
+#define neigh_for_each_in_bucket(pos, head) hlist_for_each_entry(pos, head, hash)
+#define neigh_for_each_in_bucket_rcu(pos, head) \
+ hlist_for_each_entry_rcu(pos, head, hash)
+#define neigh_for_each_in_bucket_safe(pos, tmp, head) \
+ hlist_for_each_entry_safe(pos, tmp, head, hash)
+
static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
{
return *(const u32 *)n->primary_key == *(const u32 *)pkey;
@@ -304,12 +304,9 @@ static inline struct neighbour *___neigh_lookup_noref(
u32 hash_val;
hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
- for (n = rcu_dereference(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference(n->next)) {
+ neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[hash_val])
if (n->dev == dev && key_eq(n, pkey))
return n;
- }
return NULL;
}
@@ -350,7 +347,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
-bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
+bool neigh_remove_one(struct neighbour *ndel);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
diff --git a/include/net/neighbour_tables.h b/include/net/neighbour_tables.h
new file mode 100644
index 000000000000..bcffbe8f7601
--- /dev/null
+++ b/include/net/neighbour_tables.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_NEIGHBOUR_TABLES_H
+#define _NET_NEIGHBOUR_TABLES_H
+
+enum {
+ NEIGH_ARP_TABLE = 0,
+ NEIGH_ND_TABLE = 1,
+ NEIGH_NR_TABLES,
+ NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
+};
+
+#endif
diff --git a/include/net/net_debug.h b/include/net/net_debug.h
index 1e74684cbbdb..9fecb1496be3 100644
--- a/include/net/net_debug.h
+++ b/include/net/net_debug.h
@@ -149,9 +149,11 @@ do { \
#if defined(CONFIG_DEBUG_NET)
-#define DEBUG_NET_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#define DEBUG_NET_WARN_ON_ONCE(cond) ((void)WARN_ON_ONCE(cond))
+#define DEBUG_NET_WARN_ONCE(cond, format...) ((void)WARN_ONCE(cond, format))
#else
#define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define DEBUG_NET_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
#endif /* _LINUX_NET_DEBUG_H */
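
Usage sketch: like WARN_ONCE(), but on !CONFIG_DEBUG_NET builds the condition is only type-checked, never evaluated at runtime (names hypothetical):

static void example_check_len(const struct sk_buff *skb, unsigned int mtu)
{
	DEBUG_NET_WARN_ONCE(skb->len > mtu, "oversized skb: %u\n", skb->len);
}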
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index e67b483cc8bb..873c0f9fdac6 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -188,6 +188,10 @@ struct net {
#if IS_ENABLED(CONFIG_SMC)
struct netns_smc smc;
#endif
+#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
+ /* Move to a better place when the config guard is removed. */
+ struct mutex rtnl_mutex;
+#endif
} __randomize_layout;
#include <linux/seq_file_net.h>
diff --git a/include/net/net_shaper.h b/include/net/net_shaper.h
new file mode 100644
index 000000000000..5c3f49b52fe9
--- /dev/null
+++ b/include/net/net_shaper.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_SHAPER_H_
+#define _NET_SHAPER_H_
+
+#include <linux/types.h>
+
+#include <uapi/linux/net_shaper.h>
+
+struct net_device;
+struct devlink;
+struct netlink_ext_ack;
+
+enum net_shaper_binding_type {
+ NET_SHAPER_BINDING_TYPE_NETDEV,
+ /* NET_SHAPER_BINDING_TYPE_DEVLINK_PORT */
+};
+
+struct net_shaper_binding {
+ enum net_shaper_binding_type type;
+ union {
+ struct net_device *netdev;
+ struct devlink *devlink;
+ };
+};
+
+struct net_shaper_handle {
+ enum net_shaper_scope scope;
+ u32 id;
+};
+
+/**
+ * struct net_shaper - represents a shaping node on the NIC H/W
+ * Zeroed fields are considered not set.
+ * @parent: Unique identifier for the shaper parent, usually implied
+ * @handle: Unique identifier for this shaper
+ * @metric: Specifies whether the rate limits refer to PPS or BPS
+ * @bw_min: Minimum guaranteed rate for this shaper
+ * @bw_max: Maximum peak rate allowed for this shaper
+ * @burst: Maximum burst for the peak rate of this shaper
+ * @priority: Scheduling priority for this shaper
+ * @weight: Scheduling weight for this shaper
+ */
+struct net_shaper {
+ struct net_shaper_handle parent;
+ struct net_shaper_handle handle;
+ enum net_shaper_metric metric;
+ u64 bw_min;
+ u64 bw_max;
+ u64 burst;
+ u32 priority;
+ u32 weight;
+
+ /* private: */
+ u32 leaves; /* accounted only for NODE scope */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct net_shaper_ops - Operations on device H/W shapers
+ *
+ * The operations apply to both net_device and devlink objects.
+ * The initial shaping configuration at device initialization is empty:
+ * it does not constrain the rate in any way.
+ * The network core keeps track of the applied user configuration in
+ * the net_device or devlink structure.
+ * The operations are serialized via a per-device lock.
+ *
+ * Devices not supporting any kind of nesting should not provide the
+ * group operation.
+ *
+ * Each shaper is uniquely identified within the device with a 'handle'
+ * comprising the shaper scope and a scope-specific id.
+ */
+struct net_shaper_ops {
+ /**
+ * @group: create the specified shapers scheduling group
+ *
+ * Nest the @leaves shapers under the @node shaper.
+ * All the shapers belong to the device specified by @binding.
+ * The @leaves array size is specified by @leaves_count.
+ * Create both the @leaves and the @node shaper; or, if they already
+ * exist, link them together in the desired way.
+ * @leaves scope must be NET_SHAPER_SCOPE_QUEUE.
+ */
+ int (*group)(struct net_shaper_binding *binding, int leaves_count,
+ const struct net_shaper *leaves,
+ const struct net_shaper *node,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @set: Updates the specified shaper
+ *
+ * Updates or creates the @shaper on the device specified by @binding.
+ */
+ int (*set)(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @delete: Removes the specified shaper
+ *
+ * Removes the shaper configuration as identified by the given @handle
+ * on the device specified by @binding, restoring the default behavior.
+ */
+ int (*delete)(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @capabilities: get the shaper features supported by the device
+ *
+ * Fills the bitmask @cap with the supported capabilities for the
+ * specified @scope and device specified by @binding.
+ */
+ void (*capabilities)(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope, unsigned long *cap);
+};
+
+#endif
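
A minimal, hypothetical driver wiring: a NIC exposing only per-queue rate limits implements @set (and typically @delete) and leaves @group unset, per the nesting note above:

static int example_shaper_set(struct net_shaper_binding *binding,
			      const struct net_shaper *shaper,
			      struct netlink_ext_ack *extack)
{
	if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
		NL_SET_ERR_MSG(extack, "only queue scope is supported");
		return -EOPNOTSUPP;
	}
	/* program shaper->bw_max for queue shaper->handle.id here */
	return 0;
}

static const struct net_shaper_ops example_shaper_ops = {
	.set = example_shaper_set,
};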
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 91ae20cb7648..80a537ac26cd 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1103,6 +1103,7 @@ struct nft_rule_blob {
* @name: name of the chain
* @udlen: user data length
* @udata: user data in the chain
+ * @rcu_head: rcu head for deferred release
* @blob_next: rule blob pointer to the next in the chain
*/
struct nft_chain {
@@ -1120,6 +1121,7 @@ struct nft_chain {
char *name;
u16 udlen;
u8 *udata;
+ struct rcu_head rcu_head;
/* Only used during control plane commit phase: */
struct nft_rule_blob *blob_next;
@@ -1263,6 +1265,7 @@ static inline void nft_use_inc_restore(u32 *use)
* @sets: sets in the table
* @objects: stateful objects in the table
* @flowtables: flow tables in the table
+ * @net: network namespace this table belongs to
* @hgenerator: handle generator state
* @handle: table handle
* @use: number of chain references to this table
@@ -1282,6 +1285,7 @@ struct nft_table {
struct list_head sets;
struct list_head objects;
struct list_head flowtables;
+ possible_net_t net;
u64 hgenerator;
u64 handle;
u32 use;
@@ -1463,7 +1467,8 @@ struct nft_flowtable {
struct nf_flowtable data;
};
-struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
+struct nft_flowtable *nft_flowtable_lookup(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla,
u8 genmask);
@@ -1754,28 +1759,29 @@ enum nft_trans_elem_flags {
NFT_TRANS_UPD_EXPIRATION = (1 << 1),
};
+struct nft_elem_update {
+ u64 timeout;
+ u64 expiration;
+ u8 flags;
+};
+
+struct nft_trans_one_elem {
+ struct nft_elem_priv *priv;
+ struct nft_elem_update *update;
+};
+
struct nft_trans_elem {
struct nft_trans nft_trans;
struct nft_set *set;
- struct nft_elem_priv *elem_priv;
- u64 timeout;
- u64 expiration;
- u8 update_flags;
bool bound;
+ unsigned int nelems;
+ struct nft_trans_one_elem elems[] __counted_by(nelems);
};
#define nft_trans_container_elem(t) \
container_of(t, struct nft_trans_elem, nft_trans)
#define nft_trans_elem_set(trans) \
nft_trans_container_elem(trans)->set
-#define nft_trans_elem_priv(trans) \
- nft_trans_container_elem(trans)->elem_priv
-#define nft_trans_elem_update_flags(trans) \
- nft_trans_container_elem(trans)->update_flags
-#define nft_trans_elem_timeout(trans) \
- nft_trans_container_elem(trans)->timeout
-#define nft_trans_elem_expiration(trans) \
- nft_trans_container_elem(trans)->expiration
#define nft_trans_elem_set_bound(trans) \
nft_trans_container_elem(trans)->bound
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 529160f76cac..02914b1df38b 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -97,7 +97,7 @@ struct calipso_doi;
/* NetLabel audit information */
struct netlbl_audit {
- u32 secid;
+ struct lsm_prop prop;
kuid_t loginuid;
unsigned int sessionid;
};
@@ -208,6 +208,7 @@ struct netlbl_lsm_secattr {
* struct netlbl_calipso_ops - NetLabel CALIPSO operations
* @doi_add: add a CALIPSO DOI
* @doi_free: free a CALIPSO DOI
+ * @doi_remove: remove a CALIPSO DOI
* @doi_getdef: returns a reference to a DOI
* @doi_putdef: releases a reference of a DOI
* @doi_walk: enumerate the DOI list
diff --git a/include/net/netlink.h b/include/net/netlink.h
index db6af207287c..39eaa6be6ca8 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -142,6 +142,8 @@
* nla_get_flag(nla) return 1 if flag is true
* nla_get_msecs(nla) get payload for a msecs attribute
*
+ * The same functions also exist as _default() variants.
+ *
* Attribute Misc:
* nla_memcpy(dest, nla, count) copy attribute into memory
* nla_memcmp(nla, data, size) compare attribute with memory area
@@ -469,6 +471,7 @@ struct nla_policy {
.max = _len \
}
#define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len)
+#define NLA_POLICY_MAX_LEN(_len) NLA_POLICY_MAX(NLA_BINARY, _len)
/**
* struct nl_info - netlink source information
@@ -1695,6 +1698,20 @@ static inline u32 nla_get_u32(const struct nlattr *nla)
}
/**
+ * nla_get_u32_default - return payload of u32 attribute or default
+ * @nla: u32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u32 nla_get_u32_default(const struct nlattr *nla, u32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u32(nla);
+}
+
+/**
* nla_get_be32 - return payload of __be32 attribute
* @nla: __be32 netlink attribute
*/
@@ -1704,6 +1721,21 @@ static inline __be32 nla_get_be32(const struct nlattr *nla)
}
/**
+ * nla_get_be32_default - return payload of be32 attribute or default
+ * @nla: __be32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be32 nla_get_be32_default(const struct nlattr *nla,
+ __be32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_be32(nla);
+}
+
+/**
* nla_get_le32 - return payload of __le32 attribute
* @nla: __le32 netlink attribute
*/
@@ -1713,6 +1745,21 @@ static inline __le32 nla_get_le32(const struct nlattr *nla)
}
/**
+ * nla_get_le32_default - return payload of le32 attribute or default
+ * @nla: __le32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __le32 nla_get_le32_default(const struct nlattr *nla,
+ __le32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_le32(nla);
+}
+
+/**
* nla_get_u16 - return payload of u16 attribute
* @nla: u16 netlink attribute
*/
@@ -1722,6 +1769,20 @@ static inline u16 nla_get_u16(const struct nlattr *nla)
}
/**
+ * nla_get_u16_default - return payload of u16 attribute or default
+ * @nla: u16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u16 nla_get_u16_default(const struct nlattr *nla, u16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u16(nla);
+}
+
+/**
* nla_get_be16 - return payload of __be16 attribute
* @nla: __be16 netlink attribute
*/
@@ -1731,6 +1792,21 @@ static inline __be16 nla_get_be16(const struct nlattr *nla)
}
/**
+ * nla_get_be16_default - return payload of be16 attribute or default
+ * @nla: __be16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be16 nla_get_be16_default(const struct nlattr *nla,
+ __be16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_be16(nla);
+}
+
+/**
* nla_get_le16 - return payload of __le16 attribute
* @nla: __le16 netlink attribute
*/
@@ -1740,6 +1816,21 @@ static inline __le16 nla_get_le16(const struct nlattr *nla)
}
/**
+ * nla_get_le16_default - return payload of le16 attribute or default
+ * @nla: __le16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __le16 nla_get_le16_default(const struct nlattr *nla,
+ __le16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_le16(nla);
+}
+
+/**
* nla_get_u8 - return payload of u8 attribute
* @nla: u8 netlink attribute
*/
@@ -1749,6 +1840,20 @@ static inline u8 nla_get_u8(const struct nlattr *nla)
}
/**
+ * nla_get_u8_default - return payload of u8 attribute or default
+ * @nla: u8 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u8 nla_get_u8_default(const struct nlattr *nla, u8 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u8(nla);
+}
+
+/**
* nla_get_u64 - return payload of u64 attribute
* @nla: u64 netlink attribute
*/
@@ -1762,6 +1867,20 @@ static inline u64 nla_get_u64(const struct nlattr *nla)
}
/**
+ * nla_get_u64_default - return payload of u64 attribute or default
+ * @nla: u64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u64 nla_get_u64_default(const struct nlattr *nla, u64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u64(nla);
+}
+
+/**
* nla_get_uint - return payload of uint attribute
* @nla: uint netlink attribute
*/
@@ -1773,6 +1892,20 @@ static inline u64 nla_get_uint(const struct nlattr *nla)
}
/**
+ * nla_get_uint_default - return payload of uint attribute or default
+ * @nla: uint netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u64 nla_get_uint_default(const struct nlattr *nla, u64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_uint(nla);
+}
+
+/**
* nla_get_be64 - return payload of __be64 attribute
* @nla: __be64 netlink attribute
*/
@@ -1786,6 +1919,21 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
}
/**
+ * nla_get_be64_default - return payload of be64 attribute or default
+ * @nla: __be64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be64 nla_get_be64_default(const struct nlattr *nla,
+ __be64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_be64(nla);
+}
+
+/**
* nla_get_le64 - return payload of __le64 attribute
* @nla: __le64 netlink attribute
*/
@@ -1795,6 +1943,21 @@ static inline __le64 nla_get_le64(const struct nlattr *nla)
}
/**
+ * nla_get_le64_default - return payload of le64 attribute or default
+ * @nla: __le64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __le64 nla_get_le64_default(const struct nlattr *nla,
+ __le64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_le64(nla);
+}
+
+/**
* nla_get_s32 - return payload of s32 attribute
* @nla: s32 netlink attribute
*/
@@ -1804,6 +1967,20 @@ static inline s32 nla_get_s32(const struct nlattr *nla)
}
/**
+ * nla_get_s32_default - return payload of s32 attribute or default
+ * @nla: s32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s32 nla_get_s32_default(const struct nlattr *nla, s32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s32(nla);
+}
+
+/**
* nla_get_s16 - return payload of s16 attribute
* @nla: s16 netlink attribute
*/
@@ -1813,6 +1990,20 @@ static inline s16 nla_get_s16(const struct nlattr *nla)
}
/**
+ * nla_get_s16_default - return payload of s16 attribute or default
+ * @nla: s16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s16 nla_get_s16_default(const struct nlattr *nla, s16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s16(nla);
+}
+
+/**
* nla_get_s8 - return payload of s8 attribute
* @nla: s8 netlink attribute
*/
@@ -1822,6 +2013,20 @@ static inline s8 nla_get_s8(const struct nlattr *nla)
}
/**
+ * nla_get_s8_default - return payload of s8 attribute or default
+ * @nla: s8 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s8 nla_get_s8_default(const struct nlattr *nla, s8 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s8(nla);
+}
+
+/**
* nla_get_s64 - return payload of s64 attribute
* @nla: s64 netlink attribute
*/
@@ -1835,6 +2040,20 @@ static inline s64 nla_get_s64(const struct nlattr *nla)
}
/**
+ * nla_get_s64_default - return payload of s64 attribute or default
+ * @nla: s64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s64 nla_get_s64_default(const struct nlattr *nla, s64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s64(nla);
+}
+
+/**
* nla_get_sint - return payload of uint attribute
* @nla: uint netlink attribute
*/
@@ -1846,6 +2065,20 @@ static inline s64 nla_get_sint(const struct nlattr *nla)
}
/**
+ * nla_get_sint_default - return payload of sint attribute or default
+ * @nla: sint netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s64 nla_get_sint_default(const struct nlattr *nla, s64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_sint(nla);
+}
+
+/**
* nla_get_flag - return payload of flag attribute
* @nla: flag netlink attribute
*/
@@ -1868,6 +2101,21 @@ static inline unsigned long nla_get_msecs(const struct nlattr *nla)
}
/**
+ * nla_get_msecs_default - return payload of msecs attribute or default
+ * @nla: msecs netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline unsigned long nla_get_msecs_default(const struct nlattr *nla,
+ unsigned long defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_msecs(nla);
+}
+
+/**
* nla_get_in_addr - return payload of IPv4 address attribute
* @nla: IPv4 address netlink attribute
*/
@@ -1877,6 +2125,21 @@ static inline __be32 nla_get_in_addr(const struct nlattr *nla)
}
/**
+ * nla_get_in_addr_default - return payload of be32 attribute or default
+ * @nla: IPv4 address netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be32 nla_get_in_addr_default(const struct nlattr *nla,
+ __be32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_in_addr(nla);
+}
+
+/**
* nla_get_in6_addr - return payload of IPv6 address attribute
* @nla: IPv6 address netlink attribute
*/
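
The _default() variants collapse the common `attr ? nla_get_*(attr) : fallback` pattern; a sketch with a hypothetical attribute index:

static u32 example_get_timeout(struct nlattr **tb)
{
	/* previously: tb[1] ? nla_get_u32(tb[1]) : 1000 */
	return nla_get_u32_default(tb[1], 1000);
}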
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 78214f1b43a2..9b36f0ff0c20 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -15,6 +15,7 @@ struct netns_core {
int sysctl_somaxconn;
int sysctl_optmem_max;
u8 sysctl_txrehash;
+ u8 sysctl_tstamp_allow_data;
#ifdef CONFIG_PROC_FS
struct prot_inuse __percpu *prot_inuse;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 276f622f3516..3c014170e001 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -76,6 +76,8 @@ struct netns_ipv4 {
__cacheline_group_begin(netns_ipv4_read_rx);
u8 sysctl_ip_early_demux;
u8 sysctl_tcp_early_demux;
+ u8 sysctl_tcp_l3mdev_accept;
+ /* 3 bytes hole, try to pack */
int sysctl_tcp_reordering;
int sysctl_tcp_rmem[3];
__cacheline_group_end(netns_ipv4_read_rx);
@@ -151,9 +153,6 @@ struct netns_ipv4 {
u8 sysctl_fwmark_reflect;
u8 sysctl_tcp_fwmark_accept;
-#ifdef CONFIG_NET_L3_MASTER_DEV
- u8 sysctl_tcp_l3mdev_accept;
-#endif
u8 sysctl_tcp_mtu_probing;
int sysctl_tcp_mtu_probe_floor;
int sysctl_tcp_base_mss;
@@ -263,12 +262,14 @@ struct netns_ipv4 {
#endif
struct fib_notifier_ops *notifier_ops;
- unsigned int fib_seq; /* protected by rtnl_mutex */
+ unsigned int fib_seq; /* writes protected by rtnl_mutex */
struct fib_notifier_ops *ipmr_notifier_ops;
unsigned int ipmr_seq; /* protected by rtnl_mutex */
atomic_t rt_genid;
siphash_key_t ip_id_key;
+ struct hlist_head *inet_addr_lst;
+ struct delayed_work addr_chk_work;
};
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index ae60d6664095..23dd647fe024 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -43,6 +43,7 @@ struct netns_xfrm {
struct hlist_head __rcu *state_bysrc;
struct hlist_head __rcu *state_byspi;
struct hlist_head __rcu *state_byseq;
+ struct hlist_head __percpu *state_cache_input;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index dc36519d16aa..09efcaed7c3f 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -475,7 +475,7 @@ struct nci_rf_discover_ntf {
#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
struct activation_params_nfca_poll_iso_dep {
__u8 rats_res_len;
- __u8 rats_res[20];
+ __u8 rats_res[NFC_ATS_MAXSIZE];
};
struct activation_params_nfcb_poll_iso_dep {
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index ea8595651c38..e180bdf2f82b 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -265,6 +265,10 @@ struct nci_dev {
/* stored during intf_activated_ntf */
__u8 remote_gb[NFC_MAX_GT_LEN];
__u8 remote_gb_len;
+
+ /* stored during intf_activated_ntf */
+ __u8 target_ats[NFC_ATS_MAXSIZE];
+ __u8 target_ats_len;
};
/* ----- NCI Devices ----- */
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 3a3781838c67..127e6c7d910d 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -86,6 +86,8 @@ struct nfc_ops {
* is a type A one. The %sens_res most significant byte must be byte 2
* as described by the NFC Forum digital specification (i.e. the platform
* configuration one) while %sens_res least significant byte is byte 1.
+ * @ats_len: length of Answer To Select in bytes
+ * @ats: Answer To Select returned by an ISO 14443 Type A target upon activation
*/
struct nfc_target {
u32 idx;
@@ -105,6 +107,8 @@ struct nfc_target {
u8 is_iso15693;
u8 iso15693_dsfid;
u8 iso15693_uid[NFC_ISO15693_UID_MAXSIZE];
+ u8 ats_len;
+ u8 ats[NFC_ATS_MAXSIZE];
};
/**
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index e9dc8dca5817..37a3e83531c6 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -11,13 +11,13 @@
#define PN_DEV_H
#include <linux/list.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
struct net;
struct phonet_device_list {
struct list_head list;
- struct mutex lock;
+ spinlock_t lock;
};
struct phonet_device_list *phonet_device_list(struct net *net);
@@ -38,11 +38,11 @@ int phonet_address_add(struct net_device *dev, u8 addr);
int phonet_address_del(struct net_device *dev, u8 addr);
u8 phonet_address_get(struct net_device *dev, u8 addr);
int phonet_address_lookup(struct net *net, u8 addr);
-void phonet_address_notify(int event, struct net_device *dev, u8 addr);
+void phonet_address_notify(struct net *net, int event, u32 ifindex, u8 addr);
int phonet_route_add(struct net_device *dev, u8 daddr);
int phonet_route_del(struct net_device *dev, u8 daddr);
-void rtm_phonet_notify(int event, struct net_device *dev, u8 dst);
+void rtm_phonet_notify(struct net *net, int event, u32 ifindex, u8 dst);
struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr);
struct net_device *phonet_route_output(struct net *net, u8 daddr);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 4880b3a7aced..cf199af85c52 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -755,6 +755,7 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
cls_common->chain_index = tp->chain->index;
cls_common->protocol = tp->protocol;
cls_common->prio = tp->prio >> 16;
+ cls_common->skip_sw = tc_skip_sw(flags);
if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
cls_common->extack = extack;
}
diff --git a/include/net/route.h b/include/net/route.h
index 1789f1e6640b..84cb1e04f5cd 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -156,12 +156,12 @@ static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4
* structure is only partially set, it may bypass some fib-rules.
*/
static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
- __be32 saddr, u8 tos, int oif,
- __u8 scope)
+ __be32 saddr, dscp_t dscp,
+ int oif, __u8 scope)
{
struct flowi4 fl4 = {
.flowi4_oif = oif,
- .flowi4_tos = tos,
+ .flowi4_tos = inet_dscp_to_dsfield(dscp),
.flowi4_scope = scope,
.daddr = daddr,
.saddr = saddr,
@@ -198,30 +198,35 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
fl4->fl4_gre_key = gre_key;
return ip_route_output_key(net, fl4);
}
-int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev,
- struct in_device *in_dev, u32 *itag);
-int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin);
-int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin,
- const struct sk_buff *hint);
-
-static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin)
+
+enum skb_drop_reason
+ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev,
+ struct in_device *in_dev, u32 *itag);
+enum skb_drop_reason
+ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev);
+enum skb_drop_reason
+ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev,
+ const struct sk_buff *hint);
+
+static inline enum skb_drop_reason
+ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, dscp_t dscp,
+ struct net_device *devin)
{
- int err;
+ enum skb_drop_reason reason;
rcu_read_lock();
- err = ip_route_input_noref(skb, dst, src, tos, devin);
- if (!err) {
+ reason = ip_route_input_noref(skb, dst, src, dscp, devin);
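+	/* SKB_NOT_DROPPED_YET is 0, so !reason means the lookup succeeded. */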
+ if (!reason) {
skb_dst_force(skb);
if (!skb_dst(skb))
- err = -EINVAL;
+ reason = SKB_DROP_REASON_NOT_SPECIFIED;
}
rcu_read_unlock();
- return err;
+ return reason;
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 2d3eb7cb4dff..bc0069a8b6ea 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -3,6 +3,7 @@
#define __NET_RTNETLINK_H
#include <linux/rtnetlink.h>
+#include <linux/srcu.h>
#include <net/netlink.h>
typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *,
@@ -11,6 +12,8 @@ typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
+#define RTNL_FLAG_DOIT_PERNET RTNL_FLAG_DOIT_UNLOCKED
+#define RTNL_FLAG_DOIT_PERNET_WIP RTNL_FLAG_DOIT_UNLOCKED
RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
RTNL_FLAG_DUMP_SPLIT_NLM_DONE = BIT(3), /* legacy behavior */
@@ -29,6 +32,16 @@ static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype)
return msgtype & RTNL_KIND_MASK;
}
+/**
+ * struct rtnl_msg_handler - rtnetlink message type and handlers
+ *
+ * @owner: NULL for built-in, THIS_MODULE for module
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
+ */
struct rtnl_msg_handler {
struct module *owner;
int protocol;
@@ -38,11 +51,6 @@ struct rtnl_msg_handler {
int flags;
};
-void rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
-int rtnl_register_module(struct module *owner, int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
-int rtnl_unregister(int protocol, int msgtype);
void rtnl_unregister_all(int protocol);
int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n);
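
/*
 * Hedged sketch of the array-based registration that __rtnl_register_many()
 * enables; my_newlink() and my_dumplink() are hypothetical handlers.
 */
static const struct rtnl_msg_handler my_handlers[] = {
	{ .owner = THIS_MODULE, .protocol = PF_BRIDGE,
	  .msgtype = RTM_NEWLINK, .doit = my_newlink },
	{ .owner = THIS_MODULE, .protocol = PF_BRIDGE,
	  .msgtype = RTM_GETLINK, .dumpit = my_dumplink,
	  .flags = RTNL_FLAG_DUMP_UNLOCKED },
};

static int __init my_module_init(void)
{
	return __rtnl_register_many(my_handlers, ARRAY_SIZE(my_handlers));
}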
@@ -64,9 +72,11 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
/**
* struct rtnl_link_ops - rtnetlink link operations
*
- * @list: Used internally
+ * @list: Used internally, protected by link_ops_mutex and SRCU
+ * @srcu: Used internally
* @kind: Identifier
* @netns_refund: Physical device, move to init_net on netns exit
+ * @peer_type: Peer device specific netlink attribute number (e.g. VETH_INFO_PEER)
* @maxtype: Highest device specific netlink attribute number
* @policy: Netlink policy for device specific attribute validation
* @validate: Optional validation function for netlink/changelink parameters
@@ -95,6 +105,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
*/
struct rtnl_link_ops {
struct list_head list;
+ struct srcu_struct srcu;
const char *kind;
@@ -107,6 +118,7 @@ struct rtnl_link_ops {
void (*setup)(struct net_device *dev);
bool netns_refund;
+ const u16 peer_type;
unsigned int maxtype;
const struct nla_policy *policy;
int (*validate)(struct nlattr *tb[],
@@ -155,16 +167,14 @@ struct rtnl_link_ops {
int *prividx, int attr);
};
-int __rtnl_link_register(struct rtnl_link_ops *ops);
-void __rtnl_link_unregister(struct rtnl_link_ops *ops);
-
int rtnl_link_register(struct rtnl_link_ops *ops);
void rtnl_link_unregister(struct rtnl_link_ops *ops);
/**
* struct rtnl_af_ops - rtnetlink address family operations
*
- * @list: Used internally
+ * @list: Used internally, protected by RTNL and SRCU
+ * @srcu: Used internally
* @family: Address family
* @fill_link_af: Function to fill IFLA_AF_SPEC with address family
* specific netlink attributes.
@@ -177,6 +187,8 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops);
*/
struct rtnl_af_ops {
struct list_head list;
+ struct srcu_struct srcu;
+
int family;
int (*fill_link_af)(struct sk_buff *skb,
@@ -196,7 +208,7 @@ struct rtnl_af_ops {
size_t (*get_stats_af_size)(const struct net_device *dev);
};
-void rtnl_af_register(struct rtnl_af_ops *ops);
+int rtnl_af_register(struct rtnl_af_ops *ops);
void rtnl_af_unregister(struct rtnl_af_ops *ops);
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
diff --git a/include/net/sock.h b/include/net/sock.h
index f29c14448938..7464e9f9f47c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -956,6 +956,12 @@ enum sock_flags {
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+/*
+ * The highest bit of sk_tsflags is reserved for kernel-internal
+ * SOCKCM_FLAG_TS_OPT_ID. A check in net/core/sock.c ensures that
+ * SOF_TIMESTAMPING* values do not reach this reserved area.
+ */
+#define SOCKCM_FLAG_TS_OPT_ID BIT(31)
static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk)
{
@@ -1754,6 +1760,15 @@ void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
void sock_pfree(struct sk_buff *skb);
+
+static inline void skb_set_owner_edemux(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ if (refcount_inc_not_zero(&sk->sk_refcnt)) {
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ }
+}
#else
#define sock_edemux sock_efree
#endif
@@ -1798,6 +1813,7 @@ struct sockcm_cookie {
u64 transmit_time;
u32 mark;
u32 tsflags;
+ u32 ts_opt_id;
};
static inline void sockcm_init(struct sockcm_cookie *sockc,
@@ -2655,39 +2671,48 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
sock_write_timestamp(sk, 0);
}
-void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
+void __sock_tx_timestamp(__u32 tsflags, __u8 *tx_flags);
/**
* _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
- * @tsflags: timestamping flags to use
+ * @sockc: pointer to socket cmsg cookie to get timestamping info
* @tx_flags: completed with instructions for time stamping
* @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
*
* Note: callers should take care of initial ``*tx_flags`` value (usually 0)
*/
-static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+static inline void _sock_tx_timestamp(struct sock *sk,
+ const struct sockcm_cookie *sockc,
__u8 *tx_flags, __u32 *tskey)
{
+ __u32 tsflags = sockc->tsflags;
+
if (unlikely(tsflags)) {
__sock_tx_timestamp(tsflags, tx_flags);
if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
- tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
- *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) {
+ if (tsflags & SOCKCM_FLAG_TS_OPT_ID)
+ *tskey = sockc->ts_opt_id;
+ else
+ *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ }
}
if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
*tx_flags |= SKBTX_WIFI_STATUS;
}
-static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+static inline void sock_tx_timestamp(struct sock *sk,
+ const struct sockcm_cookie *sockc,
__u8 *tx_flags)
{
- _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
+ _sock_tx_timestamp(sk, sockc, tx_flags, NULL);
}
-static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+static inline void skb_setup_tx_timestamp(struct sk_buff *skb,
+ const struct sockcm_cookie *sockc)
{
- _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
+ _sock_tx_timestamp(skb->sk, sockc, &skb_shinfo(skb)->tx_flags,
&skb_shinfo(skb)->tskey);
}
@@ -2791,6 +2816,16 @@ static inline bool sk_listener(const struct sock *sk)
return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}
+/* This helper checks if a socket is a LISTEN, NEW_SYN_RECV or TIME_WAIT one.
+ * TCP SYNACK messages can be attached to LISTEN or NEW_SYN_RECV (depending on
+ * SYNCOOKIE). TCP RST and ACK can be attached to TIME_WAIT.
+ */
+static inline bool sk_listener_or_tw(const struct sock *sk)
+{
+ return (1 << READ_ONCE(sk->sk_state)) &
+ (TCPF_LISTEN | TCPF_NEW_SYN_RECV | TCPF_TIME_WAIT);
+}
+
void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
int type);
@@ -2815,8 +2850,6 @@ void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
-extern int sysctl_tstamp_allow_data;
-
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d1948d357dad..e9b37b76e894 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2430,6 +2430,20 @@ void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
+static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
+{
+ WARN_ONCE(cond,
+ "%scwn:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
+ str,
+ tcp_snd_cwnd(tcp_sk(sk)),
+ tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
+ tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
+ tcp_sk(sk)->tlp_high_seq, sk->sk_state,
+ inet_csk(sk)->icsk_ca_state,
+ tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
+ inet_csk(sk)->icsk_pmtu_cookie);
+}
+
/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
@@ -2441,17 +2455,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
} else {
- WARN_ONCE(1,
- "rtx queue emtpy: "
- "out:%u sacked:%u lost:%u retrans:%u "
- "tlp_high_seq:%u sk_state:%u ca_state:%u "
- "advmss:%u mss_cache:%u pmtu:%u\n",
- tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
- tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
- tcp_sk(sk)->tlp_high_seq, sk->sk_state,
- inet_csk(sk)->icsk_ca_state,
- tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
- inet_csk(sk)->icsk_pmtu_cookie);
+ tcp_warn_once(sk, 1, "rtx queue empty: ");
return jiffies_to_usecs(rto);
}
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index 1d46460d0fef..df655ce6987d 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -183,7 +183,8 @@ int tcp_ao_hash_skb(unsigned short int family,
const u8 *tkey, int hash_offset, u32 sne);
int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family,
sockptr_t optval, int optlen);
-struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao,
+struct tcp_ao_key *tcp_ao_established_key(const struct sock *sk,
+ struct tcp_ao_info *ao,
int sndid, int rcvid);
int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
struct request_sock *req, struct sk_buff *skb,
diff --git a/include/net/tls.h b/include/net/tls.h
index 3a33924db2bc..61fef2880114 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -390,8 +390,12 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx)
static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
- struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_context *ctx;
+
+ if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
+ return false;
+ ctx = tls_get_ctx(sk);
if (!ctx)
return false;
return !!tls_sw_ctx_tx(ctx);
@@ -399,8 +403,12 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
- struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_context *ctx;
+
+ if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
+ return false;
+ ctx = tls_get_ctx(sk);
if (!ctx)
return false;
return !!tls_sw_ctx_rx(ctx);
diff --git a/include/net/udp.h b/include/net/udp.h
index 61222545ab1c..6e89520e100d 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -50,29 +50,56 @@ struct udp_skb_cb {
#define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb))
/**
- * struct udp_hslot - UDP hash slot
+ * struct udp_hslot - UDP hash slot used by udp_table.hash/hash4
*
* @head: head of list of sockets
+ * @nulls_head: head of list of sockets, only used by hash4
* @count: number of sockets in 'head' list
* @lock: spinlock protecting changes to head/count
*/
struct udp_hslot {
- struct hlist_head head;
+ union {
+ struct hlist_head head;
+ /* hash4 uses hlist_nulls so that a lookup racing with rehash() can
+ * detect that it wrongly moved onto another hlist and restart.
+ */
+ struct hlist_nulls_head nulls_head;
+ };
int count;
spinlock_t lock;
-} __attribute__((aligned(2 * sizeof(long))));
+} __aligned(2 * sizeof(long));
+
+/**
+ * struct udp_hslot_main - UDP hash slot used by udp_table.hash2
+ *
+ * @hslot: basic hash slot
+ * @hash4_cnt: number of sockets in hslot4 of the same
+ * (local port, local address)
+ */
+struct udp_hslot_main {
+ struct udp_hslot hslot; /* must be the first member */
+#if !IS_ENABLED(CONFIG_BASE_SMALL)
+ u32 hash4_cnt;
+#endif
+} __aligned(2 * sizeof(long));
+#define UDP_HSLOT_MAIN(__hslot) ((struct udp_hslot_main *)(__hslot))
/**
* struct udp_table - UDP table
*
* @hash: hash table, sockets are hashed on (local port)
* @hash2: hash table, sockets are hashed on (local port, local address)
+ * @hash4: hash table, connected sockets are hashed on
+ * (local port, local address, remote port, remote address)
* @mask: number of slots in hash tables, minus 1
* @log: log2(number of slots in hash table)
*/
struct udp_table {
struct udp_hslot *hash;
- struct udp_hslot *hash2;
+ struct udp_hslot_main *hash2;
+#if !IS_ENABLED(CONFIG_BASE_SMALL)
+ struct udp_hslot *hash4;
+#endif
unsigned int mask;
unsigned int log;
};
@@ -84,6 +111,7 @@ static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
{
return &table->hash[udp_hashfn(net, num, table->mask)];
}
+
/*
* For secondary hash, net_hash_mix() is performed before calling
* udp_hashslot2(), this explains difference with udp_hashslot()
@@ -91,8 +119,89 @@ static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
unsigned int hash)
{
- return &table->hash2[hash & table->mask];
+ return &table->hash2[hash & table->mask].hslot;
+}
+
+#if IS_ENABLED(CONFIG_BASE_SMALL)
+static inline void udp_table_hash4_init(struct udp_table *table)
+{
+}
+
+static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
+ unsigned int hash)
+{
+ BUILD_BUG();
+ return NULL;
+}
+
+static inline bool udp_hashed4(const struct sock *sk)
+{
+ return false;
+}
+
+static inline unsigned int udp_hash4_slot_size(void)
+{
+ return 0;
+}
+
+static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
+{
+ return false;
+}
+
+static inline void udp_hash4_inc(struct udp_hslot *hslot2)
+{
+}
+
+static inline void udp_hash4_dec(struct udp_hslot *hslot2)
+{
}
+#else /* !CONFIG_BASE_SMALL */
+
+/* Must be called with table->hash2 initialized */
+static inline void udp_table_hash4_init(struct udp_table *table)
+{
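+	/* hash4 slots sit in the same allocation, directly after the hash2 slots. */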
+ table->hash4 = (void *)(table->hash2 + (table->mask + 1));
+ for (int i = 0; i <= table->mask; i++) {
+ table->hash2[i].hash4_cnt = 0;
+
+ INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i);
+ table->hash4[i].count = 0;
+ spin_lock_init(&table->hash4[i].lock);
+ }
+}
+
+static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
+ unsigned int hash)
+{
+ return &table->hash4[hash & table->mask];
+}
+
+static inline bool udp_hashed4(const struct sock *sk)
+{
+ return !hlist_nulls_unhashed(&udp_sk(sk)->udp_lrpa_node);
+}
+
+static inline unsigned int udp_hash4_slot_size(void)
+{
+ return sizeof(struct udp_hslot);
+}
+
+static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
+{
+ return UDP_HSLOT_MAIN(hslot2)->hash4_cnt;
+}
+
+static inline void udp_hash4_inc(struct udp_hslot *hslot2)
+{
+ UDP_HSLOT_MAIN(hslot2)->hash4_cnt++;
+}
+
+static inline void udp_hash4_dec(struct udp_hslot *hslot2)
+{
+ UDP_HSLOT_MAIN(hslot2)->hash4_cnt--;
+}
+#endif /* CONFIG_BASE_SMALL */
extern struct proto udp_prot;
@@ -193,13 +302,29 @@ static inline int udp_lib_hash(struct sock *sk)
}
void udp_lib_unhash(struct sock *sk);
-void udp_lib_rehash(struct sock *sk, u16 new_hash);
+void udp_lib_rehash(struct sock *sk, u16 new_hash, u16 new_hash4);
+u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
+ const __be32 faddr, const __be16 fport);
static inline void udp_lib_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
}
+/* hash4 routines shared between UDPv4/6 */
+#if IS_ENABLED(CONFIG_BASE_SMALL)
+static inline void udp_lib_hash4(struct sock *sk, u16 hash)
+{
+}
+
+static inline void udp4_hash4(struct sock *sk)
+{
+}
+#else /* !CONFIG_BASE_SMALL */
+void udp_lib_hash4(struct sock *sk, u16 hash);
+void udp4_hash4(struct sock *sk);
+#endif /* CONFIG_BASE_SMALL */
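
/*
 * Illustrative sketch (assumes !CONFIG_BASE_SMALL): locating the 4-tuple
 * slot of a connected UDPv4 socket with the helpers declared above;
 * udp4_hslot4_of() is not part of this patch.
 */
static struct udp_hslot *udp4_hslot4_of(struct net *net, struct sock *sk,
					struct udp_table *table)
{
	const struct inet_sock *inet = inet_sk(sk);
	u32 hash = udp_ehashfn(net, inet->inet_rcv_saddr, inet->inet_num,
			       inet->inet_daddr, inet->inet_dport);

	return udp_hashslot4(table, hash);
}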
+
int udp_lib_get_port(struct sock *sk, unsigned short snum,
unsigned int hash2_nulladdr);
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 0a5dca2b2b3f..40085afd9160 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -126,8 +126,8 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
if (likely(!xdp_buff_has_frags(xdp)))
goto out;
- list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
- list_del(&pos->xskb_list_node);
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+ list_del(&pos->list_node);
xp_free(pos);
}
@@ -140,7 +140,7 @@ static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
- list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
+ list_add_tail(&frag->list_node, &frag->pool->xskb_list);
}
static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
@@ -150,9 +150,9 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
struct xdp_buff_xsk *frag;
frag = list_first_entry_or_null(&xskb->pool->xskb_list,
- struct xdp_buff_xsk, xskb_list_node);
+ struct xdp_buff_xsk, list_node);
if (frag) {
- list_del(&frag->xskb_list_node);
+ list_del(&frag->list_node);
ret = &frag->xdp;
}
@@ -163,7 +163,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
- list_del(&xskb->xskb_list_node);
+ list_del(&xskb->list_node);
}
static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
@@ -172,7 +172,7 @@ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
struct xdp_buff_xsk *frag;
frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
- xskb_list_node);
+ list_node);
return &frag->xdp;
}
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index a0bdd58f401c..32c09e85a64c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -19,6 +19,7 @@
#include <net/sock.h>
#include <net/dst.h>
+#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ipv6.h>
@@ -184,10 +185,13 @@ struct xfrm_state {
};
struct hlist_node byspi;
struct hlist_node byseq;
+ struct hlist_node state_cache;
+ struct hlist_node state_cache_input;
refcount_t refcnt;
spinlock_t lock;
+ u32 pcpu_num;
struct xfrm_id id;
struct xfrm_selector sel;
struct xfrm_mark mark;
@@ -351,7 +355,7 @@ void xfrm_if_unregister_cb(void);
struct xfrm_dst_lookup_params {
struct net *net;
- int tos;
+ dscp_t dscp;
int oif;
xfrm_address_t *saddr;
xfrm_address_t *daddr;
@@ -536,6 +540,7 @@ struct xfrm_policy_queue {
* @xp_net: network namespace the policy lives in
* @bydst: hlist node for SPD hash table or rbtree list
* @byidx: hlist node for index hash table
+ * @state_cache_list: hlist head for policy cached xfrm states
* @lock: serialize changes to policy structure members
* @refcnt: reference count, freed once it reaches 0
* @pos: kernel internal tie-breaker to determine age of policy
@@ -566,6 +571,8 @@ struct xfrm_policy {
struct hlist_node bydst;
struct hlist_node byidx;
+ struct hlist_head state_cache_list;
+
/* This lock only affects elements except for entry. */
rwlock_t lock;
refcount_t refcnt;
@@ -1645,6 +1652,10 @@ int xfrm_state_update(struct xfrm_state *x);
struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family);
+struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
+ const xfrm_address_t *daddr,
+ __be32 spi, u8 proto,
+ unsigned short family);
struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
@@ -1684,7 +1695,7 @@ struct xfrmk_spdinfo {
u32 spdhmcnt;
};
-struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
int xfrm_state_delete(struct xfrm_state *x);
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
@@ -1796,7 +1807,7 @@ int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
struct netlink_ext_ack *extack);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
- u8 mode, u32 reqid, u32 if_id, u8 proto,
+ u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
unsigned short family);
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index bacb33f1e3e5..bb03cee716b3 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -28,9 +28,7 @@ struct xdp_buff_xsk {
dma_addr_t dma;
dma_addr_t frame_dma;
struct xsk_buff_pool *pool;
- u64 orig_addr;
- struct list_head free_list_node;
- struct list_head xskb_list_node;
+ struct list_head list_node;
};
#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
@@ -78,6 +76,7 @@ struct xsk_buff_pool {
u32 chunk_size;
u32 chunk_shift;
u32 frame_len;
+ u32 xdp_zc_max_segs;
u8 tx_metadata_len; /* inherited from umem */
u8 cached_need_wakeup;
bool uses_need_wakeup;
@@ -120,7 +119,6 @@ void xp_free(struct xdp_buff_xsk *xskb);
static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
u64 addr)
{
- xskb->orig_addr = addr;
xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}
@@ -222,14 +220,19 @@ static inline void xp_release(struct xdp_buff_xsk *xskb)
xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}
-static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb,
+ struct xsk_buff_pool *pool)
{
- u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+ u64 orig_addr = xskb->xdp.data - pool->addrs;
+ u64 offset;
- offset += xskb->pool->headroom;
- if (!xskb->pool->unaligned)
- return xskb->orig_addr + offset;
- return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+ if (!pool->unaligned)
+ return orig_addr;
+
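+	/* Unaligned mode: keep the chunk base address in the low bits and
+	 * encode the data offset (including headroom) above
+	 * XSK_UNALIGNED_BUF_OFFSET_SHIFT.
+	 */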
+ offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+ orig_addr -= offset;
+ offset += pool->headroom;
+ return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 3c5899290aed..6616348e59b9 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -15,7 +15,7 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/local_lock.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fcoe_sysfs.h>
diff --git a/include/soc/amlogic/reset-meson-aux.h b/include/soc/amlogic/reset-meson-aux.h
new file mode 100644
index 000000000000..d8a15d48c984
--- /dev/null
+++ b/include/soc/amlogic/reset-meson-aux.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_RESET_MESON_AUX_H
+#define __SOC_RESET_MESON_AUX_H
+
+#include <linux/err.h>
+
+struct device;
+struct regmap;
+
+#if IS_ENABLED(CONFIG_RESET_MESON_AUX)
+int devm_meson_rst_aux_register(struct device *dev,
+ struct regmap *map,
+ const char *adev_name);
+#else
+static inline int devm_meson_rst_aux_register(struct device *dev,
+ struct regmap *map,
+ const char *adev_name)
+{
+ return 0;
+}
+#endif
+
+#endif /* __SOC_RESET_MESON_AUX_H */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 0d3d6beb7fdb..7f7a4932d7f1 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -242,7 +242,7 @@ static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
{
- return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
+ return be16_to_cpu(sg->offset) & QM_SG_OFF_MASK;
}
/* "Frame Dequeue Response" */
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 1527d5d45e01..bd0ea07338eb 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -99,7 +99,7 @@ TRACE_EVENT(block_rq_requeue,
__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
- __entry->ioprio = rq->ioprio;
+ __entry->ioprio = req_get_ioprio(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
@@ -136,7 +136,7 @@ DECLARE_EVENT_CLASS(block_rq_completion,
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
__entry->error = blk_status_to_errno(error);
- __entry->ioprio = rq->ioprio;
+ __entry->ioprio = req_get_ioprio(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
@@ -209,7 +209,7 @@ DECLARE_EVENT_CLASS(block_rq,
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
__entry->bytes = blk_rq_bytes(rq);
- __entry->ioprio = rq->ioprio;
+ __entry->ioprio = req_get_ioprio(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index af6b3827fb1d..4df93ca9b7a8 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1706,9 +1706,10 @@ DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_release_data,
DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec),
+ TP_ARGS(fs_info, rec, bytenr),
TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
@@ -1716,7 +1717,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
),
TP_fast_assign_btrfs(fs_info,
- __entry->bytenr = rec->bytenr;
+ __entry->bytenr = bytenr;
__entry->num_bytes = rec->num_bytes;
),
@@ -1727,17 +1728,19 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec)
+ TP_ARGS(fs_info, rec, bytenr)
);
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec)
+ TP_ARGS(fs_info, rec, bytenr)
);
TRACE_EVENT(qgroup_num_dirty_extents,
@@ -2341,7 +2344,6 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking);
DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read);
DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write);
DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_write_lock);
DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
DECLARE_EVENT_CLASS(btrfs__space_info_update,
@@ -2553,10 +2555,9 @@ TRACE_EVENT(btrfs_extent_map_shrinker_count,
TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
- TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr,
- u64 last_root_id, u64 last_ino),
+ TP_PROTO(const struct btrfs_fs_info *fs_info, long nr),
- TP_ARGS(fs_info, nr_to_scan, nr, last_root_id, last_ino),
+ TP_ARGS(fs_info, nr),
TP_STRUCT__entry_btrfs(
__field( long, nr_to_scan )
@@ -2566,10 +2567,11 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
),
TP_fast_assign_btrfs(fs_info,
- __entry->nr_to_scan = nr_to_scan;
+ __entry->nr_to_scan = \
+ atomic64_read(&fs_info->em_shrinker_nr_to_scan);
__entry->nr = nr;
- __entry->last_root_id = last_root_id;
- __entry->last_ino = last_ino;
+ __entry->last_root_id = fs_info->em_shrinker_last_root;
+ __entry->last_ino = fs_info->em_shrinker_last_ino;
),
TP_printk_btrfs("nr_to_scan=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",
@@ -2579,10 +2581,9 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit,
- TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr,
- u64 last_root_id, u64 last_ino),
+ TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr),
- TP_ARGS(fs_info, nr_dropped, nr, last_root_id, last_ino),
+ TP_ARGS(fs_info, nr_dropped, nr),
TP_STRUCT__entry_btrfs(
__field( long, nr_dropped )
@@ -2594,8 +2595,8 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit,
TP_fast_assign_btrfs(fs_info,
__entry->nr_dropped = nr_dropped;
__entry->nr = nr;
- __entry->last_root_id = last_root_id;
- __entry->last_ino = last_ino;
+ __entry->last_root_id = fs_info->em_shrinker_last_root;
+ __entry->last_ino = fs_info->em_shrinker_last_ino;
),
TP_printk_btrfs("nr_dropped=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",
diff --git a/include/trace/events/hugetlbfs.h b/include/trace/events/hugetlbfs.h
new file mode 100644
index 000000000000..8331c904a9ba
--- /dev/null
+++ b/include/trace/events/hugetlbfs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hugetlbfs
+
+#if !defined(_TRACE_HUGETLBFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HUGETLBFS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(hugetlbfs_alloc_inode,
+
+ TP_PROTO(struct inode *inode, struct inode *dir, int mode),
+
+ TP_ARGS(inode, dir, mode),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(ino_t, dir)
+ __field(__u16, mode)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->dir = dir->i_ino;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned long) __entry->dir, __entry->mode)
+);
+
+DECLARE_EVENT_CLASS(hugetlbfs__inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(__u16, mode)
+ __field(loff_t, size)
+ __field(unsigned int, nlink)
+ __field(unsigned int, seals)
+ __field(blkcnt_t, blocks)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->size = inode->i_size;
+ __entry->nlink = inode->i_nlink;
+ __entry->seals = HUGETLBFS_I(inode)->seals;
+ __entry->blocks = inode->i_blocks;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o size %lld nlink %u seals %u blocks %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino,
+ __entry->mode, __entry->size, __entry->nlink, __entry->seals,
+ (unsigned long long)__entry->blocks)
+);
+
+DEFINE_EVENT(hugetlbfs__inode, hugetlbfs_evict_inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DEFINE_EVENT(hugetlbfs__inode, hugetlbfs_free_inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+TRACE_EVENT(hugetlbfs_setattr,
+
+ TP_PROTO(struct inode *inode, struct dentry *dentry,
+ struct iattr *attr),
+
+ TP_ARGS(inode, dentry, attr),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, d_len)
+ __string(d_name, dentry->d_name.name)
+ __field(unsigned int, ia_valid)
+ __field(unsigned int, ia_mode)
+ __field(loff_t, old_size)
+ __field(loff_t, ia_size)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->d_len = dentry->d_name.len;
+ __assign_str(d_name);
+ __entry->ia_valid = attr->ia_valid;
+ __entry->ia_mode = attr->ia_mode;
+ __entry->old_size = inode->i_size;
+ __entry->ia_size = attr->ia_size;
+ ),
+
+ TP_printk("dev %d,%d ino %lu name %.*s valid %#x mode 0%o old_size %lld size %lld",
+ MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long)__entry->ino,
+ __entry->d_len, __get_str(d_name), __entry->ia_valid, __entry->ia_mode,
+ __entry->old_size, __entry->ia_size)
+);
+
+TRACE_EVENT(hugetlbfs_fallocate,
+
+ TP_PROTO(struct inode *inode, int mode,
+ loff_t offset, loff_t len, int ret),
+
+ TP_ARGS(inode, mode, offset, len, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(int, mode)
+ __field(loff_t, offset)
+ __field(loff_t, len)
+ __field(loff_t, size)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = mode;
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->size = inode->i_size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o offset %lld len %lld size %lld ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long)__entry->ino, __entry->mode,
+ (unsigned long long)__entry->offset,
+ (unsigned long long)__entry->len,
+ (unsigned long long)__entry->size,
+ __entry->ret)
+);
+
+#endif /* _TRACE_HUGETLBFS_H */
+
+ /* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 412c9c210a32..fb81c533b310 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -315,20 +315,14 @@ TRACE_EVENT(io_uring_fail_link,
* io_uring_complete - called when completing an SQE
*
* @ctx: pointer to a ring context structure
- * @req: pointer to a submitted request
- * @user_data: user data associated with the request
- * @res: result of the request
- * @cflags: completion flags
- * @extra1: extra 64-bit data for CQE32
- * @extra2: extra 64-bit data for CQE32
- *
+ * @req: (optional) pointer to a submitted request
+ * @cqe: pointer to the filled in CQE being posted
*/
TRACE_EVENT(io_uring_complete,
- TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
- u64 extra1, u64 extra2),
+	TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe),
- TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),
+ TP_ARGS(ctx, req, cqe),
TP_STRUCT__entry (
__field( void *, ctx )
@@ -343,11 +337,11 @@ TRACE_EVENT(io_uring_complete,
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
- __entry->user_data = user_data;
- __entry->res = res;
- __entry->cflags = cflags;
- __entry->extra1 = extra1;
- __entry->extra2 = extra2;
+ __entry->user_data = cqe->user_data;
+ __entry->res = cqe->res;
+ __entry->cflags = cqe->flags;
+ __entry->extra1 = io_ctx_cqe32(ctx) ? cqe->big_cqe[0] : 0;
+ __entry->extra2 = io_ctx_cqe32(ctx) ? cqe->big_cqe[1] : 0;
),
TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h
index f0f7b3cb2041..c1c50df9ecfd 100644
--- a/include/trace/events/mce.h
+++ b/include/trace/events/mce.h
@@ -19,9 +19,9 @@
TRACE_EVENT(mce_record,
- TP_PROTO(struct mce *m),
+ TP_PROTO(struct mce_hw_err *err),
- TP_ARGS(m),
+ TP_ARGS(err),
TP_STRUCT__entry(
__field( u64, mcgcap )
@@ -43,31 +43,33 @@ TRACE_EVENT(mce_record,
__field( u8, bank )
__field( u8, cpuvendor )
__field( u32, microcode )
+ __dynamic_array(u8, v_data, sizeof(err->vendor))
),
TP_fast_assign(
- __entry->mcgcap = m->mcgcap;
- __entry->mcgstatus = m->mcgstatus;
- __entry->status = m->status;
- __entry->addr = m->addr;
- __entry->misc = m->misc;
- __entry->synd = m->synd;
- __entry->ipid = m->ipid;
- __entry->ip = m->ip;
- __entry->tsc = m->tsc;
- __entry->ppin = m->ppin;
- __entry->walltime = m->time;
- __entry->cpu = m->extcpu;
- __entry->cpuid = m->cpuid;
- __entry->apicid = m->apicid;
- __entry->socketid = m->socketid;
- __entry->cs = m->cs;
- __entry->bank = m->bank;
- __entry->cpuvendor = m->cpuvendor;
- __entry->microcode = m->microcode;
+ __entry->mcgcap = err->m.mcgcap;
+ __entry->mcgstatus = err->m.mcgstatus;
+ __entry->status = err->m.status;
+ __entry->addr = err->m.addr;
+ __entry->misc = err->m.misc;
+ __entry->synd = err->m.synd;
+ __entry->ipid = err->m.ipid;
+ __entry->ip = err->m.ip;
+ __entry->tsc = err->m.tsc;
+ __entry->ppin = err->m.ppin;
+ __entry->walltime = err->m.time;
+ __entry->cpu = err->m.extcpu;
+ __entry->cpuid = err->m.cpuid;
+ __entry->apicid = err->m.apicid;
+ __entry->socketid = err->m.socketid;
+ __entry->cs = err->m.cs;
+ __entry->bank = err->m.bank;
+ __entry->cpuvendor = err->m.cpuvendor;
+ __entry->microcode = err->m.microcode;
+ memcpy(__get_dynamic_array(v_data), &err->vendor, sizeof(err->vendor));
),
- TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR: %016Lx, MISC: %016Lx, SYND: %016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PPIN: %llx, vendor: %u, CPUID: %x, time: %llu, socket: %u, APIC: %x, microcode: %x",
+ TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016llx, IPID: %016llx, ADDR: %016llx, MISC: %016llx, SYND: %016llx, RIP: %02x:<%016llx>, TSC: %llx, PPIN: %llx, vendor: %u, CPUID: %x, time: %llu, socket: %u, APIC: %x, microcode: %x, vendor data: %s",
__entry->cpu,
__entry->mcgcap, __entry->mcgstatus,
__entry->bank, __entry->status,
@@ -83,7 +85,8 @@ TRACE_EVENT(mce_record,
__entry->walltime,
__entry->socketid,
__entry->apicid,
- __entry->microcode)
+ __entry->microcode,
+ __print_dynamic_array(v_data, sizeof(u8)))
);
#endif /* _TRACE_MCE_H */
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 69975c9c6823..bf511bca896e 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -450,7 +450,7 @@ TRACE_EVENT(netfs_folio,
struct address_space *__m = READ_ONCE(folio->mapping);
__entry->ino = __m ? __m->host->i_ino : 0;
__entry->why = why;
- __entry->index = folio_index(folio);
+ __entry->index = folio->index;
__entry->nr = folio_nr_pages(folio);
),
diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h
index 8022701c446d..8ba898fd335c 100644
--- a/include/trace/events/pwm.h
+++ b/include/trace/events/pwm.h
@@ -8,15 +8,135 @@
#include <linux/pwm.h>
#include <linux/tracepoint.h>
+#define TP_PROTO_pwm(args...) \
+ TP_PROTO(struct pwm_device *pwm, args)
+
+#define TP_ARGS_pwm(args...) \
+ TP_ARGS(pwm, args)
+
+#define TP_STRUCT__entry_pwm(args...) \
+ TP_STRUCT__entry( \
+ __field(unsigned int, chipid) \
+ __field(unsigned int, hwpwm) \
+ args)
+
+#define TP_fast_assign_pwm(args...) \
+ TP_fast_assign( \
+ __entry->chipid = pwm->chip->id; \
+ __entry->hwpwm = pwm->hwpwm; \
+ args)
+
+#define TP_printk_pwm(fmt, args...) \
+ TP_printk("pwmchip%u.%u: " fmt, __entry->chipid, __entry->hwpwm, args)
+
+#define __field_pwmwf(wf) \
+ __field(u64, wf ## _period_length_ns) \
+ __field(u64, wf ## _duty_length_ns) \
+ __field(u64, wf ## _duty_offset_ns) \
+
+#define fast_assign_pwmwf(wf) \
+ __entry->wf ## _period_length_ns = wf->period_length_ns; \
+ __entry->wf ## _duty_length_ns = wf->duty_length_ns; \
+ __entry->wf ## _duty_offset_ns = wf->duty_offset_ns
+
+#define printk_pwmwf_format(wf) \
+ "%lld/%lld [+%lld]"
+
+#define printk_pwmwf_formatargs(wf) \
+ __entry->wf ## _duty_length_ns, __entry->wf ## _period_length_ns, __entry->wf ## _duty_offset_ns
+
+TRACE_EVENT(pwm_round_waveform_tohw,
+
+ TP_PROTO_pwm(const struct pwm_waveform *wf, void *wfhw, int err),
+
+ TP_ARGS_pwm(wf, wfhw, err),
+
+ TP_STRUCT__entry_pwm(
+ __field_pwmwf(wf)
+ __field(void *, wfhw)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ fast_assign_pwmwf(wf);
+ __entry->wfhw = wfhw;
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm(printk_pwmwf_format(wf) " > %p err=%d",
+ printk_pwmwf_formatargs(wf), __entry->wfhw, __entry->err)
+);
+
+TRACE_EVENT(pwm_round_waveform_fromhw,
+
+ TP_PROTO_pwm(const void *wfhw, struct pwm_waveform *wf, int err),
+
+ TP_ARGS_pwm(wfhw, wf, err),
+
+ TP_STRUCT__entry_pwm(
+ __field(const void *, wfhw)
+ __field_pwmwf(wf)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ __entry->wfhw = wfhw;
+ fast_assign_pwmwf(wf);
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm("%p > " printk_pwmwf_format(wf) " err=%d",
+ __entry->wfhw, printk_pwmwf_formatargs(wf), __entry->err)
+);
+
+TRACE_EVENT(pwm_read_waveform,
+
+ TP_PROTO_pwm(void *wfhw, int err),
+
+ TP_ARGS_pwm(wfhw, err),
+
+ TP_STRUCT__entry_pwm(
+ __field(void *, wfhw)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ __entry->wfhw = wfhw;
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm("%p err=%d",
+ __entry->wfhw, __entry->err)
+);
+
+TRACE_EVENT(pwm_write_waveform,
+
+ TP_PROTO_pwm(const void *wfhw, int err),
+
+ TP_ARGS_pwm(wfhw, err),
+
+ TP_STRUCT__entry_pwm(
+ __field(const void *, wfhw)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ __entry->wfhw = wfhw;
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm("%p err=%d",
+ __entry->wfhw, __entry->err)
+);
+
+
DECLARE_EVENT_CLASS(pwm,
TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state, int err),
TP_ARGS(pwm, state, err),
- TP_STRUCT__entry(
- __field(unsigned int, chipid)
- __field(unsigned int, hwpwm)
+ TP_STRUCT__entry_pwm(
__field(u64, period)
__field(u64, duty_cycle)
__field(enum pwm_polarity, polarity)
@@ -24,9 +144,7 @@ DECLARE_EVENT_CLASS(pwm,
__field(int, err)
),
- TP_fast_assign(
- __entry->chipid = pwm->chip->id;
- __entry->hwpwm = pwm->hwpwm;
+ TP_fast_assign_pwm(
__entry->period = state->period;
__entry->duty_cycle = state->duty_cycle;
__entry->polarity = state->polarity;
@@ -34,8 +152,8 @@ DECLARE_EVENT_CLASS(pwm,
__entry->err = err;
),
- TP_printk("pwmchip%u.%u: period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
- __entry->chipid, __entry->hwpwm, __entry->period, __entry->duty_cycle,
+ TP_printk_pwm("period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
+ __entry->period, __entry->duty_cycle,
__entry->polarity, __entry->enabled, __entry->err)
);
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index a1b126a6b0d7..d03e0bd8c028 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -287,6 +287,7 @@
EM(rxrpc_call_see_input, "SEE input ") \
EM(rxrpc_call_see_release, "SEE release ") \
EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
+ EM(rxrpc_call_see_waiting_call, "SEE q-conn ") \
E_(rxrpc_call_see_zap, "SEE zap ")
#define rxrpc_txqueue_traces \
@@ -772,6 +773,31 @@ TRACE_EVENT(rxrpc_rx_done,
TP_printk("r=%d a=%d", __entry->result, __entry->abort_code)
);
+TRACE_EVENT(rxrpc_abort_call,
+ TP_PROTO(const struct rxrpc_call *call, int abort_code),
+
+ TP_ARGS(call, abort_code),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_nr)
+ __field(enum rxrpc_abort_reason, why)
+ __field(int, abort_code)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ __entry->call_nr = call->debug_id;
+ __entry->why = call->send_abort_why;
+ __entry->abort_code = abort_code;
+ __entry->error = call->send_abort_err;
+ ),
+
+ TP_printk("c=%08x a=%d e=%d %s",
+ __entry->call_nr,
+ __entry->abort_code, __entry->error,
+ __print_symbolic(__entry->why, rxrpc_abort_reasons))
+ );
+
TRACE_EVENT(rxrpc_abort,
TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why,
u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error),
diff --git a/include/trace/events/timestamp.h b/include/trace/events/timestamp.h
new file mode 100644
index 000000000000..c9e5ec930054
--- /dev/null
+++ b/include/trace/events/timestamp.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timestamp
+
+#if !defined(_TRACE_TIMESTAMP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TIMESTAMP_H
+
+#include <linux/tracepoint.h>
+#include <linux/fs.h>
+
+#define CTIME_QUERIED_FLAGS \
+ { I_CTIME_QUERIED, "Q" }
+
+DECLARE_EVENT_CLASS(ctime,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime),
+
+ TP_ARGS(inode, ctime),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(time64_t, ctime_s)
+ __field(u32, ctime_ns)
+ __field(u32, gen)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->gen = inode->i_generation;
+ __entry->ctime_s = ctime->tv_sec;
+ __entry->ctime_ns = ctime->tv_nsec;
+ ),
+
+ TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
+ __entry->ctime_s, __entry->ctime_ns
+ )
+);
+
+DEFINE_EVENT(ctime, inode_set_ctime_to_ts,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime),
+ TP_ARGS(inode, ctime));
+
+DEFINE_EVENT(ctime, ctime_xchg_skip,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime),
+ TP_ARGS(inode, ctime));
+
+TRACE_EVENT(ctime_ns_xchg,
+ TP_PROTO(struct inode *inode,
+ u32 old,
+ u32 new,
+ u32 cur),
+
+ TP_ARGS(inode, old, new, cur),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(u32, gen)
+ __field(u32, old)
+ __field(u32, new)
+ __field(u32, cur)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->gen = inode->i_generation;
+ __entry->old = old;
+ __entry->new = new;
+ __entry->cur = cur;
+ ),
+
+ TP_printk("ino=%d:%d:%ld:%u old=%u:%s new=%u cur=%u:%s",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
+ __entry->old & ~I_CTIME_QUERIED,
+ __print_flags(__entry->old & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS),
+ __entry->new,
+ __entry->cur & ~I_CTIME_QUERIED,
+ __print_flags(__entry->cur & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS)
+ )
+);
+
+TRACE_EVENT(fill_mg_cmtime,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime,
+ struct timespec64 *mtime),
+
+ TP_ARGS(inode, ctime, mtime),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(time64_t, ctime_s)
+ __field(time64_t, mtime_s)
+ __field(u32, ctime_ns)
+ __field(u32, mtime_ns)
+ __field(u32, gen)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->gen = inode->i_generation;
+ __entry->ctime_s = ctime->tv_sec;
+ __entry->mtime_s = mtime->tv_sec;
+ __entry->ctime_ns = ctime->tv_nsec;
+ __entry->mtime_ns = mtime->tv_nsec;
+ ),
+
+ TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u mtime=%lld.%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
+ __entry->ctime_s, __entry->ctime_ns,
+ __entry->mtime_s, __entry->mtime_ns
+ )
+);
+#endif /* _TRACE_TIMESTAMP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h
index c1fb1355d309..1e7b0bef95f5 100644
--- a/include/trace/stages/stage3_trace_output.h
+++ b/include/trace/stages/stage3_trace_output.h
@@ -119,6 +119,14 @@
trace_print_array_seq(p, array, count, el_size); \
})
+#undef __print_dynamic_array
+#define __print_dynamic_array(array, el_size) \
+ ({ \
+ __print_array(__get_dynamic_array(array), \
+ __get_dynamic_array_len(array) / (el_size), \
+ (el_size)); \
+ })
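
/*
 * Usage mirrors __print_array(), with the element count derived from the
 * dynamic array length; the mce_record event in this series calls
 * __print_dynamic_array(v_data, sizeof(u8)) from its TP_printk().
 */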
+
#undef __print_hex_dump
#define __print_hex_dump(prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii) \
diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h
index bcb960d16fc0..fcd564a590f4 100644
--- a/include/trace/stages/stage7_class_define.h
+++ b/include/trace/stages/stage7_class_define.h
@@ -22,6 +22,7 @@
#undef __get_rel_cpumask
#undef __get_rel_sockaddr
#undef __print_array
+#undef __print_dynamic_array
#undef __print_hex_dump
#undef __get_buf
diff --git a/include/uapi/asm-generic/ioctl.h b/include/uapi/asm-generic/ioctl.h
index a84f4db8a250..e3290a5824c9 100644
--- a/include/uapi/asm-generic/ioctl.h
+++ b/include/uapi/asm-generic/ioctl.h
@@ -82,13 +82,13 @@
* NOTE: _IOW means userland is writing and kernel is reading. _IOR
* means userland is reading and kernel is writing.
*/
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,argtype) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(argtype)))
+#define _IOW(type,nr,argtype) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(argtype)))
+#define _IOWR(type,nr,argtype) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(argtype)))
+#define _IOR_BAD(type,nr,argtype) _IOC(_IOC_READ,(type),(nr),sizeof(argtype))
+#define _IOW_BAD(type,nr,argtype) _IOC(_IOC_WRITE,(type),(nr),sizeof(argtype))
+#define _IOWR_BAD(type,nr,argtype) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(argtype))
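
/*
 * Example of the renamed argtype parameter in use; 'M', the numbers and
 * struct mydrv_config are illustrative values for a hypothetical driver:
 *
 *	#define MYDRV_IOC_SET_CONFIG _IOW('M', 0x01, struct mydrv_config)
 *	#define MYDRV_IOC_GET_CONFIG _IOR('M', 0x02, struct mydrv_config)
 */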
/* used to decode ioctl numbers.. */
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h
index 57e8195d0b53..5e3d61ddbd8c 100644
--- a/include/uapi/asm-generic/mman.h
+++ b/include/uapi/asm-generic/mman.h
@@ -19,4 +19,8 @@
#define MCL_FUTURE 2 /* lock all future mappings */
#define MCL_ONFAULT 4 /* lock all pages that are faulted in */
+#define SHADOW_STACK_SET_TOKEN (1ULL << 0) /* Set up a restore token in the shadow stack */
+#define SHADOW_STACK_SET_MARKER (1ULL << 1) /* Set up a top of stack marker in the shadow stack */
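
/*
 * Hedged sketch: these flags are meant for the map_shadow_stack() syscall,
 * whose availability is architecture dependent, e.g.
 *
 *	addr = syscall(__NR_map_shadow_stack, 0, size,
 *		       SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER);
 */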
+
+
#endif /* __ASM_GENERIC_MMAN_H */
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index b7bc545ec3b2..5a1ca43b5fc6 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -46,7 +46,7 @@ union __sifields {
__kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
+ int _sys_private; /* Not used by the kernel. Historic leftover. Always 0. */
} _timer;
/* POSIX.1b signals */
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 3b4e3e815602..deacfd6dd197 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -141,6 +141,8 @@
#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
#define SO_DEVMEM_DONTNEED 80
+#define SCM_TS_OPT_ID 81
+
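
/*
 * Hedged userspace sketch: supplying a fixed timestamp key for one
 * sendmsg() call via SCM_TS_OPT_ID. Assumes SO_TIMESTAMPING with
 * SOF_TIMESTAMPING_OPT_ID is already enabled on fd; error handling
 * is omitted.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/types.h>

static ssize_t send_with_ts_id(int fd, const void *buf, size_t len, __u32 id)
{
	char control[CMSG_SPACE(sizeof(__u32))] = {};
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_TS_OPT_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	memcpy(CMSG_DATA(cmsg), &id, sizeof(id));

	return sendmsg(fd, &msg, 0);
}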
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 5bf6148cac2b..88dc393c2bca 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -841,8 +841,17 @@ __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
#define __NR_mseal 462
__SYSCALL(__NR_mseal, sys_mseal)
+#define __NR_setxattrat 463
+__SYSCALL(__NR_setxattrat, sys_setxattrat)
+#define __NR_getxattrat 464
+__SYSCALL(__NR_getxattrat, sys_getxattrat)
+#define __NR_listxattrat 465
+__SYSCALL(__NR_listxattrat, sys_listxattrat)
+#define __NR_removexattrat 466
+__SYSCALL(__NR_removexattrat, sys_removexattrat)
+
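/*
 * Hedged sketch: the new AT-variants take a dirfd/path pair like other
 * *at() syscalls; the exact argument layout (struct xattr_args) is an
 * assumption here and is not defined in this header, e.g.
 *
 *	err = syscall(__NR_setxattrat, dfd, "file", 0, "user.tag",
 *		      &args, sizeof(args));
 */
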
#undef __NR_syscalls
-#define __NR_syscalls 463
+#define __NR_syscalls 467
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index 6e25753015df..439132a819ea 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -9,6 +9,7 @@
#include <asm/byteorder.h>
#include <linux/if_ether.h>
+#include <linux/stddef.h>
#include <linux/types.h>
/**
@@ -593,19 +594,6 @@ struct batadv_tvlv_gateway_data {
};
/**
- * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
- * @flags: translation table flags (see batadv_tt_data_flags)
- * @ttvn: translation table version number
- * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
- * one batadv_tvlv_tt_vlan_data object per announced vlan
- */
-struct batadv_tvlv_tt_data {
- __u8 flags;
- __u8 ttvn;
- __be16 num_vlan;
-};
-
-/**
* struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through
* the tt tvlv container
* @crc: crc32 checksum of the entries belonging to this vlan
@@ -619,6 +607,21 @@ struct batadv_tvlv_tt_vlan_data {
};
/**
+ * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
+ * @flags: translation table flags (see batadv_tt_data_flags)
+ * @ttvn: translation table version number
+ * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
+ * one batadv_tvlv_tt_vlan_data object per announced vlan
+ * @vlan_data: array of batadv_tvlv_tt_vlan_data objects
+ */
+struct batadv_tvlv_tt_data {
+ __u8 flags;
+ __u8 ttvn;
+ __be16 num_vlan;
+ struct batadv_tvlv_tt_vlan_data vlan_data[] __counted_by_be(num_vlan);
+};
+
+/**
* struct batadv_tvlv_tt_change - translation table diff data
* @flags: status indicators concerning the non-mesh client (see
* batadv_tt_client_flags)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4a939c90dc2e..4162afc6b5d0 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1116,6 +1116,7 @@ enum bpf_attach_type {
BPF_NETKIT_PRIMARY,
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
+ BPF_TRACE_UPROBE_SESSION,
__MAX_BPF_ATTACH_TYPE
};
@@ -1973,6 +1974,8 @@ union bpf_attr {
* program.
* Return
* The SMP id of the processor running the program.
+ * Attributes
+ * __bpf_fastcall
*
* long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
* Description
@@ -3104,10 +3107,6 @@ union bpf_attr {
* with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
* option, and in this case it only works on functions tagged with
* **ALLOW_ERROR_INJECTION** in the kernel code.
- *
- * Also, the helper is only available for the architectures having
- * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
- * x86 architecture is the only one to support this feature.
* Return
* 0
*
@@ -5372,7 +5371,7 @@ union bpf_attr {
* Currently, the **flags** must be 0. Currently, nr_loops is
* limited to 1 << 23 (~8 million) loops.
*
- * long (\*callback_fn)(u32 index, void \*ctx);
+ * long (\*callback_fn)(u64 index, void \*ctx);
*
* where **index** is the current index in the loop. The index
* is zero-indexed.
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index cdf6ad872149..d3b222d7af24 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -1049,6 +1049,29 @@ struct btrfs_ioctl_encoded_io_args {
#define BTRFS_ENCODED_IO_ENCRYPTION_NONE 0
#define BTRFS_ENCODED_IO_ENCRYPTION_TYPES 1
+/*
+ * Wait for the subvolume cleaning process. This queries the kernel cleaning
+ * queue, whose contents can change between calls.
+ *
+ * - FOR_ONE - wait for cleaning of the specified subvolid
+ * - FOR_QUEUED - wait for all currently queued subvolumes
+ * - COUNT - count the number of queued subvolumes
+ * - PEEK_FIRST - read the first subvolid in the queue (to be cleaned or
+ *   already being cleaned), or 0 if the queue is empty
+ * - PEEK_LAST - read the last subvolid in the queue, or 0 if the queue is empty
+ */
+struct btrfs_ioctl_subvol_wait {
+ __u64 subvolid;
+ __u32 mode;
+ __u32 count;
+};
+
+#define BTRFS_SUBVOL_SYNC_WAIT_FOR_ONE (0)
+#define BTRFS_SUBVOL_SYNC_WAIT_FOR_QUEUED (1)
+#define BTRFS_SUBVOL_SYNC_COUNT (2)
+#define BTRFS_SUBVOL_SYNC_PEEK_FIRST (3)
+#define BTRFS_SUBVOL_SYNC_PEEK_LAST (4)
+
/* Error codes as returned by the kernel */
enum btrfs_err_code {
BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1,
@@ -1181,6 +1204,8 @@ enum btrfs_err_code {
struct btrfs_ioctl_encoded_io_args)
#define BTRFS_IOC_ENCODED_WRITE _IOW(BTRFS_IOCTL_MAGIC, 64, \
struct btrfs_ioctl_encoded_io_args)
+#define BTRFS_IOC_SUBVOL_SYNC_WAIT _IOW(BTRFS_IOCTL_MAGIC, 65, \
+ struct btrfs_ioctl_subvol_wait)
#ifdef __cplusplus
}
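A hedged usage sketch for the new ioctl: count what is queued, then wait for the current queue to drain. Whether COUNT returns its result in the count field is an assumption drawn from the comment above; error handling is omitted.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int drain_subvol_queue(int fs_fd)
{
        struct btrfs_ioctl_subvol_wait wait = {
                .mode = BTRFS_SUBVOL_SYNC_COUNT,
        };

        /* Read how many subvolumes are currently queued for cleaning. */
        if (ioctl(fs_fd, BTRFS_IOC_SUBVOL_SYNC_WAIT, &wait) < 0)
                return -1;
        printf("queued subvolumes: %u\n", wait.count);

        /* Block until everything queued right now has been cleaned. */
        wait.mode = BTRFS_SUBVOL_SYNC_WAIT_FOR_QUEUED;
        return ioctl(fs_fd, BTRFS_IOC_SUBVOL_SYNC_WAIT, &wait);
}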
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 20a6c0fc149e..db05e0419972 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -64,6 +64,7 @@ enum crypto_attr_type_t {
CRYPTOCFGA_STAT_AKCIPHER, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_KPP, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_ACOMP, /* No longer supported, do not use. */
+ CRYPTOCFGA_REPORT_SIG, /* struct crypto_report_sig */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -207,6 +208,10 @@ struct crypto_report_acomp {
char type[CRYPTO_MAX_NAME];
};
+struct crypto_report_sig {
+ char type[CRYPTO_MAX_NAME];
+};
+
#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
sizeof(struct crypto_report_blkcipher))
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index b0654ade7b7e..bf97d4b6d51f 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -79,6 +79,29 @@ enum dpll_lock_status_error {
DPLL_LOCK_STATUS_ERROR_MAX = (__DPLL_LOCK_STATUS_ERROR_MAX - 1)
};
+/*
+ * Level of quality of a clock device. This mainly applies when the dpll
+ * lock-status is DPLL_LOCK_STATUS_HOLDOVER. The current list is defined
+ * according to table 11-7 of the ITU-T G.8264/Y.1364 document. It may be
+ * freely extended with other ITU-T-defined clock qualities, or with ones
+ * defined by another standardization body (for those, please use a
+ * different prefix).
+ */
+enum dpll_clock_quality_level {
+ DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRC = 1,
+ DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_A,
+ DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_B,
+ DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EEC1,
+ DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRTC,
+ DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRTC,
+ DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EEEC,
+ DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRC,
+
+ /* private: */
+ __DPLL_CLOCK_QUALITY_LEVEL_MAX,
+ DPLL_CLOCK_QUALITY_LEVEL_MAX = (__DPLL_CLOCK_QUALITY_LEVEL_MAX - 1)
+};
+
#define DPLL_TEMP_DIVIDER 1000
/**
@@ -180,6 +203,7 @@ enum dpll_a {
DPLL_A_TEMP,
DPLL_A_TYPE,
DPLL_A_LOCK_STATUS_ERROR,
+ DPLL_A_CLOCK_QUALITY_LEVEL,
__DPLL_A_MAX,
DPLL_A_MAX = (__DPLL_A_MAX - 1)
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index b9935988da5c..9adc218fb6df 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -443,6 +443,7 @@ typedef struct elf64_shdr {
#define NT_ARM_ZT 0x40d /* ARM SME ZT registers */
#define NT_ARM_FPMR 0x40e /* ARM floating point mode register */
#define NT_ARM_POE 0x40f /* ARM POE registers */
+#define NT_ARM_GCS 0x410 /* ARM GCS state */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index c405ed63acfa..7e1b3820f91f 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -2526,12 +2526,19 @@ struct ethtool_link_settings {
__u8 master_slave_state;
__u8 rate_matching;
__u32 reserved[7];
+#ifndef __KERNEL__
+ /* Linux builds with -Wflex-array-member-not-at-end but does
+ * not use the "link_mode_masks" member. Leave it defined for
+ * userspace for now, and when userspace wants to start using
+ * -Wfamnae, we'll need a new solution.
+ */
__u32 link_mode_masks[];
/* layout of link_mode_masks fields:
* __u32 map_supported[link_mode_masks_nwords];
* __u32 map_advertising[link_mode_masks_nwords];
* __u32 map_lp_advertising[link_mode_masks_nwords];
*/
+#endif
};
/**
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 87e2dec79fea..a40833bf2855 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -154,8 +154,4 @@
usable with open_by_handle_at(2). */
#define AT_HANDLE_MNT_ID_UNIQUE 0x001 /* Return the u64 unique mount ID. */
-#if defined(__KERNEL__)
-#define AT_GETATTR_NOSEC 0x80000000
-#endif
-
#endif /* _UAPI_LINUX_FCNTL_H */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 6dc258993b17..2575e0cd9b48 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -377,6 +377,7 @@ enum {
IFLA_GSO_IPV4_MAX_SIZE,
IFLA_GRO_IPV4_MAX_SIZE,
IFLA_DPLL_PIN,
+ IFLA_MAX_PACING_OFFLOAD_HORIZON,
__IFLA_MAX
};
@@ -1292,6 +1293,19 @@ enum netkit_mode {
NETKIT_L3,
};
+/* NETKIT_SCRUB_NONE leaves clearing skb->{mark,priority} up to
+ * the BPF program if attached. This also means the latter can
+ * consume the two fields if they were populated earlier.
+ *
+ * NETKIT_SCRUB_DEFAULT zeroes skb->{mark,priority} fields before
+ * invoking the attached BPF program when the peer device resides
+ * in a different network namespace. This is the default behavior.
+ */
+enum netkit_scrub {
+ NETKIT_SCRUB_NONE,
+ NETKIT_SCRUB_DEFAULT,
+};
+
enum {
IFLA_NETKIT_UNSPEC,
IFLA_NETKIT_PEER_INFO,
@@ -1299,6 +1313,8 @@ enum {
IFLA_NETKIT_POLICY,
IFLA_NETKIT_PEER_POLICY,
IFLA_NETKIT_MODE,
+ IFLA_NETKIT_SCRUB,
+ IFLA_NETKIT_PEER_SCRUB,
__IFLA_NETKIT_MAX,
};
#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1)
@@ -1942,6 +1958,7 @@ struct ifla_rmnet_flags {
enum {
IFLA_MCTP_UNSPEC,
IFLA_MCTP_NET,
+ IFLA_MCTP_PHYS_BINDING,
__IFLA_MCTP_MAX,
};
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 1fe79e750470..4418d0192959 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -200,6 +200,9 @@ enum io_uring_sqe_flags_bit {
*/
#define IORING_SETUP_NO_SQARRAY (1U << 16)
+/* Use hybrid poll in iopoll process */
+#define IORING_SETUP_HYBRID_IOPOLL (1U << 17)
+
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -416,6 +419,9 @@ enum io_uring_msg_ring_flags {
* IORING_NOP_INJECT_RESULT Inject result from sqe->result
*/
#define IORING_NOP_INJECT_RESULT (1U << 0)
+#define IORING_NOP_FILE (1U << 1)
+#define IORING_NOP_FIXED_FILE (1U << 2)
+#define IORING_NOP_FIXED_BUFFER (1U << 3)
/*
* IO completion data structure (Completion Queue Entry)
@@ -518,6 +524,7 @@ struct io_cqring_offsets {
#define IORING_ENTER_EXT_ARG (1U << 3)
#define IORING_ENTER_REGISTERED_RING (1U << 4)
#define IORING_ENTER_ABS_TIMER (1U << 5)
+#define IORING_ENTER_EXT_ARG_REG (1U << 6)
/*
* Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -612,6 +619,16 @@ enum io_uring_register_op {
/* clone registered buffers from source ring to current ring */
IORING_REGISTER_CLONE_BUFFERS = 30,
+ /* send MSG_RING without having a ring */
+ IORING_REGISTER_SEND_MSG_RING = 31,
+
+ /* 32 reserved for zc rx */
+
+ /* resize CQ ring */
+ IORING_REGISTER_RESIZE_RINGS = 33,
+
+ IORING_REGISTER_MEM_REGION = 34,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -632,6 +649,31 @@ struct io_uring_files_update {
__aligned_u64 /* __s32 * */ fds;
};
+enum {
+ /* initialise with user-provided memory pointed to by user_addr */
+ IORING_MEM_REGION_TYPE_USER = 1,
+};
+
+struct io_uring_region_desc {
+ __u64 user_addr;
+ __u64 size;
+ __u32 flags;
+ __u32 id;
+ __u64 mmap_offset;
+ __u64 __resv[4];
+};
+
+enum {
+ /* expose the region as registered wait arguments */
+ IORING_MEM_REGION_REG_WAIT_ARG = 1,
+};
+
+struct io_uring_mem_region_reg {
+ __u64 region_uptr; /* struct io_uring_region_desc * */
+ __u64 flags;
+ __u64 __resv[2];
+};
+
/*
* Register a fully sparse file space, rather than pass in an array of all
* -1 file descriptors.
@@ -698,13 +740,17 @@ struct io_uring_clock_register {
};
enum {
- IORING_REGISTER_SRC_REGISTERED = 1,
+ IORING_REGISTER_SRC_REGISTERED = (1U << 0),
+ IORING_REGISTER_DST_REPLACE = (1U << 1),
};
struct io_uring_clone_buffers {
__u32 src_fd;
__u32 flags;
- __u32 pad[6];
+ __u32 src_off;
+ __u32 dst_off;
+ __u32 nr;
+ __u32 pad[3];
};
struct io_uring_buf {
@@ -768,12 +814,40 @@ struct io_uring_buf_status {
__u32 resv[8];
};
+enum io_uring_napi_op {
+ /* register/unregister backward-compatible opcode */
+ IO_URING_NAPI_REGISTER_OP = 0,
+
+ /* opcodes to update napi_list when static tracking is used */
+ IO_URING_NAPI_STATIC_ADD_ID = 1,
+ IO_URING_NAPI_STATIC_DEL_ID = 2
+};
+
+enum io_uring_napi_tracking_strategy {
+ /* value must be 0 for backward compatibility */
+ IO_URING_NAPI_TRACKING_DYNAMIC = 0,
+ IO_URING_NAPI_TRACKING_STATIC = 1,
+ IO_URING_NAPI_TRACKING_INACTIVE = 255
+};
+
/* argument for IORING_(UN)REGISTER_NAPI */
struct io_uring_napi {
__u32 busy_poll_to;
__u8 prefer_busy_poll;
- __u8 pad[3];
- __u64 resv;
+
+ /* an io_uring_napi_op value */
+ __u8 opcode;
+ __u8 pad[2];
+
+ /*
+ * for IO_URING_NAPI_REGISTER_OP, it is an
+ * io_uring_napi_tracking_strategy value.
+ *
+ * for IO_URING_NAPI_STATIC_ADD_ID/IO_URING_NAPI_STATIC_DEL_ID
+ * it is the napi id to add/del from napi_list.
+ */
+ __u32 op_param;
+ __u32 resv;
};
/*
@@ -795,6 +869,43 @@ enum io_uring_register_restriction_op {
IORING_RESTRICTION_LAST
};
+enum {
+ IORING_REG_WAIT_TS = (1U << 0),
+};
+
+/*
+ * Argument for IORING_REGISTER_CQWAIT_REG, registering a region of
+ * struct io_uring_reg_wait that can be indexed when io_uring_enter(2) is
+ * called rather than pass in a wait argument structure separately.
+ */
+struct io_uring_cqwait_reg_arg {
+ __u32 flags;
+ __u32 struct_size;
+ __u32 nr_entries;
+ __u32 pad;
+ __u64 user_addr;
+ __u64 pad2[3];
+};
+
+/*
+ * Argument for io_uring_enter(2) with
+ * IORING_GETEVENTS | IORING_ENTER_EXT_ARG_REG set, where the actual argument
+ * is an index into a previously registered fixed wait region described by
+ * the below structure.
+ */
+struct io_uring_reg_wait {
+ struct __kernel_timespec ts;
+ __u32 min_wait_usec;
+ __u32 flags;
+ __u64 sigmask;
+ __u32 sigmask_sz;
+ __u32 pad[3];
+ __u64 pad2[2];
+};
+
+/*
+ * Argument for io_uring_enter(2) with IORING_GETEVENTS | IORING_ENTER_EXT_ARG
+ */
struct io_uring_getevents_arg {
__u64 sigmask;
__u32 sigmask_sz;
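A minimal sketch of filling one entry of a registered wait region; io_uring_enter(2) is then passed the entry index together with IORING_ENTER_EXT_ARG_REG rather than a pointer. The registration plumbing itself is omitted (see liburing for the full sequence).

#include <string.h>
#include <linux/io_uring.h>

/* Configure entry 0 of the region for a 10 ms timed wait. */
static void setup_wait_entry(struct io_uring_reg_wait *region)
{
        struct io_uring_reg_wait *rw = &region[0];

        memset(rw, 0, sizeof(*rw));
        rw->ts.tv_sec = 0;
        rw->ts.tv_nsec = 10 * 1000 * 1000;
        rw->flags = IORING_REG_WAIT_TS; /* honour the timespec above */
}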
diff --git a/include/uapi/linux/media/raspberrypi/pisp_fe_config.h b/include/uapi/linux/media/raspberrypi/pisp_fe_config.h
new file mode 100644
index 000000000000..77237460a3b5
--- /dev/null
+++ b/include/uapi/linux/media/raspberrypi/pisp_fe_config.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * RP1 PiSP Front End Driver Configuration structures
+ *
+ * Copyright (C) 2021 - Raspberry Pi Ltd.
+ *
+ */
+#ifndef _UAPI_PISP_FE_CONFIG_
+#define _UAPI_PISP_FE_CONFIG_
+
+#include <linux/types.h>
+
+#include "pisp_common.h"
+#include "pisp_fe_statistics.h"
+
+#define PISP_FE_NUM_OUTPUTS 2
+
+enum pisp_fe_enable {
+ PISP_FE_ENABLE_INPUT = 0x000001,
+ PISP_FE_ENABLE_DECOMPRESS = 0x000002,
+ PISP_FE_ENABLE_DECOMPAND = 0x000004,
+ PISP_FE_ENABLE_BLA = 0x000008,
+ PISP_FE_ENABLE_DPC = 0x000010,
+ PISP_FE_ENABLE_STATS_CROP = 0x000020,
+ PISP_FE_ENABLE_DECIMATE = 0x000040,
+ PISP_FE_ENABLE_BLC = 0x000080,
+ PISP_FE_ENABLE_CDAF_STATS = 0x000100,
+ PISP_FE_ENABLE_AWB_STATS = 0x000200,
+ PISP_FE_ENABLE_RGBY = 0x000400,
+ PISP_FE_ENABLE_LSC = 0x000800,
+ PISP_FE_ENABLE_AGC_STATS = 0x001000,
+ PISP_FE_ENABLE_CROP0 = 0x010000,
+ PISP_FE_ENABLE_DOWNSCALE0 = 0x020000,
+ PISP_FE_ENABLE_COMPRESS0 = 0x040000,
+ PISP_FE_ENABLE_OUTPUT0 = 0x080000,
+ PISP_FE_ENABLE_CROP1 = 0x100000,
+ PISP_FE_ENABLE_DOWNSCALE1 = 0x200000,
+ PISP_FE_ENABLE_COMPRESS1 = 0x400000,
+ PISP_FE_ENABLE_OUTPUT1 = 0x800000
+};
+
+#define PISP_FE_ENABLE_CROP(i) (PISP_FE_ENABLE_CROP0 << (4 * (i)))
+#define PISP_FE_ENABLE_DOWNSCALE(i) (PISP_FE_ENABLE_DOWNSCALE0 << (4 * (i)))
+#define PISP_FE_ENABLE_COMPRESS(i) (PISP_FE_ENABLE_COMPRESS0 << (4 * (i)))
+#define PISP_FE_ENABLE_OUTPUT(i) (PISP_FE_ENABLE_OUTPUT0 << (4 * (i)))
+
+/*
+ * We use the enable flags to show when blocks are "dirty", but we need some
+ * extra ones too.
+ */
+enum pisp_fe_dirty {
+ PISP_FE_DIRTY_GLOBAL = 0x0001,
+ PISP_FE_DIRTY_FLOATING = 0x0002,
+ PISP_FE_DIRTY_OUTPUT_AXI = 0x0004
+};
+
+struct pisp_fe_global_config {
+ __u32 enables;
+ __u8 bayer_order;
+ __u8 pad[3];
+} __attribute__((packed));
+
+struct pisp_fe_input_axi_config {
+ /* burst length minus one, in the range 0..15; OR'd with flags */
+ __u8 maxlen_flags;
+ /* { prot[2:0], cache[3:0] } fields */
+ __u8 cache_prot;
+ /* QoS (only 4 LS bits are used) */
+ __u16 qos;
+} __attribute__((packed));
+
+struct pisp_fe_output_axi_config {
+ /* burst length minus one, in the range 0..15; OR'd with flags */
+ __u8 maxlen_flags;
+ /* { prot[2:0], cache[3:0] } fields */
+ __u8 cache_prot;
+ /* QoS (4 bitfields of 4 bits each for different panic levels) */
+ __u16 qos;
+ /* For Panic mode: Output FIFO panic threshold */
+ __u16 thresh;
+ /* For Panic mode: Output FIFO statistics throttle threshold */
+ __u16 throttle;
+} __attribute__((packed));
+
+struct pisp_fe_input_config {
+ __u8 streaming;
+ __u8 pad[3];
+ struct pisp_image_format_config format;
+ struct pisp_fe_input_axi_config axi;
+ /* Extra cycles delay before issuing each burst request */
+ __u8 holdoff;
+ __u8 pad2[3];
+} __attribute__((packed));
+
+struct pisp_fe_output_config {
+ struct pisp_image_format_config format;
+ __u16 ilines;
+ __u8 pad[2];
+} __attribute__((packed));
+
+struct pisp_fe_input_buffer_config {
+ __u32 addr_lo;
+ __u32 addr_hi;
+ __u16 frame_id;
+ __u16 pad;
+} __attribute__((packed));
+
+#define PISP_FE_DECOMPAND_LUT_SIZE 65
+
+struct pisp_fe_decompand_config {
+ __u16 lut[PISP_FE_DECOMPAND_LUT_SIZE];
+ __u16 pad;
+} __attribute__((packed));
+
+struct pisp_fe_dpc_config {
+ __u8 coeff_level;
+ __u8 coeff_range;
+ __u8 coeff_range2;
+#define PISP_FE_DPC_FLAG_FOLDBACK 1
+#define PISP_FE_DPC_FLAG_VFLAG 2
+ __u8 flags;
+} __attribute__((packed));
+
+#define PISP_FE_LSC_LUT_SIZE 16
+
+struct pisp_fe_lsc_config {
+ __u8 shift;
+ __u8 pad0;
+ __u16 scale;
+ __u16 centre_x;
+ __u16 centre_y;
+ __u16 lut[PISP_FE_LSC_LUT_SIZE];
+} __attribute__((packed));
+
+struct pisp_fe_rgby_config {
+ __u16 gain_r;
+ __u16 gain_g;
+ __u16 gain_b;
+ __u8 maxflag;
+ __u8 pad;
+} __attribute__((packed));
+
+struct pisp_fe_agc_stats_config {
+ __u16 offset_x;
+ __u16 offset_y;
+ __u16 size_x;
+ __u16 size_y;
+ /* each weight only 4 bits */
+ __u8 weights[PISP_AGC_STATS_NUM_ZONES / 2];
+ __u16 row_offset_x;
+ __u16 row_offset_y;
+ __u16 row_size_x;
+ __u16 row_size_y;
+ __u8 row_shift;
+ __u8 float_shift;
+ __u8 pad1[2];
+} __attribute__((packed));
+
+struct pisp_fe_awb_stats_config {
+ __u16 offset_x;
+ __u16 offset_y;
+ __u16 size_x;
+ __u16 size_y;
+ __u8 shift;
+ __u8 pad[3];
+ __u16 r_lo;
+ __u16 r_hi;
+ __u16 g_lo;
+ __u16 g_hi;
+ __u16 b_lo;
+ __u16 b_hi;
+} __attribute__((packed));
+
+struct pisp_fe_floating_stats_region {
+ __u16 offset_x;
+ __u16 offset_y;
+ __u16 size_x;
+ __u16 size_y;
+} __attribute__((packed));
+
+struct pisp_fe_floating_stats_config {
+ struct pisp_fe_floating_stats_region
+ regions[PISP_FLOATING_STATS_NUM_ZONES];
+} __attribute__((packed));
+
+#define PISP_FE_CDAF_NUM_WEIGHTS 8
+
+struct pisp_fe_cdaf_stats_config {
+ __u16 noise_constant;
+ __u16 noise_slope;
+ __u16 offset_x;
+ __u16 offset_y;
+ __u16 size_x;
+ __u16 size_y;
+ __u16 skip_x;
+ __u16 skip_y;
+ __u32 mode;
+} __attribute__((packed));
+
+struct pisp_fe_stats_buffer_config {
+ __u32 addr_lo;
+ __u32 addr_hi;
+} __attribute__((packed));
+
+struct pisp_fe_crop_config {
+ __u16 offset_x;
+ __u16 offset_y;
+ __u16 width;
+ __u16 height;
+} __attribute__((packed));
+
+enum pisp_fe_downscale_flags {
+ /* downscale the four Bayer components independently... */
+ DOWNSCALE_BAYER = 1,
+ /* ...without trying to preserve their spatial relationship */
+ DOWNSCALE_BIN = 2,
+};
+
+struct pisp_fe_downscale_config {
+ __u8 xin;
+ __u8 xout;
+ __u8 yin;
+ __u8 yout;
+ __u8 flags; /* enum pisp_fe_downscale_flags */
+ __u8 pad[3];
+ __u16 output_width;
+ __u16 output_height;
+} __attribute__((packed));
+
+struct pisp_fe_output_buffer_config {
+ __u32 addr_lo;
+ __u32 addr_hi;
+} __attribute__((packed));
+
+/* Each of the two output channels/branches: */
+struct pisp_fe_output_branch_config {
+ struct pisp_fe_crop_config crop;
+ struct pisp_fe_downscale_config downscale;
+ struct pisp_compress_config compress;
+ struct pisp_fe_output_config output;
+ __u32 pad;
+} __attribute__((packed));
+
+/* And finally one to rule them all: */
+struct pisp_fe_config {
+ /* I/O configuration: */
+ struct pisp_fe_stats_buffer_config stats_buffer;
+ struct pisp_fe_output_buffer_config output_buffer[PISP_FE_NUM_OUTPUTS];
+ struct pisp_fe_input_buffer_config input_buffer;
+ /* processing configuration: */
+ struct pisp_fe_global_config global;
+ struct pisp_fe_input_config input;
+ struct pisp_decompress_config decompress;
+ struct pisp_fe_decompand_config decompand;
+ struct pisp_bla_config bla;
+ struct pisp_fe_dpc_config dpc;
+ struct pisp_fe_crop_config stats_crop;
+ __u32 spare1; /* placeholder for future decimate configuration */
+ struct pisp_bla_config blc;
+ struct pisp_fe_rgby_config rgby;
+ struct pisp_fe_lsc_config lsc;
+ struct pisp_fe_agc_stats_config agc_stats;
+ struct pisp_fe_awb_stats_config awb_stats;
+ struct pisp_fe_cdaf_stats_config cdaf_stats;
+ struct pisp_fe_floating_stats_config floating_stats;
+ struct pisp_fe_output_axi_config output_axi;
+ struct pisp_fe_output_branch_config ch[PISP_FE_NUM_OUTPUTS];
+ /* non-register fields: */
+ __u32 dirty_flags; /* these use pisp_fe_enable */
+ __u32 dirty_flags_extra; /* these use pisp_fe_dirty */
+} __attribute__((packed));
+
+#endif /* _UAPI_PISP_FE_CONFIG_ */
diff --git a/include/uapi/linux/media/raspberrypi/pisp_fe_statistics.h b/include/uapi/linux/media/raspberrypi/pisp_fe_statistics.h
new file mode 100644
index 000000000000..a7d42985aee8
--- /dev/null
+++ b/include/uapi/linux/media/raspberrypi/pisp_fe_statistics.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * RP1 PiSP Front End statistics definitions
+ *
+ * Copyright (C) 2021 - Raspberry Pi Ltd.
+ *
+ */
+#ifndef _UAPI_PISP_FE_STATISTICS_H_
+#define _UAPI_PISP_FE_STATISTICS_H_
+
+#include <linux/types.h>
+
+#define PISP_FLOATING_STATS_NUM_ZONES 4
+#define PISP_AGC_STATS_NUM_BINS 1024
+#define PISP_AGC_STATS_SIZE 16
+#define PISP_AGC_STATS_NUM_ZONES (PISP_AGC_STATS_SIZE * PISP_AGC_STATS_SIZE)
+#define PISP_AGC_STATS_NUM_ROW_SUMS 512
+
+struct pisp_agc_statistics_zone {
+ __u64 Y_sum;
+ __u32 counted;
+ __u32 pad;
+} __attribute__((packed));
+
+struct pisp_agc_statistics {
+ __u32 row_sums[PISP_AGC_STATS_NUM_ROW_SUMS];
+ /*
+ * 32 bits per bin means an image just under 16384x16384 pixels in
+ * size can assign every pixel a weight from 0 to 15.
+ */
+ __u32 histogram[PISP_AGC_STATS_NUM_BINS];
+ struct pisp_agc_statistics_zone floating[PISP_FLOATING_STATS_NUM_ZONES];
+} __attribute__((packed));
+
+#define PISP_AWB_STATS_SIZE 32
+#define PISP_AWB_STATS_NUM_ZONES (PISP_AWB_STATS_SIZE * PISP_AWB_STATS_SIZE)
+
+struct pisp_awb_statistics_zone {
+ __u32 R_sum;
+ __u32 G_sum;
+ __u32 B_sum;
+ __u32 counted;
+} __attribute__((packed));
+
+struct pisp_awb_statistics {
+ struct pisp_awb_statistics_zone zones[PISP_AWB_STATS_NUM_ZONES];
+ struct pisp_awb_statistics_zone floating[PISP_FLOATING_STATS_NUM_ZONES];
+} __attribute__((packed));
+
+#define PISP_CDAF_STATS_SIZE 8
+#define PISP_CDAF_STATS_NUM_FOMS (PISP_CDAF_STATS_SIZE * PISP_CDAF_STATS_SIZE)
+
+struct pisp_cdaf_statistics {
+ __u64 foms[PISP_CDAF_STATS_NUM_FOMS];
+ __u64 floating[PISP_FLOATING_STATS_NUM_ZONES];
+} __attribute__((packed));
+
+struct pisp_statistics {
+ struct pisp_awb_statistics awb;
+ struct pisp_agc_statistics agc;
+ struct pisp_cdaf_statistics cdaf;
+} __attribute__((packed));
+
+#endif /* _UAPI_PISP_FE_STATISTICS_H_ */
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index 225bc366ffcb..c07008816aca 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -154,7 +154,7 @@ struct mount_attr {
*/
struct statmount {
__u32 size; /* Total size, including strings */
- __u32 mnt_opts; /* [str] Mount options of the mount */
+ __u32 mnt_opts; /* [str] Options (comma separated, escaped) */
__u64 mask; /* What results were written */
__u32 sb_dev_major; /* Device ID */
__u32 sb_dev_minor;
@@ -173,7 +173,13 @@ struct statmount {
__u32 mnt_root; /* [str] Root of mount relative to root of fs */
__u32 mnt_point; /* [str] Mountpoint relative to current root */
__u64 mnt_ns_id; /* ID of the mount namespace */
- __u64 __spare2[49];
+ __u32 fs_subtype; /* [str] Subtype of fs_type (if any) */
+ __u32 sb_source; /* [str] Source string of the mount */
+ __u32 opt_num; /* Number of fs options */
+ __u32 opt_array; /* [str] Array of nul terminated fs options */
+ __u32 opt_sec_num; /* Number of security options */
+ __u32 opt_sec_array; /* [str] Array of nul terminated security options */
+ __u64 __spare2[46];
char str[]; /* Variable size part containing strings */
};
@@ -207,6 +213,10 @@ struct mnt_id_req {
#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */
#define STATMOUNT_MNT_NS_ID 0x00000040U /* Want/got mnt_ns_id */
#define STATMOUNT_MNT_OPTS 0x00000080U /* Want/got mnt_opts */
+#define STATMOUNT_FS_SUBTYPE 0x00000100U /* Want/got fs_subtype */
+#define STATMOUNT_SB_SOURCE 0x00000200U /* Want/got sb_source */
+#define STATMOUNT_OPT_ARRAY 0x00000400U /* Want/got opt_... */
+#define STATMOUNT_OPT_SEC_ARRAY 0x00000800U /* Want/got opt_sec... */
/*
* Special @mnt_id values that can be passed to listmount
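Each new [str] field is an offset into the trailing str[] array, valid only when the matching STATMOUNT_* bit is set in ->mask. A small sketch of reading sb_source back (buffer setup and the statmount(2) call itself omitted):

#include <stdio.h>

/* Resolve one of the [str] offset fields into a C string. */
static const char *statmount_str(const struct statmount *sm, __u32 off)
{
        return sm->str + off;
}

static void print_source(const struct statmount *sm)
{
        if (sm->mask & STATMOUNT_SB_SOURCE)
                printf("source: %s\n", statmount_str(sm, sm->sb_source));
}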
diff --git a/include/uapi/linux/net_shaper.h b/include/uapi/linux/net_shaper.h
new file mode 100644
index 000000000000..d8834b59f7d7
--- /dev/null
+++ b/include/uapi/linux/net_shaper.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/net_shaper.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NET_SHAPER_H
+#define _UAPI_LINUX_NET_SHAPER_H
+
+#define NET_SHAPER_FAMILY_NAME "net-shaper"
+#define NET_SHAPER_FAMILY_VERSION 1
+
+/**
+ * enum net_shaper_scope - Defines the shaper @id interpretation.
+ * @NET_SHAPER_SCOPE_UNSPEC: The scope is not specified.
+ * @NET_SHAPER_SCOPE_NETDEV: The main shaper for the given network device.
+ * @NET_SHAPER_SCOPE_QUEUE: The shaper is attached to the given device queue,
+ * the @id represents the queue number.
+ * @NET_SHAPER_SCOPE_NODE: The shaper allows grouping of queues or other node
+ * shapers; can be nested in either @netdev shapers or other @node shapers,
+ * allowing placement in any location of the scheduling tree, except leaves
+ * and root.
+ */
+enum net_shaper_scope {
+ NET_SHAPER_SCOPE_UNSPEC,
+ NET_SHAPER_SCOPE_NETDEV,
+ NET_SHAPER_SCOPE_QUEUE,
+ NET_SHAPER_SCOPE_NODE,
+
+ /* private: */
+ __NET_SHAPER_SCOPE_MAX,
+ NET_SHAPER_SCOPE_MAX = (__NET_SHAPER_SCOPE_MAX - 1)
+};
+
+/**
+ * enum net_shaper_metric - Different metric supported by the shaper.
+ * @NET_SHAPER_METRIC_BPS: Shaper operates on a bits per second basis.
+ * @NET_SHAPER_METRIC_PPS: Shaper operates on a packets per second basis.
+ */
+enum net_shaper_metric {
+ NET_SHAPER_METRIC_BPS,
+ NET_SHAPER_METRIC_PPS,
+};
+
+enum {
+ NET_SHAPER_A_HANDLE = 1,
+ NET_SHAPER_A_METRIC,
+ NET_SHAPER_A_BW_MIN,
+ NET_SHAPER_A_BW_MAX,
+ NET_SHAPER_A_BURST,
+ NET_SHAPER_A_PRIORITY,
+ NET_SHAPER_A_WEIGHT,
+ NET_SHAPER_A_IFINDEX,
+ NET_SHAPER_A_PARENT,
+ NET_SHAPER_A_LEAVES,
+
+ __NET_SHAPER_A_MAX,
+ NET_SHAPER_A_MAX = (__NET_SHAPER_A_MAX - 1)
+};
+
+enum {
+ NET_SHAPER_A_HANDLE_SCOPE = 1,
+ NET_SHAPER_A_HANDLE_ID,
+
+ __NET_SHAPER_A_HANDLE_MAX,
+ NET_SHAPER_A_HANDLE_MAX = (__NET_SHAPER_A_HANDLE_MAX - 1)
+};
+
+enum {
+ NET_SHAPER_A_CAPS_IFINDEX = 1,
+ NET_SHAPER_A_CAPS_SCOPE,
+ NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS,
+ NET_SHAPER_A_CAPS_SUPPORT_METRIC_PPS,
+ NET_SHAPER_A_CAPS_SUPPORT_NESTING,
+ NET_SHAPER_A_CAPS_SUPPORT_BW_MIN,
+ NET_SHAPER_A_CAPS_SUPPORT_BW_MAX,
+ NET_SHAPER_A_CAPS_SUPPORT_BURST,
+ NET_SHAPER_A_CAPS_SUPPORT_PRIORITY,
+ NET_SHAPER_A_CAPS_SUPPORT_WEIGHT,
+
+ __NET_SHAPER_A_CAPS_MAX,
+ NET_SHAPER_A_CAPS_MAX = (__NET_SHAPER_A_CAPS_MAX - 1)
+};
+
+enum {
+ NET_SHAPER_CMD_GET = 1,
+ NET_SHAPER_CMD_SET,
+ NET_SHAPER_CMD_DELETE,
+ NET_SHAPER_CMD_GROUP,
+ NET_SHAPER_CMD_CAP_GET,
+
+ __NET_SHAPER_CMD_MAX,
+ NET_SHAPER_CMD_MAX = (__NET_SHAPER_CMD_MAX - 1)
+};
+
+#endif /* _UAPI_LINUX_NET_SHAPER_H */
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 7c308f04e7a0..e4be227d3ad6 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -122,6 +122,9 @@ enum {
NETDEV_A_NAPI_ID,
NETDEV_A_NAPI_IRQ,
NETDEV_A_NAPI_PID,
+ NETDEV_A_NAPI_DEFER_HARD_IRQS,
+ NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
+ NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
__NETDEV_A_NAPI_MAX,
NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1)
@@ -199,6 +202,7 @@ enum {
NETDEV_CMD_NAPI_GET,
NETDEV_CMD_QSTATS_GET,
NETDEV_CMD_BIND_RX,
+ NETDEV_CMD_NAPI_SET,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 9e9079321380..49c944e78463 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -564,16 +564,26 @@ enum nft_immediate_attributes {
/**
* enum nft_bitwise_ops - nf_tables bitwise operations
*
- * @NFT_BITWISE_BOOL: mask-and-xor operation used to implement NOT, AND, OR and
- * XOR boolean operations
+ * @NFT_BITWISE_MASK_XOR: mask-and-xor operation used to implement NOT, AND, OR
+ * and XOR boolean operations
* @NFT_BITWISE_LSHIFT: left-shift operation
* @NFT_BITWISE_RSHIFT: right-shift operation
+ * @NFT_BITWISE_AND: and operation
+ * @NFT_BITWISE_OR: or operation
+ * @NFT_BITWISE_XOR: xor operation
*/
enum nft_bitwise_ops {
- NFT_BITWISE_BOOL,
+ NFT_BITWISE_MASK_XOR,
NFT_BITWISE_LSHIFT,
NFT_BITWISE_RSHIFT,
+ NFT_BITWISE_AND,
+ NFT_BITWISE_OR,
+ NFT_BITWISE_XOR,
};
+/*
+ * Old name for NFT_BITWISE_MASK_XOR. Retained for backwards-compatibility.
+ */
+#define NFT_BITWISE_BOOL NFT_BITWISE_MASK_XOR
/**
* enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes
@@ -586,6 +596,7 @@ enum nft_bitwise_ops {
* @NFTA_BITWISE_OP: type of operation (NLA_U32: nft_bitwise_ops)
* @NFTA_BITWISE_DATA: argument for non-boolean operations
* (NLA_NESTED: nft_data_attributes)
+ * @NFTA_BITWISE_SREG2: second source register (NLA_U32: nft_registers)
*
* The bitwise expression supports boolean and shift operations. It implements
* the boolean operations by performing the following operation:
@@ -609,6 +620,7 @@ enum nft_bitwise_attributes {
NFTA_BITWISE_XOR,
NFTA_BITWISE_OP,
NFTA_BITWISE_DATA,
+ NFTA_BITWISE_SREG2,
__NFTA_BITWISE_MAX
};
#define NFTA_BITWISE_MAX (__NFTA_BITWISE_MAX - 1)
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 4fa4e979e948..2f5b4be25261 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -164,6 +164,7 @@ enum nfc_commands {
* @NFC_ATTR_VENDOR_SUBCMD: Vendor specific sub command
* @NFC_ATTR_VENDOR_DATA: Vendor specific data, to be optionally passed
* to a vendor specific command implementation
+ * @NFC_ATTR_TARGET_ATS: ISO 14443 type A target Answer To Select
*/
enum nfc_attrs {
NFC_ATTR_UNSPEC,
@@ -198,6 +199,7 @@ enum nfc_attrs {
NFC_ATTR_VENDOR_ID,
NFC_ATTR_VENDOR_SUBCMD,
NFC_ATTR_VENDOR_DATA,
+ NFC_ATTR_TARGET_ATS,
/* private: internal use only */
__NFC_ATTR_AFTER_LAST
};
@@ -225,6 +227,7 @@ enum nfc_sdp_attr {
#define NFC_GB_MAXSIZE 48
#define NFC_FIRMWARE_NAME_MAXSIZE 32
#define NFC_ISO15693_UID_MAXSIZE 8
+#define NFC_ATS_MAXSIZE 20
/* NFC protocols */
#define NFC_PROTO_JEWEL 1
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f97f5adc8d51..6d11437596b9 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2868,6 +2868,9 @@ enum nl80211_commands {
* nested item, it contains attributes defined in
* &enum nl80211_if_combination_attrs.
*
+ * @NL80211_ATTR_VIF_RADIO_MASK: Bitmask of allowed radios (u32).
+ * A value of 0 means all radios.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3416,6 +3419,8 @@ enum nl80211_attrs {
NL80211_ATTR_WIPHY_RADIOS,
NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS,
+ NL80211_ATTR_VIF_RADIO_MASK,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -4698,6 +4703,7 @@ enum nl80211_survey_info {
* overrides all other flags.
* @NL80211_MNTR_FLAG_ACTIVE: use the configured MAC address
* and ACK incoming unicast packets.
+ * @NL80211_MNTR_FLAG_SKIP_TX: do not pass local tx packets
*
* @__NL80211_MNTR_FLAG_AFTER_LAST: internal use
* @NL80211_MNTR_FLAG_MAX: highest possible monitor flag
@@ -4710,6 +4716,7 @@ enum nl80211_mntr_flags {
NL80211_MNTR_FLAG_OTHER_BSS,
NL80211_MNTR_FLAG_COOK_FRAMES,
NL80211_MNTR_FLAG_ACTIVE,
+ NL80211_MNTR_FLAG_SKIP_TX,
/* keep last */
__NL80211_MNTR_FLAG_AFTER_LAST,
@@ -8031,6 +8038,8 @@ enum nl80211_ap_settings_flags {
* @NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION: Supported interface
* combination for this radio. Attribute may be present multiple times
* and contains attributes defined in &enum nl80211_if_combination_attrs.
+ * @NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK: bitmask (u32) of antennas
+ * connected to this radio.
*
* @__NL80211_WIPHY_RADIO_ATTR_LAST: Internal
* @NL80211_WIPHY_RADIO_ATTR_MAX: Highest attribute
@@ -8041,6 +8050,7 @@ enum nl80211_wiphy_radio_attrs {
NL80211_WIPHY_RADIO_ATTR_INDEX,
NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE,
NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION,
+ NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK,
/* keep last */
__NL80211_WIPHY_RADIO_ATTR_LAST,
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 4842c36fdf80..0524d541d4e3 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -511,7 +511,16 @@ struct perf_event_attr {
__u16 sample_max_stack;
__u16 __reserved_2;
__u32 aux_sample_size;
- __u32 __reserved_3;
+
+ union {
+ __u32 aux_action;
+ struct {
+ __u32 aux_start_paused : 1, /* start AUX area tracing paused */
+ aux_pause : 1, /* on overflow, pause AUX area tracing */
+ aux_resume : 1, /* on overflow, resume AUX area tracing */
+ __reserved_3 : 29;
+ };
+ };
/*
* User provided data if sigtrap=1, passed back to user via
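A hedged sketch of the new bits: the event below starts with AUX-area tracing paused and resumes it when the event overflows. How pause/resume pairs with a particular AUX buffer is governed by the kernel's event-grouping rules, which this fragment glosses over.

#include <string.h>
#include <linux/perf_event.h>

static void init_aux_pause_attr(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->aux_start_paused = 1; /* begin with tracing paused */
        attr->aux_resume = 1;       /* resume tracing on overflow */
}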
diff --git a/include/uapi/linux/pidfd.h b/include/uapi/linux/pidfd.h
index 565fc0629fff..4540f6301b8c 100644
--- a/include/uapi/linux/pidfd.h
+++ b/include/uapi/linux/pidfd.h
@@ -16,6 +16,55 @@
#define PIDFD_SIGNAL_THREAD_GROUP (1UL << 1)
#define PIDFD_SIGNAL_PROCESS_GROUP (1UL << 2)
+/* Flags for pidfd_info. */
+#define PIDFD_INFO_PID (1UL << 0) /* Always returned, even if not requested */
+#define PIDFD_INFO_CREDS (1UL << 1) /* Always returned, even if not requested */
+#define PIDFD_INFO_CGROUPID (1UL << 2) /* Always returned if available, even if not requested */
+
+#define PIDFD_INFO_SIZE_VER0 64 /* sizeof first published struct */
+
+struct pidfd_info {
+ /*
+ * This mask is similar to the request_mask in statx(2).
+ *
+ * Userspace indicates what extensions or expensive-to-calculate fields
+ * it wants by setting the corresponding bits in mask. The kernel
+ * will ignore bits that it does not know about.
+ *
+ * When filling the structure, the kernel will only set bits
+ * corresponding to the fields that were actually filled by the kernel.
+ * This also includes any future extensions that might be automatically
+ * filled. If the structure size is too small to contain a field
+ * (requested or not), to avoid confusion the mask will not
+ * contain a bit for that field.
+ *
+ * As such, userspace MUST verify that mask contains the
+ * corresponding flags after the ioctl(2) returns to ensure that it is
+ * using valid data.
+ */
+ __u64 mask;
+ /*
+ * The information contained in the following fields might be stale at the
+ * time it is received, as the target process might have exited as soon as
+ * the IOCTL was processed, and there is no way to avoid that. However, it
+ * is guaranteed that if the call was successful, then the information was
+ * correct and referred to the intended process at the time the work was
+ * performed.
+ */
+ __u64 cgroupid;
+ __u32 pid;
+ __u32 tgid;
+ __u32 ppid;
+ __u32 ruid;
+ __u32 rgid;
+ __u32 euid;
+ __u32 egid;
+ __u32 suid;
+ __u32 sgid;
+ __u32 fsuid;
+ __u32 fsgid;
+ __u32 spare0[1];
+};
+
#define PIDFS_IOCTL_MAGIC 0xFF
#define PIDFD_GET_CGROUP_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 1)
@@ -28,5 +77,6 @@
#define PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 8)
#define PIDFD_GET_USER_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 9)
#define PIDFD_GET_UTS_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 10)
+#define PIDFD_GET_INFO _IOWR(PIDFS_IOCTL_MAGIC, 11, struct pidfd_info)
#endif /* _UAPI_LINUX_PIDFD_H */
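A usage sketch: request the cgroup id through the new ioctl and, as the comment above insists, verify the returned mask before trusting the field.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/pidfd.h>

static int print_cgroupid(int pidfd)
{
        struct pidfd_info info = { .mask = PIDFD_INFO_CGROUPID };

        if (ioctl(pidfd, PIDFD_GET_INFO, &info) < 0)
                return -1;
        /* Only trust fields whose bit the kernel set on the way out. */
        if (info.mask & PIDFD_INFO_CGROUPID)
                printf("cgroupid: %llu\n", (unsigned long long)info.cgroupid);
        return 0;
}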
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index a3cd0c2dc995..25a9a47001cd 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -836,6 +836,8 @@ enum {
TCA_FQ_WEIGHTS, /* Weights for each band */
+ TCA_FQ_OFFLOAD_HORIZON, /* dequeue paced packets within this horizon immediately (us units) */
+
__TCA_FQ_MAX
};
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 35791791a879..557a3d2ac1d4 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -328,4 +328,26 @@ struct prctl_mm_map {
# define PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC 0x10 /* Clear the aspect on exec */
# define PR_PPC_DEXCR_CTRL_MASK 0x1f
+/*
+ * Get the current shadow stack configuration for the current thread,
+ * this will be the value configured via PR_SET_SHADOW_STACK_STATUS.
+ */
+#define PR_GET_SHADOW_STACK_STATUS 74
+
+/*
+ * Set the current shadow stack configuration. Enabling the shadow
+ * stack will cause a shadow stack to be allocated for the thread.
+ */
+#define PR_SET_SHADOW_STACK_STATUS 75
+# define PR_SHADOW_STACK_ENABLE (1UL << 0)
+# define PR_SHADOW_STACK_WRITE (1UL << 1)
+# define PR_SHADOW_STACK_PUSH (1UL << 2)
+
+/*
+ * Prevent further changes to the specified shadow stack
+ * configuration. All bits may be locked via this call, including
+ * undefined bits.
+ */
+#define PR_LOCK_SHADOW_STACK_STATUS 76
+
#endif /* _LINUX_PRCTL_H */
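A minimal sketch of the intended flow, assuming prctl(2)'s usual five-argument form and that PR_LOCK_SHADOW_STACK_STATUS takes a mask of bits to lock (an assumption based on the comment above):

#include <sys/prctl.h>

static int enable_and_lock_shadow_stack(void)
{
        /* Enable shadow stacks for this thread... */
        if (prctl(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE, 0, 0, 0))
                return -1;
        /* ...then prevent the enable bit from ever being changed. */
        return prctl(PR_LOCK_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE,
                     0, 0, 0);
}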
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 3b687d20c9ed..db7254d52d93 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -174,7 +174,7 @@ enum {
#define RTM_GETLINKPROP RTM_GETLINKPROP
RTM_NEWVLAN = 112,
-#define RTM_NEWNVLAN RTM_NEWVLAN
+#define RTM_NEWVLAN RTM_NEWVLAN
RTM_DELVLAN,
#define RTM_DELVLAN RTM_DELVLAN
RTM_GETVLAN,
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index d3994b7716bc..9025dd5a4f0f 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -215,5 +215,6 @@ struct opal_revert_lsp {
#define IOC_OPAL_GET_GEOMETRY _IOR('p', 238, struct opal_geometry)
#define IOC_OPAL_DISCOVERY _IOW('p', 239, struct opal_discovery)
#define IOC_OPAL_REVERT_LSP _IOW('p', 240, struct opal_revert_lsp)
+#define IOC_OPAL_SET_SID_PW _IOW('p', 241, struct opal_new_pw)
#endif /* _UAPI_SED_OPAL_H */
diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h
index fc78bf3aead7..ba8604bdf206 100644
--- a/include/uapi/linux/thermal.h
+++ b/include/uapi/linux/thermal.h
@@ -3,6 +3,8 @@
#define _UAPI_LINUX_THERMAL_H
#define THERMAL_NAME_LENGTH 20
+#define THERMAL_THRESHOLD_WAY_UP BIT(0)
+#define THERMAL_THRESHOLD_WAY_DOWN BIT(1)
enum thermal_device_mode {
THERMAL_DEVICE_DISABLED = 0,
@@ -18,7 +20,7 @@ enum thermal_trip_type {
/* Adding event notification support elements */
#define THERMAL_GENL_FAMILY_NAME "thermal"
-#define THERMAL_GENL_VERSION 0x01
+#define THERMAL_GENL_VERSION 0x02
#define THERMAL_GENL_SAMPLING_GROUP_NAME "sampling"
#define THERMAL_GENL_EVENT_GROUP_NAME "event"
@@ -28,6 +30,7 @@ enum thermal_genl_attr {
THERMAL_GENL_ATTR_TZ,
THERMAL_GENL_ATTR_TZ_ID,
THERMAL_GENL_ATTR_TZ_TEMP,
+ THERMAL_GENL_ATTR_TZ_PREV_TEMP,
THERMAL_GENL_ATTR_TZ_TRIP,
THERMAL_GENL_ATTR_TZ_TRIP_ID,
THERMAL_GENL_ATTR_TZ_TRIP_TYPE,
@@ -48,6 +51,9 @@ enum thermal_genl_attr {
THERMAL_GENL_ATTR_CPU_CAPABILITY_ID,
THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE,
THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY,
+ THERMAL_GENL_ATTR_THRESHOLD,
+ THERMAL_GENL_ATTR_THRESHOLD_TEMP,
+ THERMAL_GENL_ATTR_THRESHOLD_DIRECTION,
__THERMAL_GENL_ATTR_MAX,
};
#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
@@ -75,6 +81,11 @@ enum thermal_genl_event {
THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, /* Cdev state updated */
THERMAL_GENL_EVENT_TZ_GOV_CHANGE, /* Governor policy changed */
THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, /* CPU capability changed */
+ THERMAL_GENL_EVENT_THRESHOLD_ADD, /* A threshold has been added */
+ THERMAL_GENL_EVENT_THRESHOLD_DELETE, /* A threshold has been deleted */
+ THERMAL_GENL_EVENT_THRESHOLD_FLUSH, /* All thresholds have been deleted */
+ THERMAL_GENL_EVENT_THRESHOLD_UP, /* A threshold has been crossed on the way up */
+ THERMAL_GENL_EVENT_THRESHOLD_DOWN, /* A threshold has been crossed on the way down */
__THERMAL_GENL_EVENT_MAX,
};
#define THERMAL_GENL_EVENT_MAX (__THERMAL_GENL_EVENT_MAX - 1)
@@ -82,12 +93,16 @@ enum thermal_genl_event {
/* Commands supported by the thermal_genl_family */
enum thermal_genl_cmd {
THERMAL_GENL_CMD_UNSPEC,
- THERMAL_GENL_CMD_TZ_GET_ID, /* List of thermal zones id */
- THERMAL_GENL_CMD_TZ_GET_TRIP, /* List of thermal trips */
- THERMAL_GENL_CMD_TZ_GET_TEMP, /* Get the thermal zone temperature */
- THERMAL_GENL_CMD_TZ_GET_GOV, /* Get the thermal zone governor */
- THERMAL_GENL_CMD_TZ_GET_MODE, /* Get the thermal zone mode */
- THERMAL_GENL_CMD_CDEV_GET, /* List of cdev id */
+ THERMAL_GENL_CMD_TZ_GET_ID, /* List of thermal zones id */
+ THERMAL_GENL_CMD_TZ_GET_TRIP, /* List of thermal trips */
+ THERMAL_GENL_CMD_TZ_GET_TEMP, /* Get the thermal zone temperature */
+ THERMAL_GENL_CMD_TZ_GET_GOV, /* Get the thermal zone governor */
+ THERMAL_GENL_CMD_TZ_GET_MODE, /* Get the thermal zone mode */
+ THERMAL_GENL_CMD_CDEV_GET, /* List of cdev id */
+ THERMAL_GENL_CMD_THRESHOLD_GET, /* List of thresholds */
+ THERMAL_GENL_CMD_THRESHOLD_ADD, /* Add a threshold */
+ THERMAL_GENL_CMD_THRESHOLD_DELETE, /* Delete a threshold */
+ THERMAL_GENL_CMD_THRESHOLD_FLUSH, /* Flush all the thresholds */
__THERMAL_GENL_CMD_MAX,
};
#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index 12873639ea96..a8bc98bb69fc 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -147,8 +147,18 @@
*/
#define UBLK_F_NEED_GET_DATA (1UL << 2)
+/*
+ * - Block devices are recoverable if ublk server exits and restarts
+ * - Outstanding I/O when ublk server exits is met with errors
+ * - I/O issued while there is no ublk server is queued
+ */
#define UBLK_F_USER_RECOVERY (1UL << 3)
+/*
+ * - Block devices are recoverable if ublk server exits and restarts
+ * - Outstanding I/O when ublk server exits is reissued
+ * - I/O issued while there is no ublk server is queued
+ */
#define UBLK_F_USER_RECOVERY_REISSUE (1UL << 4)
/*
@@ -190,10 +200,18 @@
*/
#define UBLK_F_ZONED (1ULL << 8)
+/*
+ * - Block devices are recoverable if ublk server exits and restarts
+ * - Outstanding I/O when ublk server exits is met with errors
+ * - I/O issued while there is no ublk server is met with errors
+ */
+#define UBLK_F_USER_RECOVERY_FAIL_IO (1ULL << 9)
+
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
#define UBLK_S_DEV_QUIESCED 2
+#define UBLK_S_DEV_FAIL_IO 3
/* shipped via sqe->cmd of io_uring command */
struct ublksrv_ctrl_cmd {
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 1a0fe8b151fb..d85d671deed3 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -31,7 +31,7 @@ struct udphdr {
#define UDP_CORK 1 /* Never send partially complete segments */
#define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */
#define UDP_NO_CHECK6_TX 101 /* Disable sending checksum for UDP6 */
-#define UDP_NO_CHECK6_RX 102 /* Disable accpeting checksum for UDP6 */
+#define UDP_NO_CHECK6_RX 102 /* Disable accepting checksum for UDP6 */
#define UDP_SEGMENT 103 /* Set GSO segmentation size */
#define UDP_GRO 104 /* This socket can receive UDP GRO packets */
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index ef0128c7369c..44a16e0e5a12 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -2,7 +2,7 @@
/*
* V4L2 DV timings header.
*
- * Copyright (C) 2012-2016 Hans Verkuil <hans.verkuil@cisco.com>
+ * Copyright (C) 2012-2016 Hans Verkuil <hansverk@cisco.com>
*/
#ifndef _V4L2_DV_TIMINGS_H
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 27239cb64065..e7c4dce39007 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -798,6 +798,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */
#define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */
#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
+#define V4L2_PIX_FMT_Y16I v4l2_fourcc('Y', '1', '6', 'I') /* Greyscale 16-bit L/R interleaved */
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */
@@ -859,6 +860,8 @@ struct v4l2_pix_format {
/* Vendor specific - used for RaspberryPi PiSP */
#define V4L2_META_FMT_RPI_BE_CFG v4l2_fourcc('R', 'P', 'B', 'C') /* PiSP BE configuration */
+#define V4L2_META_FMT_RPI_FE_CFG v4l2_fourcc('R', 'P', 'F', 'C') /* PiSP FE configuration */
+#define V4L2_META_FMT_RPI_FE_STATS v4l2_fourcc('R', 'P', 'F', 'S') /* PiSP FE stats */
#ifdef __KERNEL__
/*
@@ -906,6 +909,9 @@ struct v4l2_fmtdesc {
#define V4L2_FMT_FLAG_CSC_QUANTIZATION 0x0100
#define V4L2_FMT_FLAG_META_LINE_BASED 0x0200
+/* Format description flag, to be ORed with the index */
+#define V4L2_FMTDESC_FLAG_ENUM_ALL 0x80000000
+
/* Frame Size and frame rate enumeration */
/*
* F R A M E S I Z E E N U M E R A T I O N
diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h
index 71a54a6849ca..2fccb64c9d6b 100644
--- a/include/uapi/linux/virtio_crypto.h
+++ b/include/uapi/linux/virtio_crypto.h
@@ -329,6 +329,7 @@ struct virtio_crypto_op_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+ /* akcipher sign/verify opcodes are deprecated */
#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
diff --git a/include/uapi/linux/vmclock-abi.h b/include/uapi/linux/vmclock-abi.h
new file mode 100644
index 000000000000..2d99b29ac44a
--- /dev/null
+++ b/include/uapi/linux/vmclock-abi.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+
+/*
+ * This structure provides a vDSO-style clock to VM guests, exposing the
+ * relationship (or lack thereof) between the CPU clock (TSC, timebase, arch
+ * counter, etc.) and real time. It is designed to address the problem of
+ * live migration, which other clock enlightenments do not.
+ *
+ * When a guest is live migrated, this affects the clock in two ways.
+ *
+ * First, even between identical hosts the actual frequency of the underlying
+ * counter will change within the tolerances of its specification (typically
+ * ±50PPM, or 4 seconds a day). This frequency also varies over time on the
+ * same host, but can be tracked by NTP as it generally varies slowly. With
+ * live migration there is a step change in the frequency, with no warning.
+ *
+ * Second, there may be a step change in the value of the counter itself, as
+ * its accuracy is limited by the precision of the NTP synchronization on the
+ * source and destination hosts.
+ *
+ * So any calibration (NTP, PTP, etc.) which the guest has done on the source
+ * host before migration is invalid, and needs to be redone on the new host.
+ *
+ * In its most basic mode, this structure provides only an indication to the
+ * guest that live migration has occurred. This allows the guest to know that
+ * its clock is invalid and take remedial action. For applications that need
+ * reliable accurate timestamps (e.g. distributed databases), the structure
+ * can be mapped all the way to userspace. This allows the application to see
+ * directly for itself that the clock is disrupted and take appropriate
+ * action, even when using a vDSO-style method to get the time instead of a
+ * system call.
+ *
+ * In its more advanced mode, this structure can also be used to expose the
+ * precise relationship of the CPU counter to real time, as calibrated by the
+ * host. This means that userspace applications can have accurate time
+ * immediately after live migration, rather than having to pause operations
+ * and wait for NTP to recover. This mode does, of course, rely on the
+ * counter being reliable and consistent across CPUs.
+ *
+ * Note that this must be true UTC, never with smeared leap seconds. If a
+ * guest wishes to construct a smeared clock, it can do so. Presenting a
+ * smeared clock through this interface would be problematic because it
+ * actually messes with the apparent counter *period*. A linear smearing
+ * of 1 ms per second would effectively tweak the counter period by 1000PPM
+ * at the start/end of the smearing period, while a sinusoidal smear would
+ * basically be impossible to represent.
+ *
+ * This structure is offered with the intent that it be adopted into the
+ * nascent virtio-rtc standard, as a virtio-rtc that does not address the live
+ * migration problem seems a little less than fit for purpose. For that
+ * reason, certain fields use precisely the same numeric definitions as in
+ * the virtio-rtc proposal. The structure can also be exposed through an ACPI
+ * device with the CID "VMCLOCK", modelled on the "VMGENID" device except for
+ * the fact that it uses a real _CRS to convey the address of the structure
+ * (which should be a full page, to allow for mapping directly to userspace).
+ */
+
+#ifndef __VMCLOCK_ABI_H__
+#define __VMCLOCK_ABI_H__
+
+#include <linux/types.h>
+
+struct vmclock_abi {
+ /* CONSTANT FIELDS */
+ __le32 magic;
+#define VMCLOCK_MAGIC 0x4b4c4356 /* "VCLK" */
+ __le32 size; /* Size of region containing this structure */
+ __le16 version; /* 1 */
+ __u8 counter_id; /* Matches VIRTIO_RTC_COUNTER_xxx except INVALID */
+#define VMCLOCK_COUNTER_ARM_VCNT 0
+#define VMCLOCK_COUNTER_X86_TSC 1
+#define VMCLOCK_COUNTER_INVALID 0xff
+ __u8 time_type; /* Matches VIRTIO_RTC_TYPE_xxx */
+#define VMCLOCK_TIME_UTC 0 /* Since 1970-01-01 00:00:00z */
+#define VMCLOCK_TIME_TAI 1 /* Since 1970-01-01 00:00:00z */
+#define VMCLOCK_TIME_MONOTONIC 2 /* Since undefined epoch */
+#define VMCLOCK_TIME_INVALID_SMEARED 3 /* Not supported */
+#define VMCLOCK_TIME_INVALID_MAYBE_SMEARED 4 /* Not supported */
+
+ /* NON-CONSTANT FIELDS PROTECTED BY SEQCOUNT LOCK */
+ __le32 seq_count; /* Low bit means an update is in progress */
+ /*
+ * This field changes to another non-repeating value when the CPU
+ * counter is disrupted, for example on live migration. This lets
+ * the guest know that it should discard any calibration it has
+ * performed of the counter against external sources (NTP/PTP/etc.).
+ */
+ __le64 disruption_marker;
+ __le64 flags;
+ /* Indicates that the tai_offset_sec field is valid */
+#define VMCLOCK_FLAG_TAI_OFFSET_VALID (1 << 0)
+ /*
+ * Optionally used to notify guests of pending maintenance events.
+ * A guest which provides latency-sensitive services may wish to
+ * remove itself from service if an event is coming up. Two flags
+ * indicate the approximate imminence of the event.
+ */
+#define VMCLOCK_FLAG_DISRUPTION_SOON (1 << 1) /* About a day */
+#define VMCLOCK_FLAG_DISRUPTION_IMMINENT (1 << 2) /* About an hour */
+#define VMCLOCK_FLAG_PERIOD_ESTERROR_VALID (1 << 3)
+#define VMCLOCK_FLAG_PERIOD_MAXERROR_VALID (1 << 4)
+#define VMCLOCK_FLAG_TIME_ESTERROR_VALID (1 << 5)
+#define VMCLOCK_FLAG_TIME_MAXERROR_VALID (1 << 6)
+ /*
+ * If the MONOTONIC flag is set then (other than leap seconds) it is
+ * guaranteed that the time calculated according to this structure at
+ * any given moment shall never appear to be later than the time
+ * calculated via the structure at any *later* moment.
+ *
+ * In particular, a timestamp based on a counter reading taken
+ * immediately after setting the low bit of seq_count (and the
+ * associated memory barrier), using the previously-valid time and
+ * period fields, shall never be later than a timestamp based on
+ * a counter reading taken immediately before *clearing* the low
+ * bit again after the update, using the about-to-be-valid fields.
+ */
+#define VMCLOCK_FLAG_TIME_MONOTONIC (1 << 7)
+
+ __u8 pad[2];
+ __u8 clock_status;
+#define VMCLOCK_STATUS_UNKNOWN 0
+#define VMCLOCK_STATUS_INITIALIZING 1
+#define VMCLOCK_STATUS_SYNCHRONIZED 2
+#define VMCLOCK_STATUS_FREERUNNING 3
+#define VMCLOCK_STATUS_UNRELIABLE 4
+
+ /*
+ * The time exposed through this device is never smeared. This field
+ * corresponds to the 'subtype' field in virtio-rtc, which indicates
+ * the smearing method. However in this case it provides a *hint* to
+ * the guest operating system, such that *if* the guest OS wants to
+ * provide its users with an alternative clock which does not follow
+ * UTC, it may do so in a fashion consistent with the other systems
+ * in the nearby environment.
+ */
+ __u8 leap_second_smearing_hint; /* Matches VIRTIO_RTC_SUBTYPE_xxx */
+#define VMCLOCK_SMEARING_STRICT 0
+#define VMCLOCK_SMEARING_NOON_LINEAR 1
+#define VMCLOCK_SMEARING_UTC_SLS 2
+ __le16 tai_offset_sec; /* Actually two's complement signed */
+ __u8 leap_indicator;
+ /*
+ * This field is based on the VIRTIO_RTC_LEAP_xxx values as defined
+ * in the current draft of virtio-rtc, but since smearing cannot be
+ * used with the shared memory device, some values are not used.
+ *
+ * The _POST_POS and _POST_NEG values allow the guest to perform
+ * its own smearing during the day or so after a leap second when
+ * such smearing may need to continue being applied for a leap
+ * second which is now theoretically "historical".
+ */
+#define VMCLOCK_LEAP_NONE 0x00 /* No known nearby leap second */
+#define VMCLOCK_LEAP_PRE_POS 0x01 /* Positive leap second at EOM */
+#define VMCLOCK_LEAP_PRE_NEG 0x02 /* Negative leap second at EOM */
+#define VMCLOCK_LEAP_POS 0x03 /* Set during 23:59:60 second */
+#define VMCLOCK_LEAP_POST_POS 0x04
+#define VMCLOCK_LEAP_POST_NEG 0x05
+
+ /* Bit shift for counter_period_frac_sec and its error rate */
+ __u8 counter_period_shift;
+ /*
+ * Paired values of counter and UTC at a given point in time.
+ */
+ __le64 counter_value;
+ /*
+ * Counter period, and error margin of same. The unit of these
+ * fields is 1/2^(64 + counter_period_shift) of a second.
+ */
+ __le64 counter_period_frac_sec;
+ __le64 counter_period_esterror_rate_frac_sec;
+ __le64 counter_period_maxerror_rate_frac_sec;
+
+ /*
+ * Time according to time_type field above.
+ */
+ __le64 time_sec; /* Seconds since time_type epoch */
+ __le64 time_frac_sec; /* Units of 1/2^64 of a second */
+ __le64 time_esterror_nanosec;
+ __le64 time_maxerror_nanosec;
+};
+
+#endif /* __VMCLOCK_ABI_H__ */
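A sketch of the seqcount read protocol described above, snapshotting the disruption marker. It assumes a little-endian host (the fields are __le32/__le64) and uses GCC barrier builtins; a production reader would also honour clock_status.

#include <stdint.h>

static uint64_t vmclock_read_marker(const volatile struct vmclock_abi *vc)
{
        uint32_t seq;
        uint64_t marker;

        do {
                seq = vc->seq_count & ~1u; /* even value = no update in flight */
                __sync_synchronize();
                marker = vc->disruption_marker;
                __sync_synchronize();
        } while (vc->seq_count != seq); /* retry if an update raced us */

        return marker;
}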
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index 9463db2dfa9d..9854f9cff3c6 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -11,6 +11,7 @@
*/
#include <linux/libc-compat.h>
+#include <linux/types.h>
#ifndef _UAPI_LINUX_XATTR_H
#define _UAPI_LINUX_XATTR_H
@@ -20,6 +21,12 @@
#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
+
+struct xattr_args {
+ __aligned_u64 __user value;
+ __u32 size;
+ __u32 flags;
+};
#endif
/* Namespaces */
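struct xattr_args is the argument block for the *xattrat() syscalls added earlier in this series. A hypothetical invocation via raw syscall(2); the argument order mirrors the other *at() calls and is an assumption, not a verified ABI, and glibc wrappers may not exist yet.

#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int set_comment(int dfd, const char *path, const char *text)
{
        struct xattr_args args = {
                .value = (uintptr_t)text, /* pointer passed as u64 */
                .size = strlen(text),
                .flags = XATTR_CREATE,
        };

        return syscall(__NR_setxattrat, dfd, path, 0, "user.comment",
                       &args, sizeof(args));
}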
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index f28701500714..d73a97e3030a 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -322,6 +322,7 @@ enum xfrm_attr_type_t {
XFRMA_MTIMER_THRESH, /* __u32 in seconds for input SA */
XFRMA_SA_DIR, /* __u8 */
XFRMA_NAT_KEEPALIVE_INTERVAL, /* __u32 in seconds for NAT keepalive */
+ XFRMA_SA_PCPU, /* __u32 */
__XFRMA_MAX
#define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */
@@ -437,6 +438,7 @@ struct xfrm_userpolicy_info {
#define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */
/* Automatically expand selector to include matching ICMP payloads. */
#define XFRM_POLICY_ICMP 2
+#define XFRM_POLICY_CPU_ACQUIRE 4
__u8 share;
};
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index b85f24cac3f5..d967baa0cd0c 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -19,10 +19,10 @@
#include <vdso/time32.h>
#include <vdso/time64.h>
-#ifdef CONFIG_ARCH_HAS_VDSO_DATA
-#include <asm/vdso/data.h>
+#ifdef CONFIG_ARCH_HAS_VDSO_TIME_DATA
+#include <asm/vdso/time_data.h>
#else
-struct arch_vdso_data {};
+struct arch_vdso_time_data {};
#endif
#define VDSO_BASES (CLOCK_TAI + 1)
@@ -114,7 +114,7 @@ struct vdso_data {
u32 hrtimer_res;
u32 __unused;
- struct arch_vdso_data arch_data;
+ struct arch_vdso_time_data arch_data;
};
/**
diff --git a/include/vdso/page.h b/include/vdso/page.h
new file mode 100644
index 000000000000..710ae2414e68
--- /dev/null
+++ b/include/vdso/page.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_PAGE_H
+#define __VDSO_PAGE_H
+
+#include <uapi/linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size.
+ *
+ * Note: This definition is required because PAGE_SHIFT is used
+ * in several places throughout the codebase.
+ */
+#define PAGE_SHIFT CONFIG_PAGE_SHIFT
+
+#define PAGE_SIZE (_AC(1,UL) << CONFIG_PAGE_SHIFT)
+
+#if !defined(CONFIG_64BIT)
+/*
+ * Applies only to 32-bit architectures.
+ *
+ * Subtle: (1 << CONFIG_PAGE_SHIFT) is an int, not an unsigned long.
+ * So if we assign PAGE_MASK to a larger type it gets extended the
+ * way we want (i.e. with 1s in the high bits) while masking a
+ * 64-bit value such as phys_addr_t.
+ */
+#define PAGE_MASK (~((1 << CONFIG_PAGE_SHIFT) - 1))
+#else
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#endif
+
+#endif /* __VDSO_PAGE_H */
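A tiny userspace demonstration of the sign-extension subtlety the comment describes, using a 12-bit shift in place of CONFIG_PAGE_SHIFT:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t phys = 0x1234567890ULL;
        /* int mask sign-extends to 0xfffffffffffff000: high bits kept. */
        uint64_t good = phys & (~((1 << 12) - 1));
        /* unsigned int mask zero-extends: high bits wrongly cleared. */
        uint64_t bad = phys & (~((1u << 12) - 1));

        printf("good=%#llx bad=%#llx\n",
               (unsigned long long)good, (unsigned long long)bad);
        return 0;
}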
diff --git a/include/video/omapfb_dss.h b/include/video/omapfb_dss.h
index a8c0c3eeeb5b..d133190e3143 100644
--- a/include/video/omapfb_dss.h
+++ b/include/video/omapfb_dss.h
@@ -811,14 +811,6 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
}
-struct device_node *
-omapdss_of_get_next_port(const struct device_node *parent,
- struct device_node *prev);
-
-struct device_node *
-omapdss_of_get_next_endpoint(const struct device_node *parent,
- struct device_node *prev);
-
struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node);
#else