Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                16
-rw-r--r--  lib/buildid.c                     5
-rw-r--r--  lib/codetag.c                     3
-rw-r--r--  lib/crc32.c                       4
-rw-r--r--  lib/crypto/Makefile               2
-rw-r--r--  lib/crypto/mpi/mpi-bit.c          1
-rw-r--r--  lib/crypto/mpi/mpi-mul.c          2
-rw-r--r--  lib/crypto/simd.c                11
-rw-r--r--  lib/interval_tree_test.c          2
-rw-r--r--  lib/iov_iter.c                   93
-rw-r--r--  lib/kunit/string-stream-test.c    1
-rw-r--r--  lib/locking-selftest.c           39
-rw-r--r--  lib/maple_tree.c                 14
-rw-r--r--  lib/objpool.c                    18
-rw-r--r--  lib/random32.c                    2
-rw-r--r--  lib/rbtree_test.c                 2
-rw-r--r--  lib/slub_kunit.c                  2
-rw-r--r--  lib/test_bpf.c                    2
-rw-r--r--  lib/test_parman.c                 2
-rw-r--r--  lib/test_scanf.c                  2
20 files changed, 158 insertions(+), 65 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7315f643817a..188f3fd6bdd3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1409,22 +1409,14 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.rst.
config PROVE_RAW_LOCK_NESTING
- bool "Enable raw_spinlock - spinlock nesting checks"
+ bool
depends on PROVE_LOCKING
- default n
+ default y
help
Enable the raw_spinlock vs. spinlock nesting checks which ensure
that the lock nesting rules for PREEMPT_RT enabled kernels are
not violated.
- NOTE: There are known nesting problems. So if you enable this
- option expect lockdep splats until these problems have been fully
- addressed which is work in progress. This config switch allows to
- identify and analyze these problems. It will be removed and the
- check permanently enabled once the main issues have been fixed.
-
- If unsure, select N.
-
config LOCK_STAT
bool "Lock usage statistics"
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
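
The rule this now-unconditional check enforces is the PREEMPT_RT nesting rule: spinlock_t becomes a sleeping lock on RT, so it must never be acquired while a raw_spinlock_t is held. A minimal sketch of both sides of the rule (hypothetical code, not part of this patch):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(inner_raw);
static DEFINE_SPINLOCK(outer);

static void good_nesting(void)
{
	/* Fine: a raw_spinlock_t may nest inside a spinlock_t. */
	spin_lock(&outer);
	raw_spin_lock(&inner_raw);
	raw_spin_unlock(&inner_raw);
	spin_unlock(&outer);
}

static void bad_nesting(void)
{
	/*
	 * Invalid: on PREEMPT_RT spinlock_t can sleep, so taking it
	 * under a raw_spinlock_t would sleep in atomic context.
	 * With this change lockdep always reports such nesting.
	 */
	raw_spin_lock(&inner_raw);
	spin_lock(&outer);		/* lockdep splat here */
	spin_unlock(&outer);
	raw_spin_unlock(&inner_raw);
}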
@@ -1905,7 +1897,7 @@ config STRICT_DEVMEM
bool "Filter access to /dev/mem"
depends on MMU && DEVMEM
depends on ARCH_HAS_DEVMEM_IS_ALLOWED || GENERIC_LIB_DEVMEM_IS_ALLOWED
- default y if PPC || X86 || ARM64
+ default y if PPC || X86 || ARM64 || S390
help
If this option is disabled, you allow userspace (root) access to all
of memory, including kernel and userspace memory. Accidental
@@ -3060,7 +3052,7 @@ config RUST_BUILD_ASSERT_ALLOW
bool "Allow unoptimized build-time assertions"
depends on RUST
help
- Controls how are `build_error!` and `build_assert!` handled during build.
+ Controls how `build_error!` and `build_assert!` are handled during the build.
If calls to them exist in the binary, it may indicate a violated invariant
or that the optimizer failed to verify the invariant during compilation.
diff --git a/lib/buildid.c b/lib/buildid.c
index 290641d92ac1..c4b0f376fb34 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -5,6 +5,7 @@
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
+#include <linux/secretmem.h>
#define BUILD_ID 3
@@ -64,6 +65,10 @@ static int freader_get_folio(struct freader *r, loff_t file_off)
freader_put_folio(r);
+ /* reject secretmem folios created with memfd_secret() */
+ if (secretmem_mapping(r->file->f_mapping))
+ return -EFAULT;
+
r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);
/* if sleeping is allowed, wait for the page, if necessary */
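
The new check keeps the build-ID parser away from memfd_secret() files, whose folios are stripped from the kernel direct map and must not be read from kernel context. A minimal sketch of the same guard in a hypothetical reader (helper name made up for illustration):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/secretmem.h>

static int peek_first_folio(struct file *file, struct folio **foliop)
{
	struct address_space *mapping = file->f_mapping;

	/* mirrors the check added to freader_get_folio() above */
	if (secretmem_mapping(mapping))
		return -EFAULT;

	*foliop = filemap_get_folio(mapping, 0);
	if (IS_ERR(*foliop))
		return PTR_ERR(*foliop);
	return 0;
}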
diff --git a/lib/codetag.c b/lib/codetag.c
index afa8a2d4f317..d1fbbb7c2ec3 100644
--- a/lib/codetag.c
+++ b/lib/codetag.c
@@ -228,6 +228,9 @@ bool codetag_unload_module(struct module *mod)
if (!mod)
return true;
+ /* await any module's kfree_rcu() operations to complete */
+ kvfree_rcu_barrier();
+
mutex_lock(&codetag_lock);
list_for_each_entry(cttype, &codetag_types, link) {
struct codetag_module *found = NULL;
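
kvfree_rcu_barrier() blocks until all queued kfree_rcu()/kvfree_rcu() requests have been processed, so no deferred free can reference the module's allocation tags after they are unloaded. A sketch of the general pattern in a hypothetical module (illustrative only):

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct rcu_head rcu;
	int payload;
};

static struct item *global_item;

static void __exit demo_exit(void)
{
	if (global_item)
		kfree_rcu(global_item, rcu);	/* freed after a grace period */

	/*
	 * Wait for every pending kfree_rcu()/kvfree_rcu() request,
	 * just as codetag_unload_module() now does before releasing
	 * the module's tags.
	 */
	kvfree_rcu_barrier();
}
module_exit(demo_exit);
MODULE_LICENSE("GPL");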
diff --git a/lib/crc32.c b/lib/crc32.c
index 5649847d0a8d..ff587fee3893 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -205,7 +205,11 @@ EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
+EXPORT_SYMBOL(crc32_le_base);
+
u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+EXPORT_SYMBOL(__crc32c_le_base);
+
u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be);
/*
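
Exporting the *_base symbols lets modular architecture code override the fast paths while keeping the generic implementation reachable as a fallback. A hypothetical override might look like this (sketch only; the function name and length cutoff are invented):

#include <linux/types.h>

/* generic table-driven fallback, as exported above */
u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len);

u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
	if (len < 64)			/* not worth the SIMD setup cost */
		return crc32_le_base(crc, p, len);
	/* ... hardware-accelerated path would go here ... */
	return crc32_le_base(crc, p, len);
}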
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 969baab8c805..01fac1cd05a1 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -58,3 +58,5 @@ libcurve25519-y += curve25519-selftest.o
endif
obj-$(CONFIG_MPILIB) += mpi/
+
+obj-$(CONFIG_CRYPTO_MANAGER_EXTRA_TESTS) += simd.o
diff --git a/lib/crypto/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c
index 835a2f0622a0..934d81311360 100644
--- a/lib/crypto/mpi/mpi-bit.c
+++ b/lib/crypto/mpi/mpi-bit.c
@@ -95,6 +95,7 @@ int mpi_set_bit(MPI a, unsigned int n)
a->d[limbno] |= (A_LIMB_1<<bitno);
return 0;
}
+EXPORT_SYMBOL_GPL(mpi_set_bit);
/*
* Shift A by N bits to the right.
diff --git a/lib/crypto/mpi/mpi-mul.c b/lib/crypto/mpi/mpi-mul.c
index 892a246216b9..7e6ff1ce3e9b 100644
--- a/lib/crypto/mpi/mpi-mul.c
+++ b/lib/crypto/mpi/mpi-mul.c
@@ -21,7 +21,7 @@ int mpi_mul(MPI w, MPI u, MPI v)
int usign, vsign, sign_product;
int assign_wp = 0;
mpi_ptr_t tmp_limb = NULL;
- int err;
+ int err = 0;
if (u->nlimbs < v->nlimbs) {
/* Swap U and V. */
diff --git a/lib/crypto/simd.c b/lib/crypto/simd.c
new file mode 100644
index 000000000000..9c36cb3bb49c
--- /dev/null
+++ b/lib/crypto/simd.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SIMD testing utility functions
+ *
+ * Copyright 2024 Google LLC
+ */
+
+#include <crypto/internal/simd.h>
+
+DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
+EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
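
This flag is read by crypto_simd_usable() when CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is enabled, letting the self-tests force algorithms down their non-SIMD fallback paths even when the CPU's vector unit is free. A sketch of a consumer (hypothetical glue code):

#include <crypto/internal/simd.h>
#include <linux/types.h>

static void my_cipher_encrypt(u8 *dst, const u8 *src, unsigned int len)
{
	if (crypto_simd_usable()) {
		/* kernel_fpu_begin()/SIMD implementation goes here */
	} else {
		/* scalar fallback; the tests exercise this branch too */
	}
}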
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index f37f4d44faa9..837064b83a6c 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -2,7 +2,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/interval_tree.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/slab.h>
#include <asm/timex.h>
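
This hunk, and the matching ones in the other test modules below, switches to <linux/prandom.h>, which now carries the seedable, reproducible generator the benchmarks depend on. A sketch of that API (the seed value is arbitrary):

#include <linux/prandom.h>

static struct rnd_state rnd;

static void seed_tests(void)
{
	prandom_seed_state(&rnd, 3141592653589793238ULL);
}

static u32 next_test_value(void)
{
	/* deterministic sequence for a given seed, unlike get_random_u32() */
	return prandom_u32_state(&rnd);
}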
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 1abb32c0da50..9ec806f989f2 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -461,6 +461,8 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
size_t bytes, struct iov_iter *i)
{
size_t n, copied = 0;
+ bool uses_kmap = IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) ||
+ PageHighMem(page);
if (!page_copy_sane(page, offset, bytes))
return 0;
@@ -471,7 +473,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
char *p;
n = bytes - copied;
- if (PageHighMem(page)) {
+ if (uses_kmap) {
page += offset / PAGE_SIZE;
offset %= PAGE_SIZE;
n = min_t(size_t, n, PAGE_SIZE - offset);
@@ -482,7 +484,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
kunmap_atomic(p);
copied += n;
offset += n;
- } while (PageHighMem(page) && copied != bytes && n > 0);
+ } while (uses_kmap && copied != bytes && n > 0);
return copied;
}
@@ -1021,15 +1023,18 @@ static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
size_t offset = iov_offset, fsize = folioq_folio_size(folioq, slot);
size_t part = PAGE_SIZE - offset % PAGE_SIZE;
- part = umin(part, umin(maxsize - extracted, fsize - offset));
- count -= part;
- iov_offset += part;
- extracted += part;
+ if (offset < fsize) {
+ part = umin(part, umin(maxsize - extracted, fsize - offset));
+ count -= part;
+ iov_offset += part;
+ extracted += part;
+
+ *pages = folio_page(folio, offset / PAGE_SIZE);
+ get_page(*pages);
+ pages++;
+ maxpages--;
+ }
- *pages = folio_page(folio, offset / PAGE_SIZE);
- get_page(*pages);
- pages++;
- maxpages--;
if (maxpages == 0 || extracted >= maxsize)
break;
@@ -1677,8 +1682,8 @@ static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
}
/*
- * Extract a list of contiguous pages from an ITER_BVEC iterator. This does
- * not get references on the pages, nor does it get a pin on them.
+ * Extract a list of virtually contiguous pages from an ITER_BVEC iterator.
+ * This does not get references on the pages, nor does it get a pin on them.
*/
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
@@ -1686,35 +1691,59 @@ static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
- struct page **p, *page;
- size_t skip = i->iov_offset, offset, size;
- int k;
+ size_t skip = i->iov_offset, size = 0;
+ struct bvec_iter bi;
+ int k = 0;
- for (;;) {
- if (i->nr_segs == 0)
- return 0;
- size = min(maxsize, i->bvec->bv_len - skip);
- if (size)
- break;
+ if (i->nr_segs == 0)
+ return 0;
+
+ if (i->iov_offset == i->bvec->bv_len) {
i->iov_offset = 0;
i->nr_segs--;
i->bvec++;
skip = 0;
}
+ bi.bi_idx = 0;
+ bi.bi_size = maxsize;
+ bi.bi_bvec_done = skip;
+
+ maxpages = want_pages_array(pages, maxsize, skip, maxpages);
+
+ while (bi.bi_size && bi.bi_idx < i->nr_segs) {
+ struct bio_vec bv = bvec_iter_bvec(i->bvec, bi);
+
+ /*
+ * The iov_iter_extract_pages interface only allows an offset
+ * into the first page. Break out of the loop if we see an
+ * offset into subsequent pages, the caller will have to call
+ * iov_iter_extract_pages again for the remainder.
+ */
+ if (k) {
+ if (bv.bv_offset)
+ break;
+ } else {
+ *offset0 = bv.bv_offset;
+ }
- skip += i->bvec->bv_offset;
- page = i->bvec->bv_page + skip / PAGE_SIZE;
- offset = skip % PAGE_SIZE;
- *offset0 = offset;
+ (*pages)[k++] = bv.bv_page;
+ size += bv.bv_len;
- maxpages = want_pages_array(pages, size, offset, maxpages);
- if (!maxpages)
- return -ENOMEM;
- p = *pages;
- for (k = 0; k < maxpages; k++)
- p[k] = page + k;
+ if (k >= maxpages)
+ break;
+
+ /*
+ * We are done when the end of the bvec doesn't align to a page
+ * boundary as that would create a hole in the returned space.
+ * The caller will handle this with another call to
+ * iov_iter_extract_pages.
+ */
+ if (bv.bv_offset + bv.bv_len != PAGE_SIZE)
+ break;
+
+ bvec_iter_advance_single(i->bvec, &bi, bv.bv_len);
+ }
- size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
iov_iter_advance(i, size);
return size;
}
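
The rewrite walks the bvec array with a bvec_iter, yielding at most one page per step instead of assuming a segment's pages are physically contiguous. The iteration pattern in isolation (hypothetical helper, illustration only):

#include <linux/bvec.h>

static size_t count_page_segments(const struct bio_vec *bvec,
				  unsigned int nr_segs, size_t bytes)
{
	struct bvec_iter bi = {
		.bi_idx		= 0,
		.bi_size	= bytes,
		.bi_bvec_done	= 0,
	};
	size_t nr = 0;

	while (bi.bi_size && bi.bi_idx < nr_segs) {
		/* clipped so that bv never crosses a page boundary */
		struct bio_vec bv = bvec_iter_bvec(bvec, bi);

		nr++;
		bvec_iter_advance_single(bvec, &bi, bv.bv_len);
	}
	return nr;
}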
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
index 7511442ea98f..7734e33156f9 100644
--- a/lib/kunit/string-stream-test.c
+++ b/lib/kunit/string-stream-test.c
@@ -9,6 +9,7 @@
#include <kunit/static_stub.h>
#include <kunit/test.h>
#include <linux/ktime.h>
+#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 6f6a5fc85b42..6e0c019f71b6 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2710,6 +2710,43 @@ static void local_lock_3B(void)
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline const char *rw_semaphore_lockdep_name(struct rw_semaphore *rwsem)
+{
+ return rwsem->dep_map.name;
+}
+#else
+static inline const char *rw_semaphore_lockdep_name(struct rw_semaphore *rwsem)
+{
+ return NULL;
+}
+#endif
+
+static void test_lockdep_set_subclass_name(void)
+{
+ const char *name_before = rw_semaphore_lockdep_name(&rwsem_X1);
+ const char *name_after;
+
+ lockdep_set_subclass(&rwsem_X1, 1);
+ name_after = rw_semaphore_lockdep_name(&rwsem_X1);
+ DEBUG_LOCKS_WARN_ON(name_before != name_after);
+}
+
+/*
+ * lockdep_set_subclass() should reuse the existing lock class name instead
+ * of creating a new one.
+ */
+static void lockdep_set_subclass_name_test(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | lockdep_set_subclass() name test|\n");
+ printk(" -----------------------------------\n");
+
+ print_testname("compare name before and after");
+ dotest(test_lockdep_set_subclass_name, SUCCESS, LOCKTYPE_RWSEM);
+ pr_cont("\n");
+}
+
static void local_lock_tests(void)
{
printk(" --------------------------------------------------------------------------\n");
@@ -2920,6 +2957,8 @@ void locking_selftest(void)
dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
pr_cont("\n");
+ lockdep_set_subclass_name_test();
+
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
debug_locks = 0;
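
For reference, lockdep_set_subclass() assigns a nesting subclass to one lock instance; after this fix it reuses the existing class name instead of registering a new one. Typical usage looks like this (hypothetical locks, sketch only):

#include <linux/lockdep.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(parent_sem);
static DECLARE_RWSEM(child_sem);

static void lock_both(void)
{
	/* distinct subclass so parent/child nesting doesn't look recursive */
	lockdep_set_subclass(&child_sem, SINGLE_DEPTH_NESTING);

	down_write(&parent_sem);
	down_write_nested(&child_sem, SINGLE_DEPTH_NESTING);
	up_write(&child_sem);
	up_write(&parent_sem);
}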
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 20990ecba2dd..3619301dda2e 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -2196,6 +2196,8 @@ static inline void mas_node_or_none(struct ma_state *mas,
/*
* mas_wr_node_walk() - Find the correct offset for the index in the @mas.
+ * If @mas->index cannot be found within the containing
+ * node, we traverse to the last entry in the node.
* @wr_mas: The maple write state
*
* Uses mas_slot_locked() and does not need to worry about dead nodes.
@@ -3532,7 +3534,7 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
return true;
}
-static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
@@ -3541,11 +3543,9 @@ static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
mas->offset);
if (ma_is_leaf(wr_mas->type))
- return true;
+ return;
mas_wr_walk_traverse(wr_mas);
-
}
- return true;
}
/*
* mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
@@ -3765,8 +3765,8 @@ static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
memset(&b_node, 0, sizeof(struct maple_big_node));
/* Copy l_mas and store the value in b_node. */
mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
- /* Copy r_mas into b_node. */
- if (r_mas.offset <= r_mas.end)
+ /* Copy r_mas into b_node if there is anything to copy. */
+ if (r_mas.max > r_mas.last)
mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
&b_node, b_node.b_end + 1);
else
@@ -4218,7 +4218,7 @@ static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
/* Potential spanning rebalance collapsing a node */
if (new_end < mt_min_slots[wr_mas->type]) {
- if (!mte_is_root(mas->node)) {
+ if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK)) {
mas->store_type = wr_rebalance;
return;
}
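
The corrected test asks whether the right-hand state still covers anything beyond the store's last index. Through the public API, a store overlapping several existing ranges is the kind of operation that takes this spanning path once the tree has grown beyond a single node (values are made up; sketch only):

#include <linux/gfp.h>
#include <linux/maple_tree.h>
#include <linux/xarray.h>

static DEFINE_MTREE(mt);

static int demo_spanning_store(void)
{
	int ret;

	ret = mtree_store_range(&mt, 0, 9, xa_mk_value(1), GFP_KERNEL);
	if (ret)
		return ret;
	ret = mtree_store_range(&mt, 10, 19, xa_mk_value(2), GFP_KERNEL);
	if (ret)
		return ret;

	/* overwrites the tail of one range and the head of the next */
	return mtree_store_range(&mt, 5, 14, NULL, GFP_KERNEL);
}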
diff --git a/lib/objpool.c b/lib/objpool.c
index 234f9d0bd081..b998b720c732 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -74,15 +74,21 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
* warm caches and TLB hits. by default vmalloc is used to
* reduce the pressure on the kernel slab system. as we know,
* the minimal size of vmalloc is one page since vmalloc would
- * always align the requested size to page size
+ * always align the requested size to page size.
+ * but if vmalloc fails or it is not available (e.g. GFP_ATOMIC)
+ * allocate percpu slot with kmalloc.
*/
- if (pool->gfp & GFP_ATOMIC)
- slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
- else
+ slot = NULL;
+
+ if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
cpu_to_node(i), __builtin_return_address(0));
- if (!slot)
- return -ENOMEM;
+
+ if (!slot) {
+ slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
+ if (!slot)
+ return -ENOMEM;
+ }
memset(slot, 0, size);
pool->cpu_slots[i] = slot;
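
The allocation now prefers vmalloc whenever the gfp mask allows sleeping, and falls back to kmalloc when vmalloc fails or cannot be used. The same pattern in isolation (hypothetical helper, sketch):

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *alloc_slot(size_t size, gfp_t gfp, int node)
{
	void *p = NULL;

	/* vmalloc can sleep, so only try it for sleepable allocations */
	if ((gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
		p = __vmalloc_node(size, sizeof(void *), gfp, node,
				   __builtin_return_address(0));
	if (!p)
		p = kmalloc_node(size, gfp, node);
	return p;
}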
diff --git a/lib/random32.c b/lib/random32.c
index 0a5a0e3600c8..24e7acd9343f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -36,7 +36,7 @@
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/jiffies.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 41ae3c7570d3..8655a76d29a1 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -2,7 +2,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rbtree_augmented.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/slab.h>
#include <asm/timex.h>
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 80e39f003344..33564f965958 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -141,7 +141,7 @@ static void test_kmalloc_redzone_access(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
- u8 *p = __kmalloc_cache_noprof(s, GFP_KERNEL, 18);
+ u8 *p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));
kasan_disable_current();
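
With memory allocation profiling, the _noprof allocator variants skip call-site accounting on purpose; alloc_hooks() supplies the codetag so the allocation is attributed to this call site instead of being left untracked. In general (sketch, assuming CONFIG_MEM_ALLOC_PROFILING):

#include <linux/slab.h>

static void *tracked_alloc(struct kmem_cache *s)
{
	/*
	 * alloc_hooks() wraps a _noprof call and charges the allocation
	 * to this call site; a bare __kmalloc_cache_noprof() call would
	 * bypass the profiling bookkeeping.
	 */
	return alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));
}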
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index fa5edd6ef7f7..2eed1ad958e9 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -14,7 +14,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/highmem.h>
#include <linux/sched.h>
diff --git a/lib/test_parman.c b/lib/test_parman.c
index 35e32243693c..f9b97426a337 100644
--- a/lib/test_parman.c
+++ b/lib/test_parman.c
@@ -39,7 +39,7 @@
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/err.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/parman.h>
#define TEST_PARMAN_PRIO_SHIFT 7 /* defines number of prios for testing */
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index 7257b1768545..44f8508c9d88 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/printk.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/string.h>