Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug                35
-rw-r--r--   lib/Makefile                      9
-rw-r--r--   lib/cpumask.c                    52
-rw-r--r--   lib/crypto/blake2s-selftest.c    25
-rw-r--r--   lib/errname.c                    22
-rw-r--r--   lib/find_bit.c                    9
-rw-r--r--   lib/hashtable_test.c            317
-rw-r--r--   lib/iov_iter.c                  284
-rw-r--r--   lib/kunit/Makefile                4
-rw-r--r--   lib/kunit/hooks-impl.h           31
-rw-r--r--   lib/kunit/hooks.c                21
-rw-r--r--   lib/kunit/kunit-example-test.c   38
-rw-r--r--   lib/kunit/static_stub.c         123
-rw-r--r--   lib/kunit/test.c                 15
-rw-r--r--   lib/mpi/mpicoder.c                3
-rw-r--r--   lib/nmi_backtrace.c               2
-rw-r--r--   lib/test_kmod.c                  11
-rw-r--r--   lib/test_kprobes.c               39
-rw-r--r--   lib/usercopy.c                    7
19 files changed, 982 insertions, 65 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 02ee440f7be3..1dd4bd7dc271 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -389,6 +389,15 @@ config PAHOLE_HAS_BTF_TAG
	  btf_decl_tag) or not. Currently only clang compiler implements
	  these attributes, so make the config depend on CC_IS_CLANG.

+config PAHOLE_HAS_LANG_EXCLUDE
+	def_bool PAHOLE_VERSION >= 124
+	help
+	  Support for the --lang_exclude flag which makes pahole exclude
+	  compilation units from the supplied language. Used in Kbuild to
+	  omit Rust CUs which are not supported in version 1.24 of pahole,
+	  otherwise it would emit malformed kernel and module binaries when
+	  using DEBUG_INFO_BTF_MODULES.
+
 config DEBUG_INFO_BTF_MODULES
	def_bool y
	depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
@@ -1553,6 +1562,17 @@ config TRACE_IRQFLAGS_NMI
	depends on TRACE_IRQFLAGS
	depends on TRACE_IRQFLAGS_NMI_SUPPORT

+config NMI_CHECK_CPU
+	bool "Debugging for CPUs failing to respond to backtrace requests"
+	depends on DEBUG_KERNEL
+	depends on X86
+	default n
+	help
+	  Enables debug prints when a CPU fails to respond to a given
+	  backtrace NMI. These prints provide some reasons why a CPU
+	  might legitimately be failing to respond, for example, if it
+	  is offline or if ignore_nmis is set.
+
 config DEBUG_IRQFLAGS
	bool "Debug IRQ flag manipulation"
	help
@@ -2497,6 +2517,19 @@ config LIST_KUNIT_TEST

	  If unsure, say N.

+config HASHTABLE_KUNIT_TEST
+	tristate "KUnit Test for Kernel Hashtable structures" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  This builds the hashtable KUnit test suite.
+	  It tests the basic functionality of the API defined in
+	  include/linux/hashtable.h. For more information on KUnit and
+	  unit tests in general please refer to the KUnit documentation
+	  in Documentation/dev-tools/kunit/.
+
+	  If unsure, say N.
+
 config LINEAR_RANGES_TEST
	tristate "KUnit test for linear_ranges"
	depends on KUNIT
@@ -2882,6 +2915,4 @@ config RUST_BUILD_ASSERT_ALLOW

 endmenu # "Rust"

-source "Documentation/Kconfig"
-
 endmenu # Kernel hacking
diff --git a/lib/Makefile b/lib/Makefile
index 36938c564a2a..a269af847e2e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -126,6 +126,14 @@ CFLAGS_test_fpu.o += $(FPU_CFLAGS)
 obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/

 obj-$(CONFIG_KUNIT) += kunit/
+# Include the KUnit hooks unconditionally. They'll compile to nothing if
+# CONFIG_KUNIT=n, otherwise will be a small table of static data (static key,
+# function pointers) which needs to be built-in even when KUnit is a module.
+ifeq ($(CONFIG_KUNIT), m)
+obj-y += kunit/hooks.o
+else
+obj-$(CONFIG_KUNIT) += kunit/hooks.o
+endif

 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -369,6 +377,7 @@ obj-$(CONFIG_PLDMFW) += pldmfw/
 CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
 obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
 obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
+obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
 obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
 obj-$(CONFIG_BITS_TEST) += test_bits.o
 obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c7c392514fd3..e7258836b60b 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -110,15 +110,33 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 #endif

 /**
- * cpumask_local_spread - select the i'th cpu with local numa cpu's first
+ * cpumask_local_spread - select the i'th cpu based on NUMA distances
  * @i: index number
  * @node: local numa_node
  *
- * This function selects an online CPU according to a numa aware policy;
- * local cpus are returned first, followed by non-local ones, then it
- * wraps around.
+ * Returns an online CPU according to a numa aware policy; local cpus are
+ * returned first, followed by non-local ones, then it wraps around.
  *
- * It's not very efficient, but useful for setup.
+ * For those who want to enumerate all CPUs based on their NUMA distances,
+ * i.e. call this function in a loop, like:
+ *
+ *	for (i = 0; i < num_online_cpus(); i++) {
+ *		cpu = cpumask_local_spread(i, node);
+ *		do_something(cpu);
+ *	}
+ *
+ * there's a better alternative based on for_each()-like iterators:
+ *
+ *	for_each_numa_hop_mask(mask, node) {
+ *		for_each_cpu_andnot(cpu, mask, prev)
+ *			do_something(cpu);
+ *		prev = mask;
+ *	}
+ *
+ * It's more verbose, but also more efficient: iterator-based enumeration
+ * is O(sched_domains_numa_levels * nr_cpu_ids), while calling
+ * cpumask_local_spread() for each cpu is
+ * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
  */
 unsigned int cpumask_local_spread(unsigned int i, int node)
 {
@@ -127,24 +145,12 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

-	if (node == NUMA_NO_NODE) {
-		cpu = cpumask_nth(i, cpu_online_mask);
-		if (cpu < nr_cpu_ids)
-			return cpu;
-	} else {
-		/* NUMA first. */
-		cpu = cpumask_nth_and(i, cpu_online_mask, cpumask_of_node(node));
-		if (cpu < nr_cpu_ids)
-			return cpu;
-
-		i -= cpumask_weight_and(cpu_online_mask, cpumask_of_node(node));
-
-		/* Skip NUMA nodes, done above. */
-		cpu = cpumask_nth_andnot(i, cpu_online_mask, cpumask_of_node(node));
-		if (cpu < nr_cpu_ids)
-			return cpu;
-	}
-	BUG();
+	cpu = (node == NUMA_NO_NODE) ?
+		cpumask_nth(i, cpu_online_mask) :
+		sched_numa_find_nth_cpu(cpu_online_mask, i, node);
+
+	WARN_ON(cpu >= nr_cpu_ids);
+	return cpu;
 }
 EXPORT_SYMBOL(cpumask_local_spread);
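The iterator-based loop in the comment above leaves the initialisation of prev to the reader. Here is a minimal self-contained sketch of the same pattern; hypothetical driver code, assuming the for_each_numa_hop_mask()/for_each_cpu_andnot() helpers introduced alongside this change and that the hop masks are RCU-protected:

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Visit CPUs in order of increasing NUMA distance from @node. */
static void visit_by_numa_distance(int node)
{
	const struct cpumask *mask, *prev = cpu_none_mask;
	unsigned int cpu;

	rcu_read_lock();	/* assumption: hop masks are RCU-protected */
	for_each_numa_hop_mask(mask, node) {
		/* Each hop mask includes the closer hops; skip those. */
		for_each_cpu_andnot(cpu, mask, prev)
			pr_info("next CPU by distance: %u\n", cpu);
		prev = mask;
	}
	rcu_read_unlock();
}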
diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c
index 7d77dea15587..d0634ed6a937 100644
--- a/lib/crypto/blake2s-selftest.c
+++ b/lib/crypto/blake2s-selftest.c
@@ -545,7 +545,7 @@ static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
	  0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, },
 };

-bool __init blake2s_selftest(void)
+static bool __init noinline_for_stack blake2s_digest_test(void)
 {
	u8 key[BLAKE2S_KEY_SIZE];
	u8 buf[ARRAY_SIZE(blake2s_testvecs)];
@@ -589,11 +589,20 @@ bool __init blake2s_selftest(void)
		}
	}

+	return success;
+}
+
+static bool __init noinline_for_stack blake2s_random_test(void)
+{
+	struct blake2s_state state;
+	bool success = true;
+	int i, l;
+
	for (i = 0; i < 32; ++i) {
		enum { TEST_ALIGNMENT = 16 };
-		u8 unaligned_block[BLAKE2S_BLOCK_SIZE + TEST_ALIGNMENT - 1]
+		u8 blocks[BLAKE2S_BLOCK_SIZE * 2 + TEST_ALIGNMENT - 1]
			__aligned(TEST_ALIGNMENT);
-		u8 blocks[BLAKE2S_BLOCK_SIZE * 2];
+		u8 *unaligned_block = blocks + BLAKE2S_BLOCK_SIZE;
		struct blake2s_state state1, state2;

		get_random_bytes(blocks, sizeof(blocks));
@@ -630,3 +639,13 @@ bool __init blake2s_selftest(void)

	return success;
 }
+
+bool __init blake2s_selftest(void)
+{
+	bool success;
+
+	success = blake2s_digest_test();
+	success &= blake2s_random_test();
+
+	return success;
+}
diff --git a/lib/errname.c b/lib/errname.c
index 05cbf731545f..67739b174a8c 100644
--- a/lib/errname.c
+++ b/lib/errname.c
@@ -21,6 +21,7 @@ static const char *names_0[] = {
	E(EADDRNOTAVAIL),
	E(EADV),
	E(EAFNOSUPPORT),
+	E(EAGAIN), /* EWOULDBLOCK */
	E(EALREADY),
	E(EBADE),
	E(EBADF),
@@ -31,15 +32,17 @@ static const char *names_0[] = {
	E(EBADSLT),
	E(EBFONT),
	E(EBUSY),
-#ifdef ECANCELLED
-	E(ECANCELLED),
-#endif
+	E(ECANCELED), /* ECANCELLED */
	E(ECHILD),
	E(ECHRNG),
	E(ECOMM),
	E(ECONNABORTED),
+	E(ECONNREFUSED), /* EREFUSED */
	E(ECONNRESET),
+	E(EDEADLK), /* EDEADLOCK */
+#if EDEADLK != EDEADLOCK /* mips, sparc, powerpc */
	E(EDEADLOCK),
+#endif
	E(EDESTADDRREQ),
	E(EDOM),
	E(EDOTDOT),
@@ -166,14 +169,17 @@ static const char *names_0[] = {
	E(EUSERS),
	E(EXDEV),
	E(EXFULL),
-
-	E(ECANCELED), /* ECANCELLED */
-	E(EAGAIN), /* EWOULDBLOCK */
-	E(ECONNREFUSED), /* EREFUSED */
-	E(EDEADLK), /* EDEADLOCK */
 };
 #undef E

+#ifdef EREFUSED /* parisc */
+static_assert(EREFUSED == ECONNREFUSED);
+#endif
+#ifdef ECANCELLED /* parisc */
+static_assert(ECANCELLED == ECANCELED);
+#endif
+static_assert(EAGAIN == EWOULDBLOCK); /* everywhere */
+
 #define E(err) [err - 512 + BUILD_BUG_ON_ZERO(err < 512 || err > 550)] = "-" #err
 static const char *names_512[] = {
	E(ERESTARTSYS),
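For context on why these aliases become static_asserts rather than duplicate table entries: errname() backs printk's %pe specifier, and a designated-initializer array cannot hold two names for one errno value. A tiny usage sketch (hypothetical caller, not part of this patch):

#include <linux/err.h>
#include <linux/printk.h>

static void errname_demo(void)
{
	/* EWOULDBLOCK == EAGAIN everywhere, so both print as "-EAGAIN". */
	pr_info("status: %pe\n", ERR_PTR(-EWOULDBLOCK));
}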
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 18bc0a7ac8ee..c10920e66788 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -155,6 +155,15 @@ unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned l
 }
 EXPORT_SYMBOL(__find_nth_andnot_bit);

+unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1,
+					const unsigned long *addr2,
+					const unsigned long *addr3,
+					unsigned long size, unsigned long n)
+{
+	return FIND_NTH_BIT(addr1[idx] & addr2[idx] & ~addr3[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_and_andnot_bit);
+
 #ifndef find_next_and_bit
 unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
					unsigned long nbits, unsigned long start)
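In practice this primitive is reached through its cpumask wrapper from the same series; a sketch (the helper name and masks here are hypothetical):

#include <linux/cpumask.h>

/* Return the n'th online CPU that is in @allowed but not in @used,
 * or >= nr_cpu_ids if there is none. cpumask_nth_and_andnot() boils
 * down to __find_nth_and_andnot_bit() on the three bitmaps.
 */
static unsigned int nth_free_cpu(unsigned int n, const struct cpumask *allowed,
				 const struct cpumask *used)
{
	return cpumask_nth_and_andnot(n, cpu_online_mask, allowed, used);
}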
diff --git a/lib/hashtable_test.c b/lib/hashtable_test.c
new file mode 100644
index 000000000000..1d1b3288dee2
--- /dev/null
+++ b/lib/hashtable_test.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the Kernel Hashtable structures.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/hashtable.h>
+
+struct hashtable_test_entry {
+	int key;
+	int data;
+	struct hlist_node node;
+	int visited;
+};
+
+static void hashtable_test_hash_init(struct kunit *test)
+{
+	/* Test the different ways of initialising a hashtable. */
+	DEFINE_HASHTABLE(hash1, 2);
+	DECLARE_HASHTABLE(hash2, 3);
+
+	/* When using DECLARE_HASHTABLE, you must use hash_init to
+	 * initialize the hashtable.
+	 */
+	hash_init(hash2);
+
+	KUNIT_EXPECT_TRUE(test, hash_empty(hash1));
+	KUNIT_EXPECT_TRUE(test, hash_empty(hash2));
+}
+
+static void hashtable_test_hash_empty(struct kunit *test)
+{
+	struct hashtable_test_entry a;
+	DEFINE_HASHTABLE(hash, 1);
+
+	KUNIT_EXPECT_TRUE(test, hash_empty(hash));
+
+	a.key = 1;
+	a.data = 13;
+	hash_add(hash, &a.node, a.key);
+
+	/* Hashtable should no longer be empty. */
+	KUNIT_EXPECT_FALSE(test, hash_empty(hash));
+}
+
+static void hashtable_test_hash_hashed(struct kunit *test)
+{
+	struct hashtable_test_entry a, b;
+	DEFINE_HASHTABLE(hash, 4);
+
+	a.key = 1;
+	a.data = 13;
+	hash_add(hash, &a.node, a.key);
+	b.key = 1;
+	b.data = 2;
+	hash_add(hash, &b.node, b.key);
+
+	KUNIT_EXPECT_TRUE(test, hash_hashed(&a.node));
+	KUNIT_EXPECT_TRUE(test, hash_hashed(&b.node));
+}
+
+static void hashtable_test_hash_add(struct kunit *test)
+{
+	struct hashtable_test_entry a, b, *x;
+	int bkt;
+	DEFINE_HASHTABLE(hash, 3);
+
+	a.key = 1;
+	a.data = 13;
+	a.visited = 0;
+	hash_add(hash, &a.node, a.key);
+	b.key = 2;
+	b.data = 10;
+	b.visited = 0;
+	hash_add(hash, &b.node, b.key);
+
+	hash_for_each(hash, bkt, x, node) {
+		x->visited++;
+		if (x->key == a.key)
+			KUNIT_EXPECT_EQ(test, x->data, 13);
+		else if (x->key == b.key)
+			KUNIT_EXPECT_EQ(test, x->data, 10);
+		else
+			KUNIT_FAIL(test, "Unexpected key in hashtable.");
+	}
+
+	/* Both entries should have been visited exactly once. */
+	KUNIT_EXPECT_EQ(test, a.visited, 1);
+	KUNIT_EXPECT_EQ(test, b.visited, 1);
+}
+
+static void hashtable_test_hash_del(struct kunit *test)
+{
+	struct hashtable_test_entry a, b, *x;
+	DEFINE_HASHTABLE(hash, 6);
+
+	a.key = 1;
+	a.data = 13;
+	hash_add(hash, &a.node, a.key);
+	b.key = 2;
+	b.data = 10;
+	b.visited = 0;
+	hash_add(hash, &b.node, b.key);
+
+	hash_del(&b.node);
+	hash_for_each_possible(hash, x, node, b.key) {
+		x->visited++;
+		KUNIT_EXPECT_NE(test, x->key, b.key);
+	}
+
+	/* The deleted entry should not have been visited. */
+	KUNIT_EXPECT_EQ(test, b.visited, 0);
+
+	hash_del(&a.node);
+
+	/* The hashtable should be empty. */
+	KUNIT_EXPECT_TRUE(test, hash_empty(hash));
+}
+
+static void hashtable_test_hash_for_each(struct kunit *test)
+{
+	struct hashtable_test_entry entries[3];
+	struct hashtable_test_entry *x;
+	int bkt, i, j, count;
+	DEFINE_HASHTABLE(hash, 3);
+
+	/* Add three entries to the hashtable. */
+	for (i = 0; i < 3; i++) {
+		entries[i].key = i;
+		entries[i].data = i + 10;
+		entries[i].visited = 0;
+		hash_add(hash, &entries[i].node, entries[i].key);
+	}
+
+	count = 0;
+	hash_for_each(hash, bkt, x, node) {
+		x->visited += 1;
+		KUNIT_ASSERT_GE_MSG(test, x->key, 0, "Unexpected key in hashtable.");
+		KUNIT_ASSERT_LT_MSG(test, x->key, 3, "Unexpected key in hashtable.");
+		count++;
+	}
+
+	/* Should have visited each entry exactly once. */
+	KUNIT_EXPECT_EQ(test, count, 3);
+	for (j = 0; j < 3; j++)
+		KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+}
+
+static void hashtable_test_hash_for_each_safe(struct kunit *test)
+{
+	struct hashtable_test_entry entries[3];
+	struct hashtable_test_entry *x;
+	struct hlist_node *tmp;
+	int bkt, i, j, count;
+	DEFINE_HASHTABLE(hash, 3);
+
+	/* Add three entries to the hashtable. */
+	for (i = 0; i < 3; i++) {
+		entries[i].key = i;
+		entries[i].data = i + 10;
+		entries[i].visited = 0;
+		hash_add(hash, &entries[i].node, entries[i].key);
+	}
+
+	count = 0;
+	hash_for_each_safe(hash, bkt, tmp, x, node) {
+		x->visited += 1;
+		KUNIT_ASSERT_GE_MSG(test, x->key, 0, "Unexpected key in hashtable.");
+		KUNIT_ASSERT_LT_MSG(test, x->key, 3, "Unexpected key in hashtable.");
+		count++;
+
+		/* Delete entry during loop. */
+		hash_del(&x->node);
+	}
+
+	/* Should have visited each entry exactly once. */
+	KUNIT_EXPECT_EQ(test, count, 3);
+	for (j = 0; j < 3; j++)
+		KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+}
+
+static void hashtable_test_hash_for_each_possible(struct kunit *test)
+{
+	struct hashtable_test_entry entries[4];
+	struct hashtable_test_entry *x, *y;
+	int buckets[2];
+	int bkt, i, j, count;
+	DEFINE_HASHTABLE(hash, 5);
+
+	/* Add three entries with key = 0 to the hashtable. */
+	for (i = 0; i < 3; i++) {
+		entries[i].key = 0;
+		entries[i].data = i;
+		entries[i].visited = 0;
+		hash_add(hash, &entries[i].node, entries[i].key);
+	}
+
+	/* Add an entry with key = 1. */
+	entries[3].key = 1;
+	entries[3].data = 3;
+	entries[3].visited = 0;
+	hash_add(hash, &entries[3].node, entries[3].key);
+
+	count = 0;
+	hash_for_each_possible(hash, x, node, 0) {
+		x->visited += 1;
+		KUNIT_ASSERT_GE_MSG(test, x->data, 0, "Unexpected data in hashtable.");
+		KUNIT_ASSERT_LT_MSG(test, x->data, 4, "Unexpected data in hashtable.");
+		count++;
+	}
+
+	/* Should have visited each entry with key = 0 exactly once. */
+	for (j = 0; j < 3; j++)
+		KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+
+	/* Save the buckets for the different keys. */
+	hash_for_each(hash, bkt, y, node) {
+		KUNIT_ASSERT_GE_MSG(test, y->key, 0, "Unexpected key in hashtable.");
+		KUNIT_ASSERT_LE_MSG(test, y->key, 1, "Unexpected key in hashtable.");
+		buckets[y->key] = bkt;
+	}
+
+	/* If entry with key = 1 is in the same bucket as the entries with
+	 * key = 0, check it was visited. Otherwise ensure that only three
+	 * entries were visited.
+	 */
+	if (buckets[0] == buckets[1]) {
+		KUNIT_EXPECT_EQ(test, count, 4);
+		KUNIT_EXPECT_EQ(test, entries[3].visited, 1);
+	} else {
+		KUNIT_EXPECT_EQ(test, count, 3);
+		KUNIT_EXPECT_EQ(test, entries[3].visited, 0);
+	}
+}
+
+static void hashtable_test_hash_for_each_possible_safe(struct kunit *test)
+{
+	struct hashtable_test_entry entries[4];
+	struct hashtable_test_entry *x, *y;
+	struct hlist_node *tmp;
+	int buckets[2];
+	int bkt, i, j, count;
+	DEFINE_HASHTABLE(hash, 5);
+
+	/* Add three entries with key = 0 to the hashtable. */
+	for (i = 0; i < 3; i++) {
+		entries[i].key = 0;
+		entries[i].data = i;
+		entries[i].visited = 0;
+		hash_add(hash, &entries[i].node, entries[i].key);
+	}
+
+	/* Add an entry with key = 1. */
+	entries[3].key = 1;
+	entries[3].data = 3;
+	entries[3].visited = 0;
+	hash_add(hash, &entries[3].node, entries[3].key);
+
+	count = 0;
+	hash_for_each_possible_safe(hash, x, tmp, node, 0) {
+		x->visited += 1;
+		KUNIT_ASSERT_GE_MSG(test, x->data, 0, "Unexpected data in hashtable.");
+		KUNIT_ASSERT_LT_MSG(test, x->data, 4, "Unexpected data in hashtable.");
+		count++;
+
+		/* Delete entry during loop. */
+		hash_del(&x->node);
+	}
+
+	/* Should have visited each entry with key = 0 exactly once. */
+	for (j = 0; j < 3; j++)
+		KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+
+	/* Save the buckets for the different keys. */
+	hash_for_each(hash, bkt, y, node) {
+		KUNIT_ASSERT_GE_MSG(test, y->key, 0, "Unexpected key in hashtable.");
+		KUNIT_ASSERT_LE_MSG(test, y->key, 1, "Unexpected key in hashtable.");
+		buckets[y->key] = bkt;
+	}
+
+	/* If entry with key = 1 is in the same bucket as the entries with
+	 * key = 0, check it was visited. Otherwise ensure that only three
+	 * entries were visited.
+	 */
+	if (buckets[0] == buckets[1]) {
+		KUNIT_EXPECT_EQ(test, count, 4);
+		KUNIT_EXPECT_EQ(test, entries[3].visited, 1);
+	} else {
+		KUNIT_EXPECT_EQ(test, count, 3);
+		KUNIT_EXPECT_EQ(test, entries[3].visited, 0);
+	}
+}
+
+static struct kunit_case hashtable_test_cases[] = {
+	KUNIT_CASE(hashtable_test_hash_init),
+	KUNIT_CASE(hashtable_test_hash_empty),
+	KUNIT_CASE(hashtable_test_hash_hashed),
+	KUNIT_CASE(hashtable_test_hash_add),
+	KUNIT_CASE(hashtable_test_hash_del),
+	KUNIT_CASE(hashtable_test_hash_for_each),
+	KUNIT_CASE(hashtable_test_hash_for_each_safe),
+	KUNIT_CASE(hashtable_test_hash_for_each_possible),
+	KUNIT_CASE(hashtable_test_hash_for_each_possible_safe),
+	{},
+};
+
+static struct kunit_suite hashtable_test_module = {
+	.name = "hashtable",
+	.test_cases = hashtable_test_cases,
+};
+
+kunit_test_suites(&hashtable_test_module);
+
+MODULE_LICENSE("GPL");
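The *_possible tests above encode an important API subtlety: hash_for_each_possible() walks a bucket, not a key, so callers must still compare keys themselves. A minimal sketch of idiomatic lookup (my_table/my_entry are hypothetical names):

#include <linux/hashtable.h>

struct my_entry {
	int key;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(my_table, 3);	/* 2^3 = 8 buckets */

static struct my_entry *my_lookup(int key)
{
	struct my_entry *e;

	/* Entries with different keys can share a bucket; filter on key. */
	hash_for_each_possible(my_table, e, node, key) {
		if (e->key == key)
			return e;
	}
	return NULL;
}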
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index d9b3332c8405..274014e4eafe 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -186,12 +186,6 @@ static int copyin(void *to, const void __user *from, size_t n)
	return res;
 }

-static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
-					   unsigned int slot)
-{
-	return &pipe->bufs[slot & (pipe->ring_size - 1)];
-}
-
 #ifdef PIPE_PARANOIA
 static bool sanity(const struct iov_iter *i)
 {
@@ -1432,9 +1426,9 @@ static struct page *first_bvec_segment(const struct iov_iter *i,
 static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   unsigned int maxpages, size_t *start,
-		   unsigned int gup_flags)
+		   iov_iter_extraction_t extraction_flags)
 {
-	unsigned int n;
+	unsigned int n, gup_flags = 0;

	if (maxsize > i->count)
		maxsize = i->count;
@@ -1442,6 +1436,8 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;
+	if (extraction_flags & ITER_ALLOW_P2PDMA)
+		gup_flags |= FOLL_PCI_P2PDMA;

	if (likely(user_backed_iter(i))) {
		unsigned long addr;
@@ -1495,14 +1491,14 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,

 ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
-		   size_t *start, unsigned gup_flags)
+		   size_t *start, iov_iter_extraction_t extraction_flags)
 {
	if (!maxpages)
		return 0;
	BUG_ON(!pages);

	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
-					  start, gup_flags);
+					  start, extraction_flags);
 }
 EXPORT_SYMBOL_GPL(iov_iter_get_pages);

@@ -1515,14 +1511,14 @@ EXPORT_SYMBOL(iov_iter_get_pages2);
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
-		   size_t *start, unsigned gup_flags)
+		   size_t *start, iov_iter_extraction_t extraction_flags)
 {
	ssize_t len;

	*pages = NULL;

	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
-					 gup_flags);
+					 extraction_flags);
	if (len <= 0) {
		kvfree(*pages);
		*pages = NULL;
@@ -1925,3 +1921,267 @@ void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
	i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
 }
+
+/*
+ * Extract a list of contiguous pages from an ITER_XARRAY iterator. This does
+ * not get references on the pages, nor does it get a pin on them.
+ */
+static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
+					     struct page ***pages, size_t maxsize,
+					     unsigned int maxpages,
+					     iov_iter_extraction_t extraction_flags,
+					     size_t *offset0)
+{
+	struct page *page, **p;
+	unsigned int nr = 0, offset;
+	loff_t pos = i->xarray_start + i->iov_offset;
+	pgoff_t index = pos >> PAGE_SHIFT;
+	XA_STATE(xas, i->xarray, index);
+
+	offset = pos & ~PAGE_MASK;
+	*offset0 = offset;
+
+	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+	if (!maxpages)
+		return -ENOMEM;
+	p = *pages;
+
+	rcu_read_lock();
+	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+		if (xas_retry(&xas, page))
+			continue;
+
+		/* Has the page moved or been split? */
+		if (unlikely(page != xas_reload(&xas))) {
+			xas_reset(&xas);
+			continue;
+		}
+
+		p[nr++] = find_subpage(page, xas.xa_index);
+		if (nr == maxpages)
+			break;
+	}
+	rcu_read_unlock();
+
+	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
+	iov_iter_advance(i, maxsize);
+	return maxsize;
+}
+
+/*
+ * Extract a list of contiguous pages from an ITER_BVEC iterator. This does
+ * not get references on the pages, nor does it get a pin on them.
+ */
+static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
+					   struct page ***pages, size_t maxsize,
+					   unsigned int maxpages,
+					   iov_iter_extraction_t extraction_flags,
+					   size_t *offset0)
+{
+	struct page **p, *page;
+	size_t skip = i->iov_offset, offset;
+	int k;
+
+	for (;;) {
+		if (i->nr_segs == 0)
+			return 0;
+		maxsize = min(maxsize, i->bvec->bv_len - skip);
+		if (maxsize)
+			break;
+		i->iov_offset = 0;
+		i->nr_segs--;
+		i->bvec++;
+		skip = 0;
+	}
+
+	skip += i->bvec->bv_offset;
+	page = i->bvec->bv_page + skip / PAGE_SIZE;
+	offset = skip % PAGE_SIZE;
+	*offset0 = offset;
+
+	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+	if (!maxpages)
+		return -ENOMEM;
+	p = *pages;
+	for (k = 0; k < maxpages; k++)
+		p[k] = page + k;
+
+	maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
+	iov_iter_advance(i, maxsize);
+	return maxsize;
+}
+
+/*
+ * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
+ * This does not get references on the pages, nor does it get a pin on them.
+ */
+static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
+					   struct page ***pages, size_t maxsize,
+					   unsigned int maxpages,
+					   iov_iter_extraction_t extraction_flags,
+					   size_t *offset0)
+{
+	struct page **p, *page;
+	const void *kaddr;
+	size_t skip = i->iov_offset, offset, len;
+	int k;
+
+	for (;;) {
+		if (i->nr_segs == 0)
+			return 0;
+		maxsize = min(maxsize, i->kvec->iov_len - skip);
+		if (maxsize)
+			break;
+		i->iov_offset = 0;
+		i->nr_segs--;
+		i->kvec++;
+		skip = 0;
+	}
+
+	kaddr = i->kvec->iov_base + skip;
+	offset = (unsigned long)kaddr & ~PAGE_MASK;
+	*offset0 = offset;
+
+	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+	if (!maxpages)
+		return -ENOMEM;
+	p = *pages;
+
+	kaddr -= offset;
+	len = offset + maxsize;
+	for (k = 0; k < maxpages; k++) {
+		size_t seg = min_t(size_t, len, PAGE_SIZE);
+
+		if (is_vmalloc_or_module_addr(kaddr))
+			page = vmalloc_to_page(kaddr);
+		else
+			page = virt_to_page(kaddr);
+
+		p[k] = page;
+		len -= seg;
+		kaddr += PAGE_SIZE;
+	}
+
+	maxsize = min_t(size_t, maxsize, maxpages * PAGE_SIZE - offset);
+	iov_iter_advance(i, maxsize);
+	return maxsize;
+}
+
+/*
+ * Extract a list of contiguous pages from a user iterator and get a pin on
+ * each of them. This should only be used if the iterator is user-backed
+ * (IOBUF/UBUF).
+ *
+ * It does not get refs on the pages, but the pages must be unpinned by the
+ * caller once the transfer is complete.
+ *
+ * This is safe to be used where background IO/DMA *is* going to be modifying
+ * the buffer; using a pin rather than a ref forces fork() to give the
+ * child a copy of the page.
+ */
+static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
+					   struct page ***pages,
+					   size_t maxsize,
+					   unsigned int maxpages,
+					   iov_iter_extraction_t extraction_flags,
+					   size_t *offset0)
+{
+	unsigned long addr;
+	unsigned int gup_flags = 0;
+	size_t offset;
+	int res;
+
+	if (i->data_source == ITER_DEST)
+		gup_flags |= FOLL_WRITE;
+	if (extraction_flags & ITER_ALLOW_P2PDMA)
+		gup_flags |= FOLL_PCI_P2PDMA;
+	if (i->nofault)
+		gup_flags |= FOLL_NOFAULT;
+
+	addr = first_iovec_segment(i, &maxsize);
+	*offset0 = offset = addr % PAGE_SIZE;
+	addr &= PAGE_MASK;
+	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+	if (!maxpages)
+		return -ENOMEM;
+	res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
+	if (unlikely(res <= 0))
+		return res;
+	maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
+	iov_iter_advance(i, maxsize);
+	return maxsize;
+}
+
+/**
+ * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
+ * @i: The iterator to extract from
+ * @pages: Where to return the list of pages
+ * @maxsize: The maximum amount of iterator to extract
+ * @maxpages: The maximum size of the list of pages
+ * @extraction_flags: Flags to qualify request
+ * @offset0: Where to return the starting offset into (*@pages)[0]
+ *
+ * Extract a list of contiguous pages from the current point of the iterator,
+ * advancing the iterator. The maximum number of pages and the maximum amount
+ * of page contents can be set.
+ *
+ * If *@pages is NULL, a page list will be allocated to the required size and
+ * *@pages will be set to its base. If *@pages is not NULL, it will be assumed
+ * that the caller allocated a page list at least @maxpages in size and this
+ * will be filled in.
+ *
+ * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
+ * be allowed on the pages extracted.
+ *
+ * The iov_iter_extract_will_pin() function can be used to query how cleanup
+ * should be performed.
+ *
+ * Extra refs or pins on the pages may be obtained as follows:
+ *
+ *  (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
+ *      added to the pages, but refs will not be taken.
+ *      iov_iter_extract_will_pin() will return true.
+ *
+ *  (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
+ *      merely listed; no extra refs or pins are obtained.
+ *      iov_iter_extract_will_pin() will return false.
+ *
+ * Note also:
+ *
+ *  (*) Use with ITER_DISCARD is not supported as that has no content.
+ *
+ * On success, the function sets *@pages to the new pagelist, if allocated, and
+ * sets *offset0 to the offset into the first page.
+ *
+ * It may also return -ENOMEM and -EFAULT.
+ */
+ssize_t iov_iter_extract_pages(struct iov_iter *i,
+			       struct page ***pages,
+			       size_t maxsize,
+			       unsigned int maxpages,
+			       iov_iter_extraction_t extraction_flags,
+			       size_t *offset0)
+{
+	maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
+	if (!maxsize)
+		return 0;
+
+	if (likely(user_backed_iter(i)))
+		return iov_iter_extract_user_pages(i, pages, maxsize,
+						   maxpages, extraction_flags,
+						   offset0);
+	if (iov_iter_is_kvec(i))
+		return iov_iter_extract_kvec_pages(i, pages, maxsize,
+						   maxpages, extraction_flags,
+						   offset0);
+	if (iov_iter_is_bvec(i))
+		return iov_iter_extract_bvec_pages(i, pages, maxsize,
+						   maxpages, extraction_flags,
+						   offset0);
+	if (iov_iter_is_xarray(i))
+		return iov_iter_extract_xarray_pages(i, pages, maxsize,
+						     maxpages, extraction_flags,
+						     offset0);
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
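A sketch of a caller pairing iov_iter_extract_pages() with the cleanup the kernel-doc describes; hypothetical driver code, assuming the allocated page list is kvmalloc'd (as the existing *_get_pages_alloc paths kvfree() it) and that iov_iter_extract_will_pin() is available to the caller:

#include <linux/uio.h>
#include <linux/mm.h>

static ssize_t dma_fill_from_iter(struct iov_iter *iter, size_t len)
{
	struct page **pages = NULL;	/* let the API allocate the list */
	size_t offset0;
	ssize_t n;
	int k;

	n = iov_iter_extract_pages(iter, &pages, len, INT_MAX, 0, &offset0);
	if (n <= 0)
		return n;

	/* ... set up DMA over pages[] starting at offset0 ... */

	/* Only user-backed iterators take pins; we must release them. */
	if (iov_iter_extract_will_pin(iter))
		for (k = 0; k < DIV_ROUND_UP(offset0 + n, PAGE_SIZE); k++)
			unpin_user_page(pages[k]);
	kvfree(pages);
	return n;
}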
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 29aff6562b42..da665cd4ea12 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_KUNIT) += kunit.o

 kunit-objs += test.o \
	resource.o \
+	static_stub.o \
	string-stream.o \
	assert.o \
	try-catch.o \
@@ -11,6 +12,9 @@ ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
 kunit-objs += debugfs.o
 endif

+# KUnit 'hooks' are built-in even when KUnit is built as a module.
+lib-y += hooks.o
+
 obj-$(CONFIG_KUNIT_TEST) += kunit-test.o

 # string-stream-test compiles built-in only.
diff --git a/lib/kunit/hooks-impl.h b/lib/kunit/hooks-impl.h
new file mode 100644
index 000000000000..4e71b2d0143b
--- /dev/null
+++ b/lib/kunit/hooks-impl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Declarations for hook implementations.
+ *
+ * These will be set as the function pointers in struct kunit_hook_table,
+ * found in include/kunit/test-bug.h.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_HOOKS_IMPL_H
+#define _KUNIT_HOOKS_IMPL_H
+
+#include <kunit/test-bug.h>
+
+/* List of declarations. */
+void __printf(3, 4) __kunit_fail_current_test_impl(const char *file,
+						   int line,
+						   const char *fmt, ...);
+void *__kunit_get_static_stub_address_impl(struct kunit *test, void *real_fn_addr);
+
+/* Code to set all of the function pointers. */
+static inline void kunit_install_hooks(void)
+{
+	/* Install the KUnit hook functions. */
+	kunit_hooks.fail_current_test = __kunit_fail_current_test_impl;
+	kunit_hooks.get_static_stub_address = __kunit_get_static_stub_address_impl;
+}
+
+#endif /* _KUNIT_HOOKS_IMPL_H */
diff --git a/lib/kunit/hooks.c b/lib/kunit/hooks.c
new file mode 100644
index 000000000000..365d98d4953c
--- /dev/null
+++ b/lib/kunit/hooks.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit 'Hooks' implementation.
+ *
+ * This file contains code / structures which should be built-in even when
+ * KUnit itself is built as a module.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#include <kunit/test-bug.h>
+
+DEFINE_STATIC_KEY_FALSE(kunit_running);
+EXPORT_SYMBOL(kunit_running);
+
+/* Function pointers for hooks. */
+struct kunit_hooks_table kunit_hooks;
+EXPORT_SYMBOL(kunit_hooks);
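The point of this table is that core kernel code can call into KUnit without a link-time dependency on the kunit module. The call sites in include/kunit/test-bug.h follow roughly this shape; a paraphrased sketch, not the verbatim header:

/* Check the always-built-in static key first; only if a KUnit test is
 * actually running do we indirect through the hook table that
 * lib/kunit/hooks.o keeps built-in.
 */
#define kunit_fail_current_test(fmt, ...) do {				\
	if (static_branch_unlikely(&kunit_running))			\
		kunit_hooks.fail_current_test(__FILE__, __LINE__,	\
					      fmt, ##__VA_ARGS__);	\
} while (0)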
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index 66cc4e2365ec..cd8b7e51d02b 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -7,6 +7,7 @@
 */

 #include <kunit/test.h>
+#include <kunit/static_stub.h>

 /*
  * This is the most fundamental element of KUnit, the test case. A test case
@@ -130,6 +131,42 @@ static void example_all_expect_macros_test(struct kunit *test)
	KUNIT_ASSERT_GT_MSG(test, sizeof(int), 0, "Your ints are 0-bit?!");
 }

+/* This is a function we'll replace with static stubs. */
+static int add_one(int i)
+{
+	/* This will trigger the stub if active. */
+	KUNIT_STATIC_STUB_REDIRECT(add_one, i);
+
+	return i + 1;
+}
+
+/* This is used as a replacement for the above function. */
+static int subtract_one(int i)
+{
+	/* We don't need to trigger the stub from the replacement. */
+
+	return i - 1;
+}
+
+/*
+ * This test shows the use of static stubs.
+ */
+static void example_static_stub_test(struct kunit *test)
+{
+	/* By default, the function is not stubbed. */
+	KUNIT_EXPECT_EQ(test, add_one(1), 2);
+
+	/* Replace add_one() with subtract_one(). */
+	kunit_activate_static_stub(test, add_one, subtract_one);
+
+	/* add_one() is now replaced. */
+	KUNIT_EXPECT_EQ(test, add_one(1), 0);
+
+	/* Return add_one() to normal. */
+	kunit_deactivate_static_stub(test, add_one);
+	KUNIT_EXPECT_EQ(test, add_one(1), 2);
+}
+
 /*
  * Here we make a list of all the test cases we want to add to the test suite
  * below.
@@ -145,6 +182,7 @@ static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_skip_test),
	KUNIT_CASE(example_mark_skipped_test),
	KUNIT_CASE(example_all_expect_macros_test),
+	KUNIT_CASE(example_static_stub_test),
	{}
 };
diff --git a/lib/kunit/static_stub.c b/lib/kunit/static_stub.c
new file mode 100644
index 000000000000..92b2cccd5e76
--- /dev/null
+++ b/lib/kunit/static_stub.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit function redirection (static stubbing) API.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+#include "hooks-impl.h"
+
+/* Context for a static stub. This is stored in the resource data. */
+struct kunit_static_stub_ctx {
+	void *real_fn_addr;
+	void *replacement_addr;
+};
+
+static void __kunit_static_stub_resource_free(struct kunit_resource *res)
+{
+	kfree(res->data);
+}
+
+/* Matching function for kunit_find_resource(). match_data is real_fn_addr. */
+static bool __kunit_static_stub_resource_match(struct kunit *test,
+					       struct kunit_resource *res,
+					       void *match_real_fn_addr)
+{
+	/* This pointer is only valid if res is a static stub resource. */
+	struct kunit_static_stub_ctx *ctx = res->data;
+
+	/* Make sure the resource is a static stub resource. */
+	if (res->free != &__kunit_static_stub_resource_free)
+		return false;
+
+	return ctx->real_fn_addr == match_real_fn_addr;
+}
+
+/* Hook to return the address of the replacement function. */
+void *__kunit_get_static_stub_address_impl(struct kunit *test, void *real_fn_addr)
+{
+	struct kunit_resource *res;
+	struct kunit_static_stub_ctx *ctx;
+	void *replacement_addr;
+
+	res = kunit_find_resource(test,
+				  __kunit_static_stub_resource_match,
+				  real_fn_addr);
+
+	if (!res)
+		return NULL;
+
+	ctx = res->data;
+	replacement_addr = ctx->replacement_addr;
+	kunit_put_resource(res);
+	return replacement_addr;
+}
+
+void kunit_deactivate_static_stub(struct kunit *test, void *real_fn_addr)
+{
+	struct kunit_resource *res;
+
+	KUNIT_ASSERT_PTR_NE_MSG(test, real_fn_addr, NULL,
+				"Tried to deactivate a NULL stub.");
+
+	/* Look up the existing stub for this function. */
+	res = kunit_find_resource(test,
+				  __kunit_static_stub_resource_match,
+				  real_fn_addr);
+
+	/* Error out if the stub doesn't exist. */
+	KUNIT_ASSERT_PTR_NE_MSG(test, res, NULL,
+				"Tried to deactivate a nonexistent stub.");
+
+	/* Free the stub. We 'put' twice, as we got a reference
+	 * from kunit_find_resource()
+	 */
+	kunit_remove_resource(test, res);
+	kunit_put_resource(res);
+}
+EXPORT_SYMBOL_GPL(kunit_deactivate_static_stub);
+
+/* Helper function for kunit_activate_static_stub(). The macro does
+ * typechecking, so use it instead.
+ */
+void __kunit_activate_static_stub(struct kunit *test,
+				  void *real_fn_addr,
+				  void *replacement_addr)
+{
+	struct kunit_static_stub_ctx *ctx;
+	struct kunit_resource *res;
+
+	KUNIT_ASSERT_PTR_NE_MSG(test, real_fn_addr, NULL,
+				"Tried to activate a stub for function NULL");
+
+	/* If the replacement address is NULL, deactivate the stub. */
+	if (!replacement_addr) {
+		kunit_deactivate_static_stub(test, real_fn_addr);
+		return;
+	}
+
+	/* Look up any existing stubs for this function, and replace them. */
+	res = kunit_find_resource(test,
+				  __kunit_static_stub_resource_match,
+				  real_fn_addr);
+	if (res) {
+		ctx = res->data;
+		ctx->replacement_addr = replacement_addr;
+
+		/* We got an extra reference from find_resource(), so put it. */
+		kunit_put_resource(res);
+	} else {
+		ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+		ctx->real_fn_addr = real_fn_addr;
+		ctx->replacement_addr = replacement_addr;
+		res = kunit_alloc_resource(test, NULL,
+					   &__kunit_static_stub_resource_free,
+					   GFP_KERNEL, ctx);
+	}
+}
+EXPORT_SYMBOL_GPL(__kunit_activate_static_stub);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 890ba5b3a981..c9e15bb60058 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -17,17 +17,14 @@
 #include <linux/sched.h>

 #include "debugfs.h"
+#include "hooks-impl.h"
 #include "string-stream.h"
 #include "try-catch-impl.h"

-DEFINE_STATIC_KEY_FALSE(kunit_running);
-EXPORT_SYMBOL_GPL(kunit_running);
-
-#if IS_BUILTIN(CONFIG_KUNIT)
 /*
- * Fail the current test and print an error message to the log.
+ * Hook to fail the current test and print an error message to the log.
 */
-void __kunit_fail_current_test(const char *file, int line, const char *fmt, ...)
+void __printf(3, 4) __kunit_fail_current_test_impl(const char *file, int line, const char *fmt, ...)
 {
	va_list args;
	int len;
@@ -54,8 +51,6 @@ void __kunit_fail_current_test(const char *file, int line, const char *fmt, ...)
	kunit_err(current->kunit_test, "%s:%d: %s", file, line, buffer);
	kunit_kfree(current->kunit_test, buffer);
 }
-EXPORT_SYMBOL_GPL(__kunit_fail_current_test);
-#endif

 /*
  * Enable KUnit tests to run.
@@ -778,6 +773,9 @@ EXPORT_SYMBOL_GPL(kunit_cleanup);

 static int __init kunit_init(void)
 {
+	/* Install the KUnit hook functions. */
+	kunit_install_hooks();
+
	kunit_debugfs_init();
 #ifdef CONFIG_MODULES
	return register_module_notifier(&kunit_mod_nb);
@@ -789,6 +787,7 @@ late_initcall(kunit_init);

 static void __exit kunit_exit(void)
 {
+	memset(&kunit_hooks, 0, sizeof(kunit_hooks));
 #ifdef CONFIG_MODULES
	unregister_module_notifier(&kunit_mod_nb);
 #endif
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 39c4c6731094..3cb6bd148fa9 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -504,7 +504,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)

	while (sg_miter_next(&miter)) {
		buff = miter.addr;
-		len = miter.length;
+		len = min_t(unsigned, miter.length, nbytes);
+		nbytes -= len;

		for (x = 0; x < len; x++) {
			a <<= 8;
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index d01aec6ae15c..5274bbb026d7 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -64,6 +64,7 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
+		nmi_backtrace_stall_snap(to_cpumask(backtrace_mask));
		raise(to_cpumask(backtrace_mask));
	}

@@ -74,6 +75,7 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
		mdelay(1);
		touch_softlockup_watchdog();
	}
+	nmi_backtrace_stall_check(to_cpumask(backtrace_mask));

	/*
	 * Force flush any remote buffers that might be stuck in IRQ context
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 6423df9fa8dd..43d9dfd57ab7 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -51,12 +51,11 @@ static int num_test_devs;

 /**
  * enum kmod_test_case - linker table test case
- *
- * If you add a test case, please be sure to review if you need to se
- * @need_mod_put for your tests case.
- *
  * @TEST_KMOD_DRIVER: stress tests request_module()
  * @TEST_KMOD_FS_TYPE: stress tests get_fs_type()
+ *
+ * If you add a test case, please be sure to review if you need to set
+ * @need_mod_put for your test case.
 */
 enum kmod_test_case {
	__TEST_KMOD_INVALID = 0,
@@ -78,7 +77,7 @@ struct test_config {
 struct kmod_test_device;

 /**
- * kmod_test_device_info - thread info
+ * struct kmod_test_device_info - thread info
 *
 * @ret_sync: return value if request_module() is used, sync request for
 *	@TEST_KMOD_DRIVER
@@ -101,7 +100,7 @@ struct kmod_test_device_info {
 };

 /**
- * kmod_test_device - test device to help test kmod
+ * struct kmod_test_device - test device to help test kmod
 *
 * @dev_idx: unique ID for test device
 * @config: configuration for the test
diff --git a/lib/test_kprobes.c b/lib/test_kprobes.c
index 1c95e5719802..0648f7154f5c 100644
--- a/lib/test_kprobes.c
+++ b/lib/test_kprobes.c
@@ -14,6 +14,7 @@

 static u32 rand1, preh_val, posth_val;
 static u32 (*target)(u32 value);
+static u32 (*recursed_target)(u32 value);
 static u32 (*target2)(u32 value);
 static struct kunit *current_test;

@@ -27,18 +28,27 @@ static noinline u32 kprobe_target(u32 value)
	return (value / div_factor);
 }

+static noinline u32 kprobe_recursed_target(u32 value)
+{
+	return (value / div_factor);
+}
+
 static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
	KUNIT_EXPECT_FALSE(current_test, preemptible());
-	preh_val = (rand1 / div_factor);
+
+	preh_val = recursed_target(rand1);
	return 0;
 }

 static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
 {
+	u32 expval = recursed_target(rand1);
+
	KUNIT_EXPECT_FALSE(current_test, preemptible());
-	KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor));
+	KUNIT_EXPECT_EQ(current_test, preh_val, expval);
+
	posth_val = preh_val + div_factor;
 }

@@ -136,6 +146,29 @@ static void test_kprobes(struct kunit *test)
	unregister_kprobes(kps, 2);
 }
+static struct kprobe kp_missed = {
+	.symbol_name = "kprobe_recursed_target",
+	.pre_handler = kp_pre_handler,
+	.post_handler = kp_post_handler,
+};
+
+static void test_kprobe_missed(struct kunit *test)
+{
+	current_test = test;
+	preh_val = 0;
+	posth_val = 0;
+
+	KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp_missed));
+
+	recursed_target(rand1);
+
+	KUNIT_EXPECT_EQ(test, 2, kp_missed.nmissed);
+	KUNIT_EXPECT_NE(test, 0, preh_val);
+	KUNIT_EXPECT_NE(test, 0, posth_val);
+
+	unregister_kprobe(&kp_missed);
+}
+
 #ifdef CONFIG_KRETPROBES
 static u32 krph_val;

@@ -336,6 +369,7 @@ static int kprobes_test_init(struct kunit *test)
 {
	target = kprobe_target;
	target2 = kprobe_target2;
+	recursed_target = kprobe_recursed_target;
	stacktrace_target = kprobe_stacktrace_target;
	internal_target = kprobe_stacktrace_internal_target;
	stacktrace_driver = kprobe_stacktrace_driver;
@@ -346,6 +380,7 @@ static int kprobes_test_init(struct kunit *test)
 static struct kunit_case kprobes_testcases[] = {
	KUNIT_CASE(test_kprobe),
	KUNIT_CASE(test_kprobes),
+	KUNIT_CASE(test_kprobe_missed),
 #ifdef CONFIG_KRETPROBES
	KUNIT_CASE(test_kretprobe),
	KUNIT_CASE(test_kretprobes),
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 1505a52f23a0..d29fe29c6849 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -3,6 +3,7 @@
 #include <linux/fault-inject-usercopy.h>
 #include <linux/instrumented.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>

 /* out-of-line parts */

@@ -12,6 +13,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+		/*
+		 * Ensure that bad access_ok() speculation will not
+		 * lead to nasty side effects *after* the copy is
+		 * finished:
+		 */
+		barrier_nospec();
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);