Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |  12
-rw-r--r--  lib/Kconfig.debug                |  37
-rw-r--r--  lib/Makefile                     |   3
-rw-r--r--  lib/checksum.c                   |  11
-rw-r--r--  lib/crc32.c                      |   4
-rw-r--r--  lib/crypto/Makefile              |   2
-rw-r--r--  lib/crypto/mpi/mpi-bit.c         |   1
-rw-r--r--  lib/crypto/simd.c                |  11
-rw-r--r--  lib/debugobjects.c               | 849
-rw-r--r--  lib/dim/dim.c                    |   3
-rw-r--r--  lib/dim/net_dim.c                |  10
-rw-r--r--  lib/dynamic_queue_limits.c       |   2
-rw-r--r--  lib/interval_tree_test.c         |   2
-rw-r--r--  lib/iomem_copy.c                 | 136
-rw-r--r--  lib/iov_iter.c                   |  68
-rw-r--r--  lib/kunit/debugfs.c              |   9
-rw-r--r--  lib/kunit/kunit-test.c           |   2
-rw-r--r--  lib/kunit/string-stream-test.c   |   1
-rw-r--r--  lib/locking-selftest.c           |  39
-rw-r--r--  lib/logic_pio.c                  |   4
-rw-r--r--  lib/math/test_div64.c            |  85
-rw-r--r--  lib/packing.c                    | 322
-rw-r--r--  lib/packing_test.c               | 413
-rw-r--r--  lib/random32.c                   |   2
-rw-r--r--  lib/rbtree_test.c                |   2
-rw-r--r--  lib/test_bpf.c                   |   2
-rw-r--r--  lib/test_parman.c                |   2
-rw-r--r--  lib/test_printf.c                |  61
-rw-r--r--  lib/test_scanf.c                 |   2
-rw-r--r--  lib/vsprintf.c                   |  57
30 files changed, 1597 insertions, 557 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index b38849af6f13..50d85f38b569 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -40,6 +40,18 @@ config PACKING
When in doubt, say N.
+config PACKING_KUNIT_TEST
+ tristate "KUnit tests for packing library" if !KUNIT_ALL_TESTS
+ depends on PACKING && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds KUnit tests for the packing library.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ When in doubt, say N.
+
config BITREVERSE
tristate
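
The new PACKING_KUNIT_TEST option builds lib/packing_test.c from the diffstat above. As a rough sketch of what one case in such a suite looks like, assuming the pack()/unpack() wrappers this series exercises (the test name and field boundaries here are illustrative, not taken from the real suite):

	#include <kunit/test.h>
	#include <linux/packing.h>

	/* Pack a value into bits 47..16 of an 8-byte buffer and read it
	 * back, expecting a lossless round trip with no quirks set. */
	static void packing_roundtrip_example(struct kunit *test)
	{
		u64 in = 0x1234, out = 0;
		u8 buf[8] = {};

		KUNIT_ASSERT_EQ(test, 0, pack(buf, in, 47, 16, sizeof(buf), 0));
		KUNIT_ASSERT_EQ(test, 0, unpack(buf, &out, 47, 16, sizeof(buf), 0));
		KUNIT_EXPECT_EQ(test, in, out);
	}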
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6798bbbcbd32..5d9eca035d47 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1329,19 +1329,6 @@ config SCHEDSTATS
endmenu
-config DEBUG_TIMEKEEPING
- bool "Enable extra timekeeping sanity checking"
- help
- This option will enable additional timekeeping sanity checks
- which may be helpful when diagnosing issues where timekeeping
- problems are suspected.
-
- This may include checks in the timekeeping hotpaths, so this
- option may have a (very small) performance impact to some
- workloads.
-
- If unsure, say N.
-
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
depends on DEBUG_KERNEL && PREEMPTION && TRACE_IRQFLAGS_SUPPORT
@@ -1410,22 +1397,14 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.rst.
config PROVE_RAW_LOCK_NESTING
- bool "Enable raw_spinlock - spinlock nesting checks"
+ bool
depends on PROVE_LOCKING
- default n
+ default y
help
Enable the raw_spinlock vs. spinlock nesting checks which ensure
that the lock nesting rules for PREEMPT_RT enabled kernels are
not violated.
- NOTE: There are known nesting problems. So if you enable this
- option expect lockdep splats until these problems have been fully
- addressed which is work in progress. This config switch allows to
- identify and analyze these problems. It will be removed and the
- check permanently enabled once the main issues have been fixed.
-
- If unsure, select N.
-
config LOCK_STAT
bool "Lock usage statistics"
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
@@ -1906,7 +1885,7 @@ config STRICT_DEVMEM
bool "Filter access to /dev/mem"
depends on MMU && DEVMEM
depends on ARCH_HAS_DEVMEM_IS_ALLOWED || GENERIC_LIB_DEVMEM_IS_ALLOWED
- default y if PPC || X86 || ARM64
+ default y if PPC || X86 || ARM64 || S390
help
If this option is disabled, you allow userspace (root) access to all
of memory, including kernel and userspace memory. Accidental
@@ -2116,6 +2095,16 @@ config FAIL_SUNRPC
Provide fault-injection capability for SunRPC and
its consumers.
+config FAIL_SKB_REALLOC
+ bool "Fault-injection capability forcing skb to reallocate"
+ depends on FAULT_INJECTION_DEBUG_FS
+ help
+ Provide fault-injection capability that forces the skb to be
+ reallocated, catching possible invalid pointers to the skb.
+
+ For more information, check
+ Documentation/dev-tools/fault-injection/fault-injection.rst
+
config FAULT_INJECTION_CONFIGFS
bool "Configfs interface for fault-injection capabilities"
depends on FAULT_INJECTION
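
The PROVE_RAW_LOCK_NESTING hunk above turns the check from an opt-in option into an always-enabled one, the step the removed help text anticipated once the known nesting problems were addressed. The rule it enforces: on PREEMPT_RT a spinlock_t becomes a sleeping lock, so it must never be acquired inside a raw_spinlock_t critical section. A minimal sketch of the pattern lockdep flags (lock names hypothetical):

	static DEFINE_RAW_SPINLOCK(raw_lock);
	static DEFINE_SPINLOCK(normal_lock);

	static void bad_nesting_example(void)
	{
		raw_spin_lock(&raw_lock);
		/* Invalid: on PREEMPT_RT normal_lock can sleep, but we sit
		 * in a non-preemptible raw section. With this check enabled
		 * lockdep reports it as an invalid wait context. */
		spin_lock(&normal_lock);
		spin_unlock(&normal_lock);
		raw_spin_unlock(&raw_lock);
	}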
diff --git a/lib/Makefile b/lib/Makefile
index 773adf88af41..b393dd8151e2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -35,7 +35,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
nmi_backtrace.o win_minmax.o memcat_p.o \
- buildid.o objpool.o union_find.o
+ buildid.o objpool.o union_find.o iomem_copy.o
lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -154,6 +154,7 @@ obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
obj-$(CONFIG_BITREVERSE) += bitrev.o
obj-$(CONFIG_LINEAR_RANGES) += linear_ranges.o
obj-$(CONFIG_PACKING) += packing.o
+obj-$(CONFIG_PACKING_KUNIT_TEST) += packing_test.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
diff --git a/lib/checksum.c b/lib/checksum.c
index 6860d6b05a17..025ba546e1ec 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -34,15 +34,6 @@
#include <asm/byteorder.h>
#ifndef do_csum
-static inline unsigned short from32to16(unsigned int x)
-{
- /* add up 16-bit and 16-bit for 16+c bit */
- x = (x & 0xffff) + (x >> 16);
- /* add up carry.. */
- x = (x & 0xffff) + (x >> 16);
- return x;
-}
-
static unsigned int do_csum(const unsigned char *buff, int len)
{
int odd;
@@ -90,7 +81,7 @@ static unsigned int do_csum(const unsigned char *buff, int len)
#else
result += (*buff << 8);
#endif
- result = from32to16(result);
+ result = csum_from32to16(result);
if (odd)
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
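
The open-coded from32to16() helper removed here is replaced by the generic csum_from32to16(). The fold itself is two rounds of adding the 16-bit halves of the 32-bit sum, so a carry out of the first addition is absorbed by the second. A worked sketch of the same arithmetic:

	/* Folding 0xffff0003: pass one gives 0x0003 + 0xffff = 0x10002,
	 * pass two folds the carry back in: 0x0002 + 0x1 = 0x0003. */
	static unsigned short fold32to16_example(unsigned int x)
	{
		x = (x & 0xffff) + (x >> 16);
		x = (x & 0xffff) + (x >> 16);
		return x;
	}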
diff --git a/lib/crc32.c b/lib/crc32.c
index 5649847d0a8d..ff587fee3893 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -205,7 +205,11 @@ EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
+EXPORT_SYMBOL(crc32_le_base);
+
u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+EXPORT_SYMBOL(__crc32c_le_base);
+
u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be);
/*
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 969baab8c805..01fac1cd05a1 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -58,3 +58,5 @@ libcurve25519-y += curve25519-selftest.o
endif
obj-$(CONFIG_MPILIB) += mpi/
+
+obj-$(CONFIG_CRYPTO_MANAGER_EXTRA_TESTS) += simd.o
diff --git a/lib/crypto/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c
index 835a2f0622a0..934d81311360 100644
--- a/lib/crypto/mpi/mpi-bit.c
+++ b/lib/crypto/mpi/mpi-bit.c
@@ -95,6 +95,7 @@ int mpi_set_bit(MPI a, unsigned int n)
a->d[limbno] |= (A_LIMB_1<<bitno);
return 0;
}
+EXPORT_SYMBOL_GPL(mpi_set_bit);
/*
* Shift A by N bits to the right.
diff --git a/lib/crypto/simd.c b/lib/crypto/simd.c
new file mode 100644
index 000000000000..9c36cb3bb49c
--- /dev/null
+++ b/lib/crypto/simd.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SIMD testing utility functions
+ *
+ * Copyright 2024 Google LLC
+ */
+
+#include <crypto/internal/simd.h>
+
+DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
+EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
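
The new file gives the test-only per-CPU flag a home in lib/crypto/ so it is built whenever CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is set (see the Makefile hunk above). The flag is consumed by crypto_simd_usable(); roughly, per include/crypto/internal/simd.h (a sketch of the logic, not a verbatim copy):

	static bool simd_usable_example(void)
	{
		/* Take the scalar fallback when the arch forbids SIMD in
		 * this context, or when the extra self-tests have disabled
		 * it to exercise the non-SIMD code path. */
		return may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test);
	}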
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 5ce473ad499b..7f50c4480a4e 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -7,25 +7,30 @@
#define pr_fmt(fmt) "ODEBUG: " fmt
+#include <linux/cpu.h>
#include <linux/debugobjects.h>
-#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/hash.h>
+#include <linux/kmemleak.h>
#include <linux/sched.h>
+#include <linux/sched/loadavg.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
-#include <linux/debugfs.h>
#include <linux/slab.h>
-#include <linux/hash.h>
-#include <linux/kmemleak.h>
-#include <linux/cpu.h>
+#include <linux/static_key.h>
#define ODEBUG_HASH_BITS 14
#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
-#define ODEBUG_POOL_SIZE 1024
-#define ODEBUG_POOL_MIN_LEVEL 256
-#define ODEBUG_POOL_PERCPU_SIZE 64
+/* Must be power of two */
#define ODEBUG_BATCH_SIZE 16
+/* Initial values. Must all be a multiple of batch size */
+#define ODEBUG_POOL_SIZE (64 * ODEBUG_BATCH_SIZE)
+#define ODEBUG_POOL_MIN_LEVEL (ODEBUG_POOL_SIZE / 4)
+
+#define ODEBUG_POOL_PERCPU_SIZE (8 * ODEBUG_BATCH_SIZE)
+
#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
@@ -35,7 +40,7 @@
* frequency of 10Hz and about 1024 objects for each freeing operation.
* So it is freeing at most 10k debug objects per second.
*/
-#define ODEBUG_FREE_WORK_MAX 1024
+#define ODEBUG_FREE_WORK_MAX (1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)
struct debug_bucket {
@@ -43,16 +48,24 @@ struct debug_bucket {
raw_spinlock_t lock;
};
-/*
- * Debug object percpu free list
- * Access is protected by disabling irq
- */
-struct debug_percpu_free {
- struct hlist_head free_objs;
- int obj_free;
+struct pool_stats {
+ unsigned int cur_used;
+ unsigned int max_used;
+ unsigned int min_fill;
};
-static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
+struct obj_pool {
+ struct hlist_head objects;
+ unsigned int cnt;
+ unsigned int min_cnt;
+ unsigned int max_cnt;
+ struct pool_stats stats;
+} ____cacheline_aligned;
+
+
+static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
+ .max_cnt = ODEBUG_POOL_PERCPU_SIZE,
+};
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
@@ -60,37 +73,32 @@ static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
static DEFINE_RAW_SPINLOCK(pool_lock);
-static HLIST_HEAD(obj_pool);
-static HLIST_HEAD(obj_to_free);
+static struct obj_pool pool_global = {
+ .min_cnt = ODEBUG_POOL_MIN_LEVEL,
+ .max_cnt = ODEBUG_POOL_SIZE,
+ .stats = {
+ .min_fill = ODEBUG_POOL_SIZE,
+ },
+};
-/*
- * Because of the presence of percpu free pools, obj_pool_free will
- * under-count those in the percpu free pools. Similarly, obj_pool_used
- * will over-count those in the percpu free pools. Adjustments will be
- * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
- * can be off.
- */
-static int __data_racy obj_pool_min_free = ODEBUG_POOL_SIZE;
-static int __data_racy obj_pool_free = ODEBUG_POOL_SIZE;
-static int obj_pool_used;
-static int __data_racy obj_pool_max_used;
+static struct obj_pool pool_to_free = {
+ .max_cnt = UINT_MAX,
+};
+
+static HLIST_HEAD(pool_boot);
+
+static unsigned long avg_usage;
static bool obj_freeing;
-/* The number of objs on the global free list */
-static int obj_nr_tofree;
static int __data_racy debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused debug_objects_maxchecked __read_mostly;
static int __data_racy debug_objects_fixups __read_mostly;
static int __data_racy debug_objects_warnings __read_mostly;
-static int __data_racy debug_objects_enabled __read_mostly
+static bool __data_racy debug_objects_enabled __read_mostly
= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-static int debug_objects_pool_size __ro_after_init
- = ODEBUG_POOL_SIZE;
-static int debug_objects_pool_min_level __ro_after_init
- = ODEBUG_POOL_MIN_LEVEL;
-static const struct debug_obj_descr *descr_test __read_mostly;
-static struct kmem_cache *obj_cache __ro_after_init;
+static const struct debug_obj_descr *descr_test __read_mostly;
+static struct kmem_cache *obj_cache __ro_after_init;
/*
* Track numbers of kmem_cache_alloc()/free() calls done.
@@ -101,19 +109,20 @@ static int __data_racy debug_objects_freed;
static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
+static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);
+
static int __init enable_object_debug(char *str)
{
- debug_objects_enabled = 1;
+ debug_objects_enabled = true;
return 0;
}
+early_param("debug_objects", enable_object_debug);
static int __init disable_object_debug(char *str)
{
- debug_objects_enabled = 0;
+ debug_objects_enabled = false;
return 0;
}
-
-early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
static const char *obj_states[ODEBUG_STATE_MAX] = {
@@ -125,61 +134,280 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
[ODEBUG_STATE_NOTAVAILABLE] = "not available",
};
-static void fill_pool(void)
+static __always_inline unsigned int pool_count(struct obj_pool *pool)
+{
+ return READ_ONCE(pool->cnt);
+}
+
+static __always_inline bool pool_should_refill(struct obj_pool *pool)
+{
+ return pool_count(pool) < pool->min_cnt;
+}
+
+static __always_inline bool pool_must_refill(struct obj_pool *pool)
+{
+ return pool_count(pool) < pool->min_cnt / 2;
+}
+
+static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
- gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
+ struct hlist_node *last, *next_batch, *first_batch;
struct debug_obj *obj;
- unsigned long flags;
- if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
+ if (dst->cnt >= dst->max_cnt || !src->cnt)
+ return false;
+
+ first_batch = src->objects.first;
+ obj = hlist_entry(first_batch, typeof(*obj), node);
+ last = obj->batch_last;
+ next_batch = last->next;
+
+ /* Move the next batch to the front of the source pool */
+ src->objects.first = next_batch;
+ if (next_batch)
+ next_batch->pprev = &src->objects.first;
+
+ /* Add the extracted batch to the destination pool */
+ last->next = dst->objects.first;
+ if (last->next)
+ last->next->pprev = &last->next;
+ first_batch->pprev = &dst->objects.first;
+ dst->objects.first = first_batch;
+
+ WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
+ WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
+ return true;
+}
+
+static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
+{
+ struct hlist_node *last;
+ struct debug_obj *obj;
+
+ if (dst->cnt >= dst->max_cnt)
+ return false;
+
+ obj = hlist_entry(head->first, typeof(*obj), node);
+ last = obj->batch_last;
+
+ hlist_splice_init(head, last, &dst->objects);
+ WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
+ return true;
+}
+
+static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
+{
+ struct hlist_node *last, *next;
+ struct debug_obj *obj;
+
+ if (!src->cnt)
+ return false;
+
+ /* Move the complete list to the head */
+ hlist_move_list(&src->objects, head);
+
+ obj = hlist_entry(head->first, typeof(*obj), node);
+ last = obj->batch_last;
+ next = last->next;
+ /* Disconnect the batch from the list */
+ last->next = NULL;
+
+ /* Move the node after last back to the source pool. */
+ src->objects.first = next;
+ if (next)
+ next->pprev = &src->objects.first;
+
+ WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
+ return true;
+}
+
+static struct debug_obj *__alloc_object(struct hlist_head *list)
+{
+ struct debug_obj *obj;
+
+ if (unlikely(!list->first))
+ return NULL;
+
+ obj = hlist_entry(list->first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ return obj;
+}
+
+static void pcpu_refill_stats(void)
+{
+ struct pool_stats *stats = &pool_global.stats;
+
+ WRITE_ONCE(stats->cur_used, stats->cur_used + ODEBUG_BATCH_SIZE);
+
+ if (stats->cur_used > stats->max_used)
+ stats->max_used = stats->cur_used;
+
+ if (pool_global.cnt < stats->min_fill)
+ stats->min_fill = pool_global.cnt;
+}
+
+static struct debug_obj *pcpu_alloc(void)
+{
+ struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
+
+ lockdep_assert_irqs_disabled();
+
+ for (;;) {
+ struct debug_obj *obj = __alloc_object(&pcp->objects);
+
+ if (likely(obj)) {
+ pcp->cnt--;
+ /*
+ * If this emptied a batch try to refill from the
+ * free pool. Don't do that if this was the top-most
+ * batch as pcpu_free() expects the per CPU pool
+ * to be less than ODEBUG_POOL_PERCPU_SIZE.
+ */
+ if (unlikely(pcp->cnt < (ODEBUG_POOL_PERCPU_SIZE - ODEBUG_BATCH_SIZE) &&
+ !(pcp->cnt % ODEBUG_BATCH_SIZE))) {
+ /*
+ * Don't try to allocate from the regular pool here
+ * to not exhaust it prematurely.
+ */
+ if (pool_count(&pool_to_free)) {
+ guard(raw_spinlock)(&pool_lock);
+ pool_move_batch(pcp, &pool_to_free);
+ pcpu_refill_stats();
+ }
+ }
+ return obj;
+ }
+
+ guard(raw_spinlock)(&pool_lock);
+ if (!pool_move_batch(pcp, &pool_to_free)) {
+ if (!pool_move_batch(pcp, &pool_global))
+ return NULL;
+ }
+ pcpu_refill_stats();
+ }
+}
+
+static void pcpu_free(struct debug_obj *obj)
+{
+ struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
+ struct debug_obj *first;
+
+ lockdep_assert_irqs_disabled();
+
+ if (!(pcp->cnt % ODEBUG_BATCH_SIZE)) {
+ obj->batch_last = &obj->node;
+ } else {
+ first = hlist_entry(pcp->objects.first, typeof(*first), node);
+ obj->batch_last = first->batch_last;
+ }
+ hlist_add_head(&obj->node, &pcp->objects);
+ pcp->cnt++;
+
+ /* Pool full ? */
+ if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
return;
+ /* Remove a batch from the per CPU pool */
+ guard(raw_spinlock)(&pool_lock);
+ /* Try to fit the batch into the pool_global first */
+ if (!pool_move_batch(&pool_global, pcp))
+ pool_move_batch(&pool_to_free, pcp);
+ WRITE_ONCE(pool_global.stats.cur_used, pool_global.stats.cur_used - ODEBUG_BATCH_SIZE);
+}
+
+static void free_object_list(struct hlist_head *head)
+{
+ struct hlist_node *tmp;
+ struct debug_obj *obj;
+ int cnt = 0;
+
+ hlist_for_each_entry_safe(obj, tmp, head, node) {
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ cnt++;
+ }
+ debug_objects_freed += cnt;
+}
+
+static void fill_pool_from_freelist(void)
+{
+ static unsigned long state;
+
/*
* Reuse objs from the global obj_to_free list; they will be
* reinitialized when allocating.
- *
- * obj_nr_tofree is checked locklessly; the READ_ONCE() pairs with
- * the WRITE_ONCE() in pool_lock critical sections.
*/
- if (READ_ONCE(obj_nr_tofree)) {
- raw_spin_lock_irqsave(&pool_lock, flags);
- /*
- * Recheck with the lock held as the worker thread might have
- * won the race and freed the global free list already.
- */
- while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
- obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
- hlist_del(&obj->node);
- WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
- hlist_add_head(&obj->node, &obj_pool);
- WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ if (!pool_count(&pool_to_free))
+ return;
+
+ /*
+ * Prevent the context from being scheduled or interrupted after
+ * setting the state flag.
+ */
+ guard(irqsave)();
+
+ /*
+ * Avoid lock contention on &pool_lock and avoid making the cache
+ * line exclusive by testing the bit before attempting to set it.
+ */
+ if (test_bit(0, &state) || test_and_set_bit(0, &state))
+ return;
+
+ /* Avoid taking the lock when there is no work to do */
+ while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
+ guard(raw_spinlock)(&pool_lock);
+ /* Move a batch if possible */
+ pool_move_batch(&pool_global, &pool_to_free);
+ }
+ clear_bit(0, &state);
+}
+
+static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
+{
+ struct hlist_node *last = NULL;
+ struct debug_obj *obj;
+
+ for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+ obj = kmem_cache_zalloc(cache, gfp);
+ if (!obj) {
+ free_object_list(head);
+ return false;
}
- raw_spin_unlock_irqrestore(&pool_lock, flags);
+ debug_objects_allocated++;
+
+ if (!last)
+ last = &obj->node;
+ obj->batch_last = last;
+
+ hlist_add_head(&obj->node, head);
}
+ return true;
+}
+
+static void fill_pool(void)
+{
+ static atomic_t cpus_allocating;
- if (unlikely(!obj_cache))
+ /*
+ * Avoid allocation and lock contention when:
+ * - One other CPU is already allocating
+ * - the global pool has not reached the critical level yet
+ */
+ if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
return;
- while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
- struct debug_obj *new[ODEBUG_BATCH_SIZE];
- int cnt;
+ atomic_inc(&cpus_allocating);
+ while (pool_should_refill(&pool_global)) {
+ HLIST_HEAD(head);
- for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
- new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
- if (!new[cnt])
- break;
- }
- if (!cnt)
- return;
+ if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
+ break;
- raw_spin_lock_irqsave(&pool_lock, flags);
- while (cnt) {
- hlist_add_head(&new[--cnt]->node, &obj_pool);
- debug_objects_allocated++;
- WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
- }
- raw_spin_unlock_irqrestore(&pool_lock, flags);
+ guard(raw_spinlock_irqsave)(&pool_lock);
+ if (!pool_push_batch(&pool_global, &head))
+ pool_push_batch(&pool_to_free, &head);
}
+ atomic_dec(&cpus_allocating);
}
/*
@@ -201,72 +429,37 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
return NULL;
}
-/*
- * Allocate a new object from the hlist
- */
-static struct debug_obj *__alloc_object(struct hlist_head *list)
+static void calc_usage(void)
{
- struct debug_obj *obj = NULL;
+ static DEFINE_RAW_SPINLOCK(avg_lock);
+ static unsigned long avg_period;
+ unsigned long cur, now = jiffies;
- if (list->first) {
- obj = hlist_entry(list->first, typeof(*obj), node);
- hlist_del(&obj->node);
- }
+ if (!time_after_eq(now, READ_ONCE(avg_period)))
+ return;
- return obj;
+ if (!raw_spin_trylock(&avg_lock))
+ return;
+
+ WRITE_ONCE(avg_period, now + msecs_to_jiffies(10));
+ cur = READ_ONCE(pool_global.stats.cur_used) * ODEBUG_FREE_WORK_MAX;
+ WRITE_ONCE(avg_usage, calc_load(avg_usage, EXP_5, cur));
+ raw_spin_unlock(&avg_lock);
}
-static struct debug_obj *
-alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
+static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
+ const struct debug_obj_descr *descr)
{
- struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
struct debug_obj *obj;
- if (likely(obj_cache)) {
- obj = __alloc_object(&percpu_pool->free_objs);
- if (obj) {
- percpu_pool->obj_free--;
- goto init_obj;
- }
- }
-
- raw_spin_lock(&pool_lock);
- obj = __alloc_object(&obj_pool);
- if (obj) {
- obj_pool_used++;
- WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
-
- /*
- * Looking ahead, allocate one batch of debug objects and
- * put them into the percpu free pool.
- */
- if (likely(obj_cache)) {
- int i;
-
- for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
- struct debug_obj *obj2;
-
- obj2 = __alloc_object(&obj_pool);
- if (!obj2)
- break;
- hlist_add_head(&obj2->node,
- &percpu_pool->free_objs);
- percpu_pool->obj_free++;
- obj_pool_used++;
- WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
- }
- }
-
- if (obj_pool_used > obj_pool_max_used)
- obj_pool_max_used = obj_pool_used;
+ calc_usage();
- if (obj_pool_free < obj_pool_min_free)
- obj_pool_min_free = obj_pool_free;
- }
- raw_spin_unlock(&pool_lock);
+ if (static_branch_likely(&obj_cache_enabled))
+ obj = pcpu_alloc();
+ else
+ obj = __alloc_object(&pool_boot);
-init_obj:
- if (obj) {
+ if (likely(obj)) {
obj->object = addr;
obj->descr = descr;
obj->state = ODEBUG_STATE_NONE;
@@ -276,142 +469,58 @@ init_obj:
return obj;
}
-/*
- * workqueue function to free objects.
- *
- * To reduce contention on the global pool_lock, the actual freeing of
- * debug objects will be delayed if the pool_lock is busy.
- */
+/* workqueue function to free objects. */
static void free_obj_work(struct work_struct *work)
{
- struct hlist_node *tmp;
- struct debug_obj *obj;
- unsigned long flags;
- HLIST_HEAD(tofree);
+ static unsigned long last_use_avg;
+ unsigned long cur_used, last_used, delta;
+ unsigned int max_free = 0;
WRITE_ONCE(obj_freeing, false);
- if (!raw_spin_trylock_irqsave(&pool_lock, flags))
- return;
- if (obj_pool_free >= debug_objects_pool_size)
- goto free_objs;
+ /* Rate limit freeing based on current use average */
+ cur_used = READ_ONCE(avg_usage);
+ last_used = last_use_avg;
+ last_use_avg = cur_used;
- /*
- * The objs on the pool list might be allocated before the work is
- * run, so recheck if pool list it full or not, if not fill pool
- * list from the global free list. As it is likely that a workload
- * may be gearing up to use more and more objects, don't free any
- * of them until the next round.
- */
- while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
- obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
- hlist_del(&obj->node);
- hlist_add_head(&obj->node, &obj_pool);
- WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
- WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
- }
- raw_spin_unlock_irqrestore(&pool_lock, flags);
- return;
+ if (!pool_count(&pool_to_free))
+ return;
-free_objs:
- /*
- * Pool list is already full and there are still objs on the free
- * list. Move remaining free objs to a temporary list to free the
- * memory outside the pool_lock held region.
- */
- if (obj_nr_tofree) {
- hlist_move_list(&obj_to_free, &tofree);
- debug_objects_freed += obj_nr_tofree;
- WRITE_ONCE(obj_nr_tofree, 0);
+ if (cur_used <= last_used) {
+ delta = (last_used - cur_used) / ODEBUG_FREE_WORK_MAX;
+ max_free = min(delta, ODEBUG_FREE_WORK_MAX);
}
- raw_spin_unlock_irqrestore(&pool_lock, flags);
- hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
- hlist_del(&obj->node);
- kmem_cache_free(obj_cache, obj);
+ for (int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
+ HLIST_HEAD(tofree);
+
+ /* Acquire and drop the lock for each batch */
+ scoped_guard(raw_spinlock_irqsave, &pool_lock) {
+ if (!pool_to_free.cnt)
+ return;
+
+ /* Refill the global pool if possible */
+ if (pool_move_batch(&pool_global, &pool_to_free)) {
+ /* Don't free as there seems to be demand */
+ max_free = 0;
+ } else if (max_free) {
+ pool_pop_batch(&tofree, &pool_to_free);
+ max_free--;
+ } else {
+ return;
+ }
+ }
+ free_object_list(&tofree);
}
}
static void __free_object(struct debug_obj *obj)
{
- struct debug_obj *objs[ODEBUG_BATCH_SIZE];
- struct debug_percpu_free *percpu_pool;
- int lookahead_count = 0;
- unsigned long flags;
- bool work;
-
- local_irq_save(flags);
- if (!obj_cache)
- goto free_to_obj_pool;
-
- /*
- * Try to free it into the percpu pool first.
- */
- percpu_pool = this_cpu_ptr(&percpu_obj_pool);
- if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
- hlist_add_head(&obj->node, &percpu_pool->free_objs);
- percpu_pool->obj_free++;
- local_irq_restore(flags);
- return;
- }
-
- /*
- * As the percpu pool is full, look ahead and pull out a batch
- * of objects from the percpu pool and free them as well.
- */
- for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
- objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
- if (!objs[lookahead_count])
- break;
- percpu_pool->obj_free--;
- }
-
-free_to_obj_pool:
- raw_spin_lock(&pool_lock);
- work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
- (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
- obj_pool_used--;
-
- if (work) {
- WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
- hlist_add_head(&obj->node, &obj_to_free);
- if (lookahead_count) {
- WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
- obj_pool_used -= lookahead_count;
- while (lookahead_count) {
- hlist_add_head(&objs[--lookahead_count]->node,
- &obj_to_free);
- }
- }
-
- if ((obj_pool_free > debug_objects_pool_size) &&
- (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
- int i;
-
- /*
- * Free one more batch of objects from obj_pool.
- */
- for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
- obj = __alloc_object(&obj_pool);
- hlist_add_head(&obj->node, &obj_to_free);
- WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
- WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
- }
- }
- } else {
- WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
- hlist_add_head(&obj->node, &obj_pool);
- if (lookahead_count) {
- WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
- obj_pool_used -= lookahead_count;
- while (lookahead_count) {
- hlist_add_head(&objs[--lookahead_count]->node,
- &obj_pool);
- }
- }
- }
- raw_spin_unlock(&pool_lock);
- local_irq_restore(flags);
+ guard(irqsave)();
+ if (static_branch_likely(&obj_cache_enabled))
+ pcpu_free(obj);
+ else
+ hlist_add_head(&obj->node, &pool_boot);
}
/*
@@ -421,63 +530,52 @@ free_to_obj_pool:
static void free_object(struct debug_obj *obj)
{
__free_object(obj);
- if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
+ if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
WRITE_ONCE(obj_freeing, true);
schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
}
}
-#ifdef CONFIG_HOTPLUG_CPU
-static int object_cpu_offline(unsigned int cpu)
+static void put_objects(struct hlist_head *list)
{
- struct debug_percpu_free *percpu_pool;
struct hlist_node *tmp;
struct debug_obj *obj;
- unsigned long flags;
- /* Remote access is safe as the CPU is dead already */
- percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
- hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
+ /*
+ * Using free_object() puts the objects into reuse or schedules
+ * them for freeing and it gets all the accounting correct.
+ */
+ hlist_for_each_entry_safe(obj, tmp, list, node) {
hlist_del(&obj->node);
- kmem_cache_free(obj_cache, obj);
+ free_object(obj);
}
+}
- raw_spin_lock_irqsave(&pool_lock, flags);
- obj_pool_used -= percpu_pool->obj_free;
- debug_objects_freed += percpu_pool->obj_free;
- raw_spin_unlock_irqrestore(&pool_lock, flags);
-
- percpu_pool->obj_free = 0;
+#ifdef CONFIG_HOTPLUG_CPU
+static int object_cpu_offline(unsigned int cpu)
+{
+ /* Remote access is safe as the CPU is dead already */
+ struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);
+ put_objects(&pcp->objects);
+ pcp->cnt = 0;
return 0;
}
#endif
-/*
- * We run out of memory. That means we probably have tons of objects
- * allocated.
- */
+/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
struct debug_bucket *db = obj_hash;
- struct hlist_node *tmp;
HLIST_HEAD(freelist);
- struct debug_obj *obj;
- unsigned long flags;
- int i;
pr_warn("Out of memory. ODEBUG disabled\n");
- for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
- raw_spin_lock_irqsave(&db->lock, flags);
- hlist_move_list(&db->list, &freelist);
- raw_spin_unlock_irqrestore(&db->lock, flags);
+ for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+ scoped_guard(raw_spinlock_irqsave, &db->lock)
+ hlist_move_list(&db->list, &freelist);
- /* Now free them */
- hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
- hlist_del(&obj->node);
- free_object(obj);
- }
+ put_objects(&freelist);
}
}
@@ -592,12 +690,24 @@ static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket
}
/* Out of memory. Do the cleanup outside of the locked region */
- debug_objects_enabled = 0;
+ debug_objects_enabled = false;
return NULL;
}
static void debug_objects_fill_pool(void)
{
+ if (!static_branch_likely(&obj_cache_enabled))
+ return;
+
+ if (likely(!pool_should_refill(&pool_global)))
+ return;
+
+ /* Try reusing objects from obj_to_free_list */
+ fill_pool_from_freelist();
+
+ if (likely(!pool_should_refill(&pool_global)))
+ return;
+
/*
* On RT enabled kernels the pool refill must happen in preemptible
* context -- for !RT kernels we rely on the fact that spinlock_t and
@@ -1007,7 +1117,7 @@ repeat:
debug_objects_maxchecked = objs_checked;
/* Schedule work to actually kmem_cache_free() objects */
- if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
+ if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
WRITE_ONCE(obj_freeing, true);
schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
}
@@ -1024,23 +1134,33 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
static int debug_stats_show(struct seq_file *m, void *v)
{
- int cpu, obj_percpu_free = 0;
+ unsigned int cpu, pool_used, pcp_free = 0;
+ /*
+ * pool_global.stats.cur_used is the number of batches currently
+ * handed out to per CPU pools. Convert it to number of objects
+ * and subtract the number of free objects in the per CPU pools.
+ * As this is lockless the number is an estimate.
+ */
for_each_possible_cpu(cpu)
- obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
-
- seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
- seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
- seq_printf(m, "warnings :%d\n", debug_objects_warnings);
- seq_printf(m, "fixups :%d\n", debug_objects_fixups);
- seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
- seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
- seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
- seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
- seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
- seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree));
- seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
- seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
+ pcp_free += per_cpu(pool_pcpu.cnt, cpu);
+
+ pool_used = READ_ONCE(pool_global.stats.cur_used);
+ pcp_free = min(pool_used, pcp_free);
+ pool_used -= pcp_free;
+
+ seq_printf(m, "max_chain : %d\n", debug_objects_maxchain);
+ seq_printf(m, "max_checked : %d\n", debug_objects_maxchecked);
+ seq_printf(m, "warnings : %d\n", debug_objects_warnings);
+ seq_printf(m, "fixups : %d\n", debug_objects_fixups);
+ seq_printf(m, "pool_free : %u\n", pool_count(&pool_global) + pcp_free);
+ seq_printf(m, "pool_pcp_free : %u\n", pcp_free);
+ seq_printf(m, "pool_min_free : %u\n", data_race(pool_global.stats.min_fill));
+ seq_printf(m, "pool_used : %u\n", pool_used);
+ seq_printf(m, "pool_max_used : %u\n", data_race(pool_global.stats.max_used));
+ seq_printf(m, "on_free_list : %u\n", pool_count(&pool_to_free));
+ seq_printf(m, "objs_allocated: %d\n", debug_objects_allocated);
+ seq_printf(m, "objs_freed : %d\n", debug_objects_freed);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
@@ -1194,7 +1314,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
out:
raw_spin_unlock_irqrestore(&db->lock, flags);
if (res)
- debug_objects_enabled = 0;
+ debug_objects_enabled = false;
return res;
}
@@ -1209,7 +1329,7 @@ static __initconst const struct debug_obj_descr descr_type_test = {
static __initdata struct self_test obj = { .static_init = 0 };
-static void __init debug_objects_selftest(void)
+static bool __init debug_objects_selftest(void)
{
int fixups, oldfixups, warnings, oldwarnings;
unsigned long flags;
@@ -1278,9 +1398,10 @@ out:
descr_test = NULL;
local_irq_restore(flags);
+ return debug_objects_enabled;
}
#else
-static inline void debug_objects_selftest(void) { }
+static inline bool debug_objects_selftest(void) { return true; }
#endif
/*
@@ -1295,65 +1416,54 @@ void __init debug_objects_early_init(void)
for (i = 0; i < ODEBUG_HASH_SIZE; i++)
raw_spin_lock_init(&obj_hash[i].lock);
+ /* Keep early boot simple and add everything to the boot list */
for (i = 0; i < ODEBUG_POOL_SIZE; i++)
- hlist_add_head(&obj_static_pool[i].node, &obj_pool);
+ hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}
/*
- * Convert the statically allocated objects to dynamic ones:
+ * Convert the statically allocated objects to dynamic ones.
+ * debug_objects_mem_init() is called early so only one CPU is up and
+ * interrupts are disabled, which means it is safe to replace the active
+ * object references.
*/
-static int __init debug_objects_replace_static_objects(void)
+static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
struct debug_bucket *db = obj_hash;
struct hlist_node *tmp;
- struct debug_obj *obj, *new;
+ struct debug_obj *obj;
HLIST_HEAD(objects);
- int i, cnt = 0;
+ int i;
- for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
- obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
- if (!obj)
+ for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
+ if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
goto free;
- hlist_add_head(&obj->node, &objects);
+ pool_push_batch(&pool_global, &objects);
}
- debug_objects_allocated += i;
-
- /*
- * debug_objects_mem_init() is now called early that only one CPU is up
- * and interrupts have been disabled, so it is safe to replace the
- * active object references.
- */
-
- /* Remove the statically allocated objects from the pool */
- hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
- hlist_del(&obj->node);
- /* Move the allocated objects to the pool */
- hlist_move_list(&objects, &obj_pool);
+ /* Disconnect the boot pool. */
+ pool_boot.first = NULL;
/* Replace the active object references */
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
hlist_move_list(&db->list, &objects);
hlist_for_each_entry(obj, &objects, node) {
- new = hlist_entry(obj_pool.first, typeof(*obj), node);
- hlist_del(&new->node);
+ struct debug_obj *new = pcpu_alloc();
+
/* copy object data */
*new = *obj;
hlist_add_head(&new->node, &db->list);
- cnt++;
}
}
-
- pr_debug("%d of %d active objects replaced\n",
- cnt, obj_pool_used);
- return 0;
+ return true;
free:
- hlist_for_each_entry_safe(obj, tmp, &objects, node) {
+ /* Can't use free_object_list() as the cache is not populated yet */
+ hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
hlist_del(&obj->node);
- kmem_cache_free(obj_cache, obj);
+ kmem_cache_free(cache, obj);
}
- return -ENOMEM;
+ return false;
}
/*
@@ -1364,43 +1474,40 @@ free:
*/
void __init debug_objects_mem_init(void)
{
- int cpu, extras;
+ struct kmem_cache *cache;
+ int extras;
if (!debug_objects_enabled)
return;
- /*
- * Initialize the percpu object pools
- *
- * Initialization is not strictly necessary, but was done for
- * completeness.
- */
- for_each_possible_cpu(cpu)
- INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
+ if (!debug_objects_selftest())
+ return;
- obj_cache = kmem_cache_create("debug_objects_cache",
- sizeof (struct debug_obj), 0,
- SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
- NULL);
+ cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);
- if (!obj_cache || debug_objects_replace_static_objects()) {
- debug_objects_enabled = 0;
- kmem_cache_destroy(obj_cache);
- pr_warn("out of memory.\n");
+ if (!cache || !debug_objects_replace_static_objects(cache)) {
+ debug_objects_enabled = false;
+ pr_warn("Out of memory.\n");
return;
- } else
- debug_objects_selftest();
-
-#ifdef CONFIG_HOTPLUG_CPU
- cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
- object_cpu_offline);
-#endif
+ }
/*
- * Increase the thresholds for allocating and freeing objects
- * according to the number of possible CPUs available in the system.
+ * Adjust the thresholds for allocating and freeing objects
+ * according to the number of possible CPUs available in the
+ * system.
*/
extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
- debug_objects_pool_size += extras;
- debug_objects_pool_min_level += extras;
+ pool_global.max_cnt += extras;
+ pool_global.min_cnt += extras;
+
+ /* Everything worked. Expose the cache */
+ obj_cache = cache;
+ static_branch_enable(&obj_cache_enabled);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
+ object_cpu_offline);
+#endif
+ return;
}
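
Two patterns recur throughout the debugobjects rework above. First, all pool accounting now happens in ODEBUG_BATCH_SIZE (16) object batches: ODEBUG_FREE_WORK_MAX becomes 1024 / 16 = 64 batches, so one invocation of the free worker still releases at most 64 * 16 = 1024 objects, about 10k per second at the roughly 10Hz work delay, matching the comment in the header hunk. Second, open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs become scope-based guards from linux/cleanup.h. A minimal sketch of that idiom against the new pool_global (the helper itself is hypothetical):

	static unsigned int pool_cnt_snapshot_example(void)
	{
		/* The guard acquires pool_lock with interrupts disabled and
		 * releases it automatically when the scope is left, on every
		 * return path. */
		guard(raw_spinlock_irqsave)(&pool_lock);
		return pool_global.cnt;
	}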
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
index 83b65ac74d73..97c3d084ebf0 100644
--- a/lib/dim/dim.c
+++ b/lib/dim/dim.c
@@ -54,7 +54,8 @@ void dim_park_tired(struct dim *dim)
}
EXPORT_SYMBOL(dim_park_tired);
-bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+bool dim_calc_stats(const struct dim_sample *start,
+ const struct dim_sample *end,
struct dim_stats *curr_stats)
{
/* u32 holds up to 71 minutes, should be enough */
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index d7e7028e9b19..d6aa09a979b3 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -347,7 +347,7 @@ static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
return dim->profile_ix != prev_ix;
}
-void net_dim(struct dim *dim, struct dim_sample end_sample)
+void net_dim(struct dim *dim, const struct dim_sample *end_sample)
{
struct dim_stats curr_stats;
u16 nevents;
@@ -355,11 +355,11 @@ void net_dim(struct dim *dim, struct dim_sample end_sample)
switch (dim->state) {
case DIM_MEASURE_IN_PROGRESS:
nevents = BIT_GAP(BITS_PER_TYPE(u16),
- end_sample.event_ctr,
+ end_sample->event_ctr,
dim->start_sample.event_ctr);
if (nevents < DIM_NEVENTS)
break;
- if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats))
+ if (!dim_calc_stats(&dim->start_sample, end_sample, &curr_stats))
break;
if (net_dim_decision(&curr_stats, dim)) {
dim->state = DIM_APPLY_NEW_PROFILE;
@@ -368,8 +368,8 @@ void net_dim(struct dim *dim, struct dim_sample end_sample)
}
fallthrough;
case DIM_START_MEASURE:
- dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
- end_sample.byte_ctr, &dim->start_sample);
+ dim_update_sample(end_sample->event_ctr, end_sample->pkt_ctr,
+ end_sample->byte_ctr, &dim->start_sample);
dim->state = DIM_MEASURE_IN_PROGRESS;
break;
case DIM_APPLY_NEW_PROFILE:
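
net_dim() now takes the end sample by const pointer instead of by value, so each caller switches from passing the struct to passing its address. A sketch of an updated driver call site (the surrounding function and counter names are hypothetical):

	static void rx_poll_done_example(struct dim *dim, u16 events,
					 u64 pkts, u64 bytes)
	{
		struct dim_sample sample = {};

		dim_update_sample(events, pkts, bytes, &sample);
		net_dim(dim, &sample);	/* was: net_dim(dim, sample) */
	}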
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index e49deddd3de9..c1b7638a594a 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -179,7 +179,7 @@ void dql_completed(struct dql *dql, unsigned int count)
dql->adj_limit = limit + completed;
dql->prev_ovlimit = ovlimit;
- dql->prev_last_obj_cnt = dql->last_obj_cnt;
+ dql->prev_last_obj_cnt = READ_ONCE(dql->last_obj_cnt);
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index f37f4d44faa9..837064b83a6c 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -2,7 +2,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/interval_tree.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/slab.h>
#include <asm/timex.h>
diff --git a/lib/iomem_copy.c b/lib/iomem_copy.c
new file mode 100644
index 000000000000..dec7eaea60e0
--- /dev/null
+++ b/lib/iomem_copy.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2024 Kalray, Inc. All Rights Reserved.
+ */
+
+#include <linux/align.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+
+#ifndef memset_io
+/**
+ * memset_io() - Set a range of I/O memory to a constant value
+ * @addr: The beginning of the I/O-memory range to set
+ * @val: The value to set the memory to
+ * @count: The number of bytes to set
+ *
+ * Set a range of I/O memory to a given value.
+ */
+void memset_io(volatile void __iomem *addr, int val, size_t count)
+{
+ long qc = (u8)val;
+
+ qc *= ~0UL / 0xff;
+
+ while (count && !IS_ALIGNED((long)addr, sizeof(long))) {
+ __raw_writeb(val, addr);
+ addr++;
+ count--;
+ }
+
+ while (count >= sizeof(long)) {
+#ifdef CONFIG_64BIT
+ __raw_writeq(qc, addr);
+#else
+ __raw_writel(qc, addr);
+#endif
+
+ addr += sizeof(long);
+ count -= sizeof(long);
+ }
+
+ while (count) {
+ __raw_writeb(val, addr);
+ addr++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(memset_io);
+#endif
+
+#ifndef memcpy_fromio
+/**
+ * memcpy_fromio() - Copy a block of data from I/O memory
+ * @dst: The (RAM) destination for the copy
+ * @src: The (I/O memory) source for the data
+ * @count: The number of bytes to copy
+ *
+ * Copy a block of data from I/O memory.
+ */
+void memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
+{
+ while (count && !IS_ALIGNED((long)src, sizeof(long))) {
+ *(u8 *)dst = __raw_readb(src);
+ src++;
+ dst++;
+ count--;
+ }
+
+ while (count >= sizeof(long)) {
+#ifdef CONFIG_64BIT
+ long val = __raw_readq(src);
+#else
+ long val = __raw_readl(src);
+#endif
+ put_unaligned(val, (long *)dst);
+
+
+ src += sizeof(long);
+ dst += sizeof(long);
+ count -= sizeof(long);
+ }
+
+ while (count) {
+ *(u8 *)dst = __raw_readb(src);
+ src++;
+ dst++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(memcpy_fromio);
+#endif
+
+#ifndef memcpy_toio
+/**
+ * memcpy_toio() - Copy a block of data into I/O memory
+ * @dst: The (I/O memory) destination for the copy
+ * @src: The (RAM) source for the data
+ * @count: The number of bytes to copy
+ *
+ * Copy a block of data to I/O memory.
+ */
+void memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
+{
+ while (count && !IS_ALIGNED((long)dst, sizeof(long))) {
+ __raw_writeb(*(u8 *)src, dst);
+ src++;
+ dst++;
+ count--;
+ }
+
+ while (count >= sizeof(long)) {
+ long val = get_unaligned((long *)src);
+#ifdef CONFIG_64BIT
+ __raw_writeq(val, dst);
+#else
+ __raw_writel(val, dst);
+#endif
+
+ src += sizeof(long);
+ dst += sizeof(long);
+ count -= sizeof(long);
+ }
+
+ while (count) {
+ __raw_writeb(*(u8 *)src, dst);
+ src++;
+ dst++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(memcpy_toio);
+#endif
+
+
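
The qc computation in memset_io() above broadcasts the fill byte across a native word: on 64-bit, ~0UL / 0xff is 0x0101010101010101, so multiplying by the byte value replicates it into all eight byte lanes. The same arithmetic as a standalone sketch:

	static unsigned long broadcast_byte_example(unsigned char b)
	{
		/* e.g. b = 0xab: 0xab * 0x0101010101010101 = 0xabab...ab */
		return (unsigned long)b * (~0UL / 0xff);
	}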
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 908e75a28d90..9ec806f989f2 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1682,8 +1682,8 @@ static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
}
/*
- * Extract a list of contiguous pages from an ITER_BVEC iterator. This does
- * not get references on the pages, nor does it get a pin on them.
+ * Extract a list of virtually contiguous pages from an ITER_BVEC iterator.
+ * This does not get references on the pages, nor does it get a pin on them.
*/
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
@@ -1691,35 +1691,59 @@ static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
- struct page **p, *page;
- size_t skip = i->iov_offset, offset, size;
- int k;
+ size_t skip = i->iov_offset, size = 0;
+ struct bvec_iter bi;
+ int k = 0;
- for (;;) {
- if (i->nr_segs == 0)
- return 0;
- size = min(maxsize, i->bvec->bv_len - skip);
- if (size)
- break;
+ if (i->nr_segs == 0)
+ return 0;
+
+ if (i->iov_offset == i->bvec->bv_len) {
i->iov_offset = 0;
i->nr_segs--;
i->bvec++;
skip = 0;
}
+ bi.bi_idx = 0;
+ bi.bi_size = maxsize;
+ bi.bi_bvec_done = skip;
+
+ maxpages = want_pages_array(pages, maxsize, skip, maxpages);
+
+ while (bi.bi_size && bi.bi_idx < i->nr_segs) {
+ struct bio_vec bv = bvec_iter_bvec(i->bvec, bi);
+
+ /*
+ * The iov_iter_extract_pages interface only allows an offset
+ * into the first page. Break out of the loop if we see an
+ * offset into subsequent pages, the caller will have to call
+ * iov_iter_extract_pages again for the remainder.
+ */
+ if (k) {
+ if (bv.bv_offset)
+ break;
+ } else {
+ *offset0 = bv.bv_offset;
+ }
- skip += i->bvec->bv_offset;
- page = i->bvec->bv_page + skip / PAGE_SIZE;
- offset = skip % PAGE_SIZE;
- *offset0 = offset;
+ (*pages)[k++] = bv.bv_page;
+ size += bv.bv_len;
- maxpages = want_pages_array(pages, size, offset, maxpages);
- if (!maxpages)
- return -ENOMEM;
- p = *pages;
- for (k = 0; k < maxpages; k++)
- p[k] = page + k;
+ if (k >= maxpages)
+ break;
+
+ /*
+ * We are done when the end of the bvec doesn't align to a page
+ * boundary as that would create a hole in the returned space.
+ * The caller will handle this with another call to
+ * iov_iter_extract_pages.
+ */
+ if (bv.bv_offset + bv.bv_len != PAGE_SIZE)
+ break;
+
+ bvec_iter_advance_single(i->bvec, &bi, bv.bv_len);
+ }
- size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
iov_iter_advance(i, size);
return size;
}
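
The rewrite above replaces the manual page arithmetic with the generic bvec_iter helpers, which hand back one single-page chunk at a time. The underlying iteration pattern as a self-contained sketch (assuming only the linux/bvec.h API):

	#include <linux/bvec.h>

	static void walk_bvecs_example(const struct bio_vec *bvec,
				       unsigned int nr_segs, size_t bytes)
	{
		struct bvec_iter bi = { .bi_size = bytes };

		while (bi.bi_size && bi.bi_idx < nr_segs) {
			/* bv describes at most one page worth of data */
			struct bio_vec bv = bvec_iter_bvec(bvec, bi);

			/* bv.bv_page, bv.bv_offset, bv.bv_len valid here */
			bvec_iter_advance_single(bvec, &bi, bv.bv_len);
		}
	}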
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index d548750a325a..af71911f4a07 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -181,7 +181,7 @@ void kunit_debugfs_create_suite(struct kunit_suite *suite)
* successfully.
*/
stream = alloc_string_stream(GFP_KERNEL);
- if (IS_ERR_OR_NULL(stream))
+ if (IS_ERR(stream))
return;
string_stream_set_append_newlines(stream, true);
@@ -189,7 +189,7 @@ void kunit_debugfs_create_suite(struct kunit_suite *suite)
kunit_suite_for_each_test_case(suite, test_case) {
stream = alloc_string_stream(GFP_KERNEL);
- if (IS_ERR_OR_NULL(stream))
+ if (IS_ERR(stream))
goto err;
string_stream_set_append_newlines(stream, true);
@@ -212,8 +212,11 @@ void kunit_debugfs_create_suite(struct kunit_suite *suite)
err:
string_stream_destroy(suite->log);
- kunit_suite_for_each_test_case(suite, test_case)
+ suite->log = NULL;
+ kunit_suite_for_each_test_case(suite, test_case) {
string_stream_destroy(test_case->log);
+ test_case->log = NULL;
+ }
}
void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 37e02be1e710..d9c781c859fd 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -805,6 +805,8 @@ static void kunit_device_driver_test(struct kunit *test)
struct device *test_device;
struct driver_test_state *test_state = kunit_kzalloc(test, sizeof(*test_state), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_state);
+
test->priv = test_state;
test_driver = kunit_driver_create(test, "my_driver");
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
index 7511442ea98f..7734e33156f9 100644
--- a/lib/kunit/string-stream-test.c
+++ b/lib/kunit/string-stream-test.c
@@ -9,6 +9,7 @@
#include <kunit/static_stub.h>
#include <kunit/test.h>
#include <linux/ktime.h>
+#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 6f6a5fc85b42..6e0c019f71b6 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2710,6 +2710,43 @@ static void local_lock_3B(void)
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline const char *rw_semaphore_lockdep_name(struct rw_semaphore *rwsem)
+{
+ return rwsem->dep_map.name;
+}
+#else
+static inline const char *rw_semaphore_lockdep_name(struct rw_semaphore *rwsem)
+{
+ return NULL;
+}
+#endif
+
+static void test_lockdep_set_subclass_name(void)
+{
+ const char *name_before = rw_semaphore_lockdep_name(&rwsem_X1);
+ const char *name_after;
+
+ lockdep_set_subclass(&rwsem_X1, 1);
+ name_after = rw_semaphore_lockdep_name(&rwsem_X1);
+ DEBUG_LOCKS_WARN_ON(name_before != name_after);
+}
+
+/*
+ * lockdep_set_subclass() should reuse the existing lock class name instead
+ * of creating a new one.
+ */
+static void lockdep_set_subclass_name_test(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | lockdep_set_subclass() name test|\n");
+ printk(" -----------------------------------\n");
+
+ print_testname("compare name before and after");
+ dotest(test_lockdep_set_subclass_name, SUCCESS, LOCKTYPE_RWSEM);
+ pr_cont("\n");
+}
+
static void local_lock_tests(void)
{
printk(" --------------------------------------------------------------------------\n");
@@ -2920,6 +2957,8 @@ void locking_selftest(void)
dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
pr_cont("\n");
+ lockdep_set_subclass_name_test();
+
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
debug_locks = 0;
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index 2ea564a40064..e29496a38d06 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -122,7 +122,7 @@ void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
*
* Traverse the io_range_list to find the registered node for @fwnode.
*/
-struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
+struct logic_pio_hwaddr *find_io_range_by_fwnode(const struct fwnode_handle *fwnode)
{
struct logic_pio_hwaddr *range, *found_range = NULL;
@@ -186,7 +186,7 @@ resource_size_t logic_pio_to_hwaddr(unsigned long pio)
*
* Returns Logical PIO value if successful, ~0UL otherwise
*/
-unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+unsigned long logic_pio_trans_hwaddr(const struct fwnode_handle *fwnode,
resource_size_t addr, resource_size_t size)
{
struct logic_pio_hwaddr *range;
diff --git a/lib/math/test_div64.c b/lib/math/test_div64.c
index c15edd688dd2..3cd699b654d9 100644
--- a/lib/math/test_div64.c
+++ b/lib/math/test_div64.c
@@ -26,6 +26,9 @@ static const u64 test_div64_dividends[] = {
0x0072db27380dd689,
0x0842f488162e2284,
0xf66745411d8ab063,
+ 0xfffffffffffffffb,
+ 0xfffffffffffffffc,
+ 0xffffffffffffffff,
};
#define SIZE_DIV64_DIVIDENDS ARRAY_SIZE(test_div64_dividends)
@@ -37,7 +40,10 @@ static const u64 test_div64_dividends[] = {
#define TEST_DIV64_DIVISOR_5 0x0008a880
#define TEST_DIV64_DIVISOR_6 0x003fd3ae
#define TEST_DIV64_DIVISOR_7 0x0b658fac
-#define TEST_DIV64_DIVISOR_8 0xdc08b349
+#define TEST_DIV64_DIVISOR_8 0x80000001
+#define TEST_DIV64_DIVISOR_9 0xdc08b349
+#define TEST_DIV64_DIVISOR_A 0xfffffffe
+#define TEST_DIV64_DIVISOR_B 0xffffffff
static const u32 test_div64_divisors[] = {
TEST_DIV64_DIVISOR_0,
@@ -49,13 +55,16 @@ static const u32 test_div64_divisors[] = {
TEST_DIV64_DIVISOR_6,
TEST_DIV64_DIVISOR_7,
TEST_DIV64_DIVISOR_8,
+ TEST_DIV64_DIVISOR_9,
+ TEST_DIV64_DIVISOR_A,
+ TEST_DIV64_DIVISOR_B,
};
#define SIZE_DIV64_DIVISORS ARRAY_SIZE(test_div64_divisors)
static const struct {
u64 quotient;
u32 remainder;
-} test_div64_results[SIZE_DIV64_DIVISORS][SIZE_DIV64_DIVIDENDS] = {
+} test_div64_results[SIZE_DIV64_DIVIDENDS][SIZE_DIV64_DIVISORS] = {
{
{ 0x0000000013045e47, 0x00000001 },
{ 0x000000000161596c, 0x00000030 },
@@ -65,6 +74,9 @@ static const struct {
{ 0x00000000000013c4, 0x0004ce80 },
{ 0x00000000000002ae, 0x001e143c },
{ 0x000000000000000f, 0x0033e56c },
+ { 0x0000000000000001, 0x2b27507f },
+ { 0x0000000000000000, 0xab275080 },
+ { 0x0000000000000000, 0xab275080 },
{ 0x0000000000000000, 0xab275080 },
}, {
{ 0x00000001c45c02d1, 0x00000000 },
@@ -75,7 +87,10 @@ static const struct {
{ 0x000000000001d637, 0x0004e5d9 },
{ 0x0000000000003fc9, 0x000713bb },
{ 0x0000000000000165, 0x029abe7d },
+ { 0x000000000000001f, 0x673c193a },
{ 0x0000000000000012, 0x6e9f7e37 },
+ { 0x000000000000000f, 0xe73c1977 },
+ { 0x000000000000000f, 0xe73c1968 },
}, {
{ 0x000000197a3a0cf7, 0x00000002 },
{ 0x00000001d9632e5c, 0x00000021 },
@@ -85,7 +100,10 @@ static const struct {
{ 0x00000000001a7bb3, 0x00072331 },
{ 0x00000000000397ad, 0x0002c61b },
{ 0x000000000000141e, 0x06ea2e89 },
+ { 0x00000000000001ca, 0x4c0a72e7 },
{ 0x000000000000010a, 0xab002ad7 },
+ { 0x00000000000000e5, 0x4c0a767b },
+ { 0x00000000000000e5, 0x4c0a7596 },
}, {
{ 0x0000017949e37538, 0x00000001 },
{ 0x0000001b62441f37, 0x00000055 },
@@ -95,7 +113,10 @@ static const struct {
{ 0x0000000001882ec6, 0x0005cbf9 },
{ 0x000000000035333b, 0x0017abdf },
{ 0x00000000000129f1, 0x0ab4520d },
+ { 0x0000000000001a87, 0x18ff0472 },
{ 0x0000000000000f6e, 0x8ac0ce9b },
+ { 0x0000000000000d43, 0x98ff397f },
+ { 0x0000000000000d43, 0x98ff2c3c },
}, {
{ 0x000011f321a74e49, 0x00000006 },
{ 0x0000014d8481d211, 0x0000005b },
@@ -105,7 +126,10 @@ static const struct {
{ 0x0000000012a88828, 0x00036c97 },
{ 0x000000000287f16f, 0x002c2a25 },
{ 0x00000000000e2cc7, 0x02d581e3 },
+ { 0x0000000000014318, 0x2ee07d7f },
{ 0x000000000000bbf4, 0x1ba08c03 },
+ { 0x000000000000a18c, 0x2ee303af },
+ { 0x000000000000a18c, 0x2ee26223 },
}, {
{ 0x0000d8db8f72935d, 0x00000005 },
{ 0x00000fbd5aed7a2e, 0x00000002 },
@@ -115,7 +139,10 @@ static const struct {
{ 0x00000000e16b20fa, 0x0002a14a },
{ 0x000000001e940d22, 0x00353b2e },
{ 0x0000000000ab40ac, 0x06fba6ba },
+ { 0x00000000000f3f70, 0x0af7eeda },
{ 0x000000000008debd, 0x72d98365 },
+ { 0x0000000000079fb8, 0x0b166dba },
+ { 0x0000000000079fb8, 0x0b0ece02 },
}, {
{ 0x000cc3045b8fc281, 0x00000000 },
{ 0x0000ed1f48b5c9fc, 0x00000079 },
@@ -125,7 +152,10 @@ static const struct {
{ 0x0000000d43fce827, 0x00082b09 },
{ 0x00000001ccaba11a, 0x0037e8dd },
{ 0x000000000a13f729, 0x0566dffd },
+ { 0x0000000000e5b64e, 0x3728203b },
{ 0x000000000085a14b, 0x23d36726 },
+ { 0x000000000072db27, 0x38f38cd7 },
+ { 0x000000000072db27, 0x3880b1b0 },
}, {
{ 0x00eafeb9c993592b, 0x00000001 },
{ 0x00110e5befa9a991, 0x00000048 },
@@ -135,7 +165,10 @@ static const struct {
{ 0x000000f4459740fc, 0x00084484 },
{ 0x0000002122c47bf9, 0x002ca446 },
{ 0x00000000b9936290, 0x004979c4 },
+ { 0x000000001085e910, 0x05a83974 },
{ 0x00000000099ca89d, 0x9db446bf },
+ { 0x000000000842f488, 0x26b40b94 },
+ { 0x000000000842f488, 0x1e71170c },
}, {
{ 0x1b60cece589da1d2, 0x00000001 },
{ 0x01fcb42be1453f5b, 0x0000004f },
@@ -145,7 +178,49 @@ static const struct {
{ 0x00001c757dfab350, 0x00048863 },
{ 0x000003dc4979c652, 0x00224ea7 },
{ 0x000000159edc3144, 0x06409ab3 },
+ { 0x00000001ecce8a7e, 0x30bc25e5 },
{ 0x000000011eadfee3, 0xa99c48a8 },
+ { 0x00000000f6674543, 0x0a593ae9 },
+ { 0x00000000f6674542, 0x13f1f5a5 },
+ }, {
+ { 0x1c71c71c71c71c71, 0x00000002 },
+ { 0x0210842108421084, 0x0000000b },
+ { 0x007f01fc07f01fc0, 0x000000fb },
+ { 0x00014245eabf1f9a, 0x0000a63d },
+ { 0x0000ffffffffffff, 0x0000fffb },
+ { 0x00001d913cecc509, 0x0007937b },
+ { 0x00000402c70c678f, 0x0005bfc9 },
+ { 0x00000016766cb70b, 0x045edf97 },
+ { 0x00000001fffffffb, 0x80000000 },
+ { 0x0000000129d84b3a, 0xa2e8fe71 },
+ { 0x0000000100000001, 0xfffffffd },
+ { 0x0000000100000000, 0xfffffffb },
+ }, {
+ { 0x1c71c71c71c71c71, 0x00000003 },
+ { 0x0210842108421084, 0x0000000c },
+ { 0x007f01fc07f01fc0, 0x000000fc },
+ { 0x00014245eabf1f9a, 0x0000a63e },
+ { 0x0000ffffffffffff, 0x0000fffc },
+ { 0x00001d913cecc509, 0x0007937c },
+ { 0x00000402c70c678f, 0x0005bfca },
+ { 0x00000016766cb70b, 0x045edf98 },
+ { 0x00000001fffffffc, 0x00000000 },
+ { 0x0000000129d84b3a, 0xa2e8fe72 },
+ { 0x0000000100000002, 0x00000000 },
+ { 0x0000000100000000, 0xfffffffc },
+ }, {
+ { 0x1c71c71c71c71c71, 0x00000006 },
+ { 0x0210842108421084, 0x0000000f },
+ { 0x007f01fc07f01fc0, 0x000000ff },
+ { 0x00014245eabf1f9a, 0x0000a641 },
+ { 0x0000ffffffffffff, 0x0000ffff },
+ { 0x00001d913cecc509, 0x0007937f },
+ { 0x00000402c70c678f, 0x0005bfcd },
+ { 0x00000016766cb70b, 0x045edf9b },
+ { 0x00000001fffffffc, 0x00000003 },
+ { 0x0000000129d84b3a, 0xa2e8fe75 },
+ { 0x0000000100000002, 0x00000003 },
+ { 0x0000000100000001, 0x00000000 },
},
};
@@ -208,6 +283,12 @@ static bool __init test_div64(void)
return false;
if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_8, i, 8))
return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_9, i, 9))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_A, i, 10))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_B, i, 11))
+ return false;
for (j = 0; j < SIZE_DIV64_DIVISORS; j++) {
if (!test_div64_one(dividend, test_div64_divisors[j],
i, j))
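
Editorial note: each { X, Y } pair in the expectation tables above is a precomputed
result for one fixed dividend and one divisor column. A minimal sketch of the kind of
comparison such an entry implies (illustrative only; it assumes the pair layout is
{ quotient, remainder } and uses only div64_u64_rem() from <linux/math64.h>):

	/* Illustrative: check one precomputed { quotient, remainder } pair. */
	static bool __init check_expectation(u64 dividend, u64 divisor,
					     u64 want_quot, u64 want_rem)
	{
		u64 rem, quot = div64_u64_rem(dividend, divisor, &rem);

		return quot == want_quot && rem == want_rem;
	}
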
diff --git a/lib/packing.c b/lib/packing.c
index 3f656167c17e..793942745e34 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -9,52 +9,53 @@
#include <linux/types.h>
#include <linux/bitrev.h>
-static int get_le_offset(int offset)
+/**
+ * calculate_box_addr - Determine physical location of byte in buffer
+ * @box: Index of byte within buffer seen as a logical big-endian big number
+ * @len: Size of buffer in bytes
+ * @quirks: mask of QUIRK_LSW32_IS_FIRST and QUIRK_LITTLE_ENDIAN
+ *
+ * The function interprets the buffer as a @len-byte big number and returns
+ * the physical offset of the logical octet @box within it. Internally, it
+ * treats the big number as groups of 4 bytes. If @len is not a multiple of 4,
+ * the last group may be shorter.
+ *
+ * @QUIRK_LSW32_IS_FIRST gives the ordering of groups of 4 octets relative to
+ * each other. If set, the most significant group of 4 octets is last in the
+ * buffer (and may be truncated if @len is not a multiple of 4).
+ *
+ * @QUIRK_LITTLE_ENDIAN gives the ordering of bytes within each group of 4.
+ * If set, the most significant byte is last in the group. If @len takes the
+ * form of 4k+3, the last group will only be able to represent 24 bits, and its
+ * most significant octet is byte 2.
+ *
+ * Return: the physical offset into the buffer corresponding to the logical box.
+ */
+static size_t calculate_box_addr(size_t box, size_t len, u8 quirks)
{
- int closest_multiple_of_4;
+ size_t offset_of_group, offset_in_group, this_group = box / 4;
+ size_t group_size;
- closest_multiple_of_4 = (offset / 4) * 4;
- offset -= closest_multiple_of_4;
- return closest_multiple_of_4 + (3 - offset);
-}
+ if (quirks & QUIRK_LSW32_IS_FIRST)
+ offset_of_group = this_group * 4;
+ else
+ offset_of_group = len - ((this_group + 1) * 4);
-static int get_reverse_lsw32_offset(int offset, size_t len)
-{
- int closest_multiple_of_4;
- int word_index;
-
- word_index = offset / 4;
- closest_multiple_of_4 = word_index * 4;
- offset -= closest_multiple_of_4;
- word_index = (len / 4) - word_index - 1;
- return word_index * 4 + offset;
-}
+ group_size = min(4, len - offset_of_group);
-static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
- int *box_end_bit, u8 *box_mask)
-{
- int box_bit_width = *box_start_bit - *box_end_bit + 1;
- int new_box_start_bit, new_box_end_bit;
-
- *to_write >>= *box_end_bit;
- *to_write = bitrev8(*to_write) >> (8 - box_bit_width);
- *to_write <<= *box_end_bit;
-
- new_box_end_bit = box_bit_width - *box_start_bit - 1;
- new_box_start_bit = box_bit_width - *box_end_bit - 1;
- *box_mask = GENMASK_ULL(new_box_start_bit, new_box_end_bit);
- *box_start_bit = new_box_start_bit;
- *box_end_bit = new_box_end_bit;
+ if (quirks & QUIRK_LITTLE_ENDIAN)
+ offset_in_group = box - this_group * 4;
+ else
+ offset_in_group = group_size - (box - this_group * 4) - 1;
+
+ return offset_of_group + offset_in_group;
}
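
Editorial note: to make the group arithmetic concrete, a worked example (not part of
the patch):

	/* len = 8, box = 5, no quirks: this_group = 5 / 4 = 1, so
	 * offset_of_group = 8 - (1 + 1) * 4 = 0 and group_size = 4.
	 * Big-endian order within the group gives offset_in_group =
	 * 4 - (5 - 4) - 1 = 2, i.e. box_addr = 2, matching the plain
	 * big-endian layout (logical byte 5 lives at 8 - 1 - 5).
	 * With QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN the same box
	 * maps to 1 * 4 + (5 - 4) = 5: a fully little-endian buffer.
	 */
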
/**
- * packing - Convert numbers (currently u64) between a packed and an unpacked
- * format. Unpacked means laid out in memory in the CPU's native
- * understanding of integers, while packed means anything else that
- * requires translation.
+ * pack - Pack u64 number into bitfield of buffer.
*
* @pbuf: Pointer to a buffer holding the packed value.
- * @uval: Pointer to an u64 holding the unpacked value.
+ * @uval: CPU-readable unpacked value to pack.
* @startbit: The index (in logical notation, compensated for quirks) where
* the packed value starts within pbuf. Must be larger than, or
* equal to, endbit.
@@ -62,79 +63,179 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
* the packed value ends within pbuf. Must be smaller than, or equal
* to, startbit.
* @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
- * @op: If PACK, then uval will be treated as const pointer and copied (packed)
- * into pbuf, between startbit and endbit.
- * If UNPACK, then pbuf will be treated as const pointer and the logical
- * value between startbit and endbit will be copied (unpacked) to uval.
* @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
* QUIRK_MSB_ON_THE_RIGHT.
*
* Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
- * correct usage, return code may be discarded.
- * If op is PACK, pbuf is modified.
- * If op is UNPACK, uval is modified.
+ * correct usage, return code may be discarded. The @pbuf memory will
+ * be modified on success.
*/
-int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
- enum packing_op op, u8 quirks)
+int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen,
+ u8 quirks)
{
- /* Number of bits for storing "uval"
- * also width of the field to access in the pbuf
- */
- u64 value_width;
/* Logical byte indices corresponding to the
* start and end of the field.
*/
int plogical_first_u8, plogical_last_u8, box;
+ /* width of the field to access in the pbuf */
+ u64 value_width;
- /* startbit is expected to be larger than endbit */
- if (startbit < endbit)
+ /* startbit is expected to be larger than endbit, and both are
+ * expected to be within the logically addressable range of the buffer.
+ */
+ if (unlikely(startbit < endbit || startbit >= BITS_PER_BYTE * pbuflen))
/* Invalid function call */
return -EINVAL;
value_width = startbit - endbit + 1;
- if (value_width > 64)
+ if (unlikely(value_width > 64))
return -ERANGE;
/* Check if "uval" fits in "value_width" bits.
* If value_width is 64, the check will fail, but any
* 64-bit uval will surely fit.
*/
- if (op == PACK && value_width < 64 && (*uval >= (1ull << value_width)))
+ if (unlikely(value_width < 64 && uval >= (1ull << value_width)))
/* Cannot store "uval" inside "value_width" bits.
* Truncating "uval" is most certainly not desirable,
* so simply erroring out is appropriate.
*/
return -ERANGE;
+ /* Iterate through an idealistic view of the pbuf as an u64 with
+ * no quirks, u8 by u8 (aligned at u8 boundaries), from high to low
+ * logical bit significance. "box" denotes the current logical u8.
+ */
+ plogical_first_u8 = startbit / BITS_PER_BYTE;
+ plogical_last_u8 = endbit / BITS_PER_BYTE;
+
+ for (box = plogical_first_u8; box >= plogical_last_u8; box--) {
+ /* Bit indices into the currently accessed 8-bit box */
+ size_t box_start_bit, box_end_bit, box_addr;
+ u8 box_mask;
+ /* Corresponding bits from the unpacked u64 parameter */
+ size_t proj_start_bit, proj_end_bit;
+ u64 proj_mask;
+ u64 pval;
+
+ /* This u8 may need to be accessed in its entirety
+ * (from bit 7 to bit 0), or not, depending on the
+ * input arguments startbit and endbit.
+ */
+ if (box == plogical_first_u8)
+ box_start_bit = startbit % BITS_PER_BYTE;
+ else
+ box_start_bit = 7;
+ if (box == plogical_last_u8)
+ box_end_bit = endbit % BITS_PER_BYTE;
+ else
+ box_end_bit = 0;
+
+ /* We have determined the box bit start and end.
+ * Now we calculate where this (masked) u8 box would fit
+ * in the unpacked (CPU-readable) u64 - the u8 box's
+ * projection onto the unpacked u64. Though the
+ * box is u8, the projection is u64 because it may fall
+ * anywhere within the unpacked u64.
+ */
+ proj_start_bit = ((box * BITS_PER_BYTE) + box_start_bit) - endbit;
+ proj_end_bit = ((box * BITS_PER_BYTE) + box_end_bit) - endbit;
+ proj_mask = GENMASK_ULL(proj_start_bit, proj_end_bit);
+ box_mask = GENMASK(box_start_bit, box_end_bit);
+
+ /* Determine the offset of the u8 box inside the pbuf,
+ * adjusted for quirks. The adjusted box_addr will be used for
+ * effective addressing inside the pbuf (so it's not
+ * logical any longer).
+ */
+ box_addr = calculate_box_addr(box, pbuflen, quirks);
+
+ /* Write to pbuf, read from uval */
+ pval = uval & proj_mask;
+ pval >>= proj_end_bit;
+ pval <<= box_end_bit;
+
+ if (quirks & QUIRK_MSB_ON_THE_RIGHT) {
+ pval = bitrev8(pval);
+ box_mask = bitrev8(box_mask);
+ }
+
+ ((u8 *)pbuf)[box_addr] &= ~box_mask;
+ ((u8 *)pbuf)[box_addr] |= pval;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(pack);
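
Editorial note: a minimal usage sketch for the new write path (illustrative; the
value and buffer size are ours):

	u8 buf[8] = {};
	int err;

	/* Store 0x2d in bits 47:40. With no quirks the buffer is one
	 * big-endian number, so bits 47:40 land in buf[2].
	 */
	err = pack(buf, 0x2d, 47, 40, sizeof(buf), 0);
	if (err)
		return err;	/* -EINVAL or -ERANGE on misuse */
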
+
+/**
+ * unpack - Unpack u64 number from packed buffer.
+ *
+ * @pbuf: Pointer to a buffer holding the packed value.
+ * @uval: Pointer to an u64 holding the unpacked value.
+ * @startbit: The index (in logical notation, compensated for quirks) where
+ * the packed value starts within pbuf. Must be larger than, or
+ * equal to, endbit.
+ * @endbit: The index (in logical notation, compensated for quirks) where
+ * the packed value ends within pbuf. Must be smaller than, or equal
+ * to, startbit.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
+ * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
+ * QUIRK_MSB_ON_THE_RIGHT.
+ *
+ * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
+ * correct usage, return code may be discarded. The @uval will be
+ * modified on success.
+ */
+int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
+ size_t pbuflen, u8 quirks)
+{
+ /* Logical byte indices corresponding to the
+ * start and end of the field.
+ */
+ int plogical_first_u8, plogical_last_u8, box;
+ /* width of the field to access in the pbuf */
+ u64 value_width;
+
+ /* startbit is expected to be larger than endbit, and both are
+ * expected to be within the logically addressable range of the buffer.
+ */
+ if (unlikely(startbit < endbit || startbit >= BITS_PER_BYTE * pbuflen))
+ /* Invalid function call */
+ return -EINVAL;
+
+ value_width = startbit - endbit + 1;
+ if (unlikely(value_width > 64))
+ return -ERANGE;
+
/* Initialize parameter */
- if (op == UNPACK)
- *uval = 0;
+ *uval = 0;
/* Iterate through an idealistic view of the pbuf as an u64 with
* no quirks, u8 by u8 (aligned at u8 boundaries), from high to low
* logical bit significance. "box" denotes the current logical u8.
*/
- plogical_first_u8 = startbit / 8;
- plogical_last_u8 = endbit / 8;
+ plogical_first_u8 = startbit / BITS_PER_BYTE;
+ plogical_last_u8 = endbit / BITS_PER_BYTE;
for (box = plogical_first_u8; box >= plogical_last_u8; box--) {
/* Bit indices into the currently accessed 8-bit box */
- int box_start_bit, box_end_bit, box_addr;
+ size_t box_start_bit, box_end_bit, box_addr;
u8 box_mask;
/* Corresponding bits from the unpacked u64 parameter */
- int proj_start_bit, proj_end_bit;
+ size_t proj_start_bit, proj_end_bit;
u64 proj_mask;
+ u64 pval;
/* This u8 may need to be accessed in its entirety
* (from bit 7 to bit 0), or not, depending on the
* input arguments startbit and endbit.
*/
if (box == plogical_first_u8)
- box_start_bit = startbit % 8;
+ box_start_bit = startbit % BITS_PER_BYTE;
else
box_start_bit = 7;
if (box == plogical_last_u8)
- box_end_bit = endbit % 8;
+ box_end_bit = endbit % BITS_PER_BYTE;
else
box_end_bit = 0;
@@ -145,57 +246,72 @@ int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
* box is u8, the projection is u64 because it may fall
* anywhere within the unpacked u64.
*/
- proj_start_bit = ((box * 8) + box_start_bit) - endbit;
- proj_end_bit = ((box * 8) + box_end_bit) - endbit;
+ proj_start_bit = ((box * BITS_PER_BYTE) + box_start_bit) - endbit;
+ proj_end_bit = ((box * BITS_PER_BYTE) + box_end_bit) - endbit;
proj_mask = GENMASK_ULL(proj_start_bit, proj_end_bit);
- box_mask = GENMASK_ULL(box_start_bit, box_end_bit);
+ box_mask = GENMASK(box_start_bit, box_end_bit);
/* Determine the offset of the u8 box inside the pbuf,
* adjusted for quirks. The adjusted box_addr will be used for
* effective addressing inside the pbuf (so it's not
* logical any longer).
*/
- box_addr = pbuflen - box - 1;
- if (quirks & QUIRK_LITTLE_ENDIAN)
- box_addr = get_le_offset(box_addr);
- if (quirks & QUIRK_LSW32_IS_FIRST)
- box_addr = get_reverse_lsw32_offset(box_addr,
- pbuflen);
-
- if (op == UNPACK) {
- u64 pval;
-
- /* Read from pbuf, write to uval */
- pval = ((u8 *)pbuf)[box_addr] & box_mask;
- if (quirks & QUIRK_MSB_ON_THE_RIGHT)
- adjust_for_msb_right_quirk(&pval,
- &box_start_bit,
- &box_end_bit,
- &box_mask);
-
- pval >>= box_end_bit;
- pval <<= proj_end_bit;
- *uval &= ~proj_mask;
- *uval |= pval;
- } else {
- u64 pval;
-
- /* Write to pbuf, read from uval */
- pval = (*uval) & proj_mask;
- pval >>= proj_end_bit;
- if (quirks & QUIRK_MSB_ON_THE_RIGHT)
- adjust_for_msb_right_quirk(&pval,
- &box_start_bit,
- &box_end_bit,
- &box_mask);
-
- pval <<= box_end_bit;
- ((u8 *)pbuf)[box_addr] &= ~box_mask;
- ((u8 *)pbuf)[box_addr] |= pval;
- }
+ box_addr = calculate_box_addr(box, pbuflen, quirks);
+
+ /* Read from pbuf, write to uval */
+ pval = ((u8 *)pbuf)[box_addr];
+
+ if (quirks & QUIRK_MSB_ON_THE_RIGHT)
+ pval = bitrev8(pval);
+
+ pval &= box_mask;
+
+ pval >>= box_end_bit;
+ pval <<= proj_end_bit;
+ *uval &= ~proj_mask;
+ *uval |= pval;
}
return 0;
}
+EXPORT_SYMBOL(unpack);
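
Editorial note: the symmetric read path, continuing the pack() sketch above:

	u64 uval;

	/* Read the field back; on success uval becomes 0x2d. */
	err = unpack(buf, &uval, 47, 40, sizeof(buf), 0);
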
+
+/**
+ * packing - Convert numbers (currently u64) between a packed and an unpacked
+ * format. Unpacked means laid out in memory in the CPU's native
+ * understanding of integers, while packed means anything else that
+ * requires translation.
+ *
+ * @pbuf: Pointer to a buffer holding the packed value.
+ * @uval: Pointer to an u64 holding the unpacked value.
+ * @startbit: The index (in logical notation, compensated for quirks) where
+ * the packed value starts within pbuf. Must be larger than, or
+ * equal to, endbit.
+ * @endbit: The index (in logical notation, compensated for quirks) where
+ * the packed value ends within pbuf. Must be smaller than, or equal
+ * to, startbit.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
+ * @op: If PACK, then uval will be treated as const pointer and copied (packed)
+ * into pbuf, between startbit and endbit.
+ * If UNPACK, then pbuf will be treated as const pointer and the logical
+ * value between startbit and endbit will be copied (unpacked) to uval.
+ * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
+ * QUIRK_MSB_ON_THE_RIGHT.
+ *
+ * Note: this function is deprecated; prefer pack() or unpack() in new code.
+ *
+ * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
+ * correct usage, return code may be discarded.
+ * If op is PACK, pbuf is modified.
+ * If op is UNPACK, uval is modified.
+ */
+int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
+ enum packing_op op, u8 quirks)
+{
+ if (op == PACK)
+ return pack(pbuf, *uval, startbit, endbit, pbuflen, quirks);
+
+ return unpack(pbuf, uval, startbit, endbit, pbuflen, quirks);
+}
EXPORT_SYMBOL(packing);
MODULE_DESCRIPTION("Generic bitfield packing and unpacking");
diff --git a/lib/packing_test.c b/lib/packing_test.c
new file mode 100644
index 000000000000..b38ea43c03fd
--- /dev/null
+++ b/lib/packing_test.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Vladimir Oltean <olteanv@gmail.com>
+ * Copyright (c) 2024, Intel Corporation.
+ */
+#include <kunit/test.h>
+#include <linux/packing.h>
+
+struct packing_test_case {
+ const char *desc;
+ const u8 *pbuf;
+ size_t pbuf_size;
+ u64 uval;
+ size_t start_bit;
+ size_t end_bit;
+ u8 quirks;
+};
+
+#define NO_QUIRKS 0
+
+/**
+ * PBUF - Initialize .pbuf and .pbuf_size
+ * @array: elements of constant physical buffer
+ *
+ * Initializes the .pbuf and .pbuf_size fields of a struct packing_test_case
+ * with a constant array of the specified elements.
+ */
+#define PBUF(array...) \
+ .pbuf = (const u8[]){ array }, \
+ .pbuf_size = sizeof((const u8 []){ array })
+
+static const struct packing_test_case cases[] = {
+ /* These tests pack and unpack a magic 64-bit value
+ * (0xcafedeadbeefcafe) at a fixed logical offset (32) within an
+ * otherwise zero array of 128 bits (16 bytes). They test all possible
+ * bit layouts of the 128 bit buffer.
+ */
+ {
+ .desc = "no quirks, 16 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xca, 0xfe, 0xde, 0xad,
+ 0xbe, 0xef, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = NO_QUIRKS,
+ },
+ {
+ .desc = "lsw32 first, 16 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xbe, 0xef, 0xca, 0xfe,
+ 0xca, 0xfe, 0xde, 0xad, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_LSW32_IS_FIRST,
+ },
+ {
+ .desc = "little endian words, 16 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xad, 0xde, 0xfe, 0xca,
+ 0xfe, 0xca, 0xef, 0xbe, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "lsw32 first + little endian words, 16 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
+ 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "msb right, 16 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x53, 0x7f, 0x7b, 0xb5,
+ 0x7d, 0xf7, 0x53, 0x7f, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT,
+ },
+ {
+ .desc = "msb right + lsw32 first, 16 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x7d, 0xf7, 0x53, 0x7f,
+ 0x53, 0x7f, 0x7b, 0xb5, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST,
+ },
+ {
+ .desc = "msb right + little endian words, 16 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xb5, 0x7b, 0x7f, 0x53,
+ 0x7f, 0x53, 0xf7, 0x7d, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "msb right + lsw32 first + little endian words, 16 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x7f, 0x53, 0xf7, 0x7d,
+ 0xb5, 0x7b, 0x7f, 0x53, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+ /* These tests pack and unpack a magic 64-bit value
+ * (0xcafedeadbeefcafe) at a fixed logical offset (32) within an
+ * otherwise zero array of varying size from 18 bytes to 24 bytes.
+ */
+ {
+ .desc = "no quirks, 18 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0xfe,
+ 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, 0x00, 0x00,
+ 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = NO_QUIRKS,
+ },
+ {
+ .desc = "no quirks, 19 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca,
+ 0xfe, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, 0x00,
+ 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = NO_QUIRKS,
+ },
+ {
+ .desc = "no quirks, 20 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xca, 0xfe, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe,
+ 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = NO_QUIRKS,
+ },
+ {
+ .desc = "no quirks, 22 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xca, 0xfe, 0xde, 0xad, 0xbe, 0xef,
+ 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = NO_QUIRKS,
+ },
+ {
+ .desc = "no quirks, 24 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xca, 0xfe, 0xde, 0xad,
+ 0xbe, 0xef, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = NO_QUIRKS,
+ },
+ {
+ .desc = "lsw32 first + little endian words, 18 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
+ 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "lsw32 first + little endian words, 19 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
+ 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "lsw32 first + little endian words, 20 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
+ 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "lsw32 first + little endian words, 22 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
+ 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "lsw32 first + little endian words, 24 bytes",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
+ 0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xcafedeadbeefcafe,
+ .start_bit = 95,
+ .end_bit = 32,
+ .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+ /* These tests pack and unpack a magic 64-bit value
+ * (0x1122334455667788) at an odd starting bit (43) within an
+ * otherwise zero array of 128 bits (16 bytes). They test all possible
+ * bit layouts of the 128 bit buffer.
+ */
+ {
+ .desc = "no quirks, 16 bytes, non-aligned",
+ PBUF(0x00, 0x00, 0x00, 0x89, 0x11, 0x9a, 0x22, 0xab,
+ 0x33, 0xbc, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0x1122334455667788,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = NO_QUIRKS,
+ },
+ {
+ .desc = "lsw32 first, 16 bytes, non-aligned",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x33, 0xbc, 0x40, 0x00,
+ 0x11, 0x9a, 0x22, 0xab, 0x00, 0x00, 0x00, 0x89),
+ .uval = 0x1122334455667788,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_LSW32_IS_FIRST,
+ },
+ {
+ .desc = "little endian words, 16 bytes, non-aligned",
+ PBUF(0x89, 0x00, 0x00, 0x00, 0xab, 0x22, 0x9a, 0x11,
+ 0x00, 0x40, 0xbc, 0x33, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0x1122334455667788,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "lsw32 first + little endian words, 16 bytes, non-aligned",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xbc, 0x33,
+ 0xab, 0x22, 0x9a, 0x11, 0x89, 0x00, 0x00, 0x00),
+ .uval = 0x1122334455667788,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "msb right, 16 bytes, non-aligned",
+ PBUF(0x00, 0x00, 0x00, 0x91, 0x88, 0x59, 0x44, 0xd5,
+ 0xcc, 0x3d, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0x1122334455667788,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT,
+ },
+ {
+ .desc = "msb right + lsw32 first, 16 bytes, non-aligned",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xcc, 0x3d, 0x02, 0x00,
+ 0x88, 0x59, 0x44, 0xd5, 0x00, 0x00, 0x00, 0x91),
+ .uval = 0x1122334455667788,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST,
+ },
+ {
+ .desc = "msb right + little endian words, 16 bytes, non-aligned",
+ PBUF(0x91, 0x00, 0x00, 0x00, 0xd5, 0x44, 0x59, 0x88,
+ 0x00, 0x02, 0x3d, 0xcc, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0x1122334455667788,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "msb right + lsw32 first + little endian words, 16 bytes, non-aligned",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x3d, 0xcc,
+ 0xd5, 0x44, 0x59, 0x88, 0x91, 0x00, 0x00, 0x00),
+ .uval = 0x1122334455667788,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+ /* These tests pack and unpack a u64 with all bits set
+ * (0xffffffffffffffff) at an odd starting bit (43) within an
+ * otherwise zero array of 128 bits (16 bytes). They test all possible
+ * bit layouts of the 128 bit buffer.
+ */
+ {
+ .desc = "no quirks, 16 bytes, non-aligned, 0xff",
+ PBUF(0x00, 0x00, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xffffffffffffffff,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = NO_QUIRKS,
+ },
+ {
+ .desc = "lsw32 first, 16 bytes, non-aligned, 0xff",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x07, 0xff),
+ .uval = 0xffffffffffffffff,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_LSW32_IS_FIRST,
+ },
+ {
+ .desc = "little endian words, 16 bytes, non-aligned, 0xff",
+ PBUF(0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0xf8, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xffffffffffffffff,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "lsw32 first + little endian words, 16 bytes, non-aligned, 0xff",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x00, 0x00),
+ .uval = 0xffffffffffffffff,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "msb right, 16 bytes, non-aligned, 0xff",
+ PBUF(0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xffffffffffffffff,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT,
+ },
+ {
+ .desc = "msb right + lsw32 first, 16 bytes, non-aligned, 0xff",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1f, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xe0, 0xff),
+ .uval = 0xffffffffffffffff,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST,
+ },
+ {
+ .desc = "msb right + little endian words, 16 bytes, non-aligned, 0xff",
+ PBUF(0xff, 0xe0, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x1f, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00),
+ .uval = 0xffffffffffffffff,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LITTLE_ENDIAN,
+ },
+ {
+ .desc = "msb right + lsw32 first + little endian words, 16 bytes, non-aligned, 0xff",
+ PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x00, 0x00),
+ .uval = 0xffffffffffffffff,
+ .start_bit = 106,
+ .end_bit = 43,
+ .quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
+ },
+};
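
Editorial note: the "msb right" expectations above can be derived mechanically from
their no-quirks counterparts, because QUIRK_MSB_ON_THE_RIGHT reverses the bit order
within each byte (bitrev8(0xca) == 0x53, bitrev8(0xfe) == 0x7f, and so on). A sketch
of that derivation (illustrative; no_quirks[] stands for the NO_QUIRKS pbuf):

	#include <linux/bitrev.h>

	u8 msb_right[16];
	int i;

	for (i = 0; i < ARRAY_SIZE(msb_right); i++)
		msb_right[i] = bitrev8(no_quirks[i]);
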
+
+KUNIT_ARRAY_PARAM_DESC(packing, cases, desc);
+
+static void packing_test_pack(struct kunit *test)
+{
+ const struct packing_test_case *params = test->param_value;
+ u8 *pbuf;
+ int err;
+
+ pbuf = kunit_kzalloc(test, params->pbuf_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pbuf);
+
+ err = pack(pbuf, params->uval, params->start_bit, params->end_bit,
+ params->pbuf_size, params->quirks);
+
+ KUNIT_EXPECT_EQ_MSG(test, err, 0, "pack() returned %pe\n", ERR_PTR(err));
+ KUNIT_EXPECT_MEMEQ(test, pbuf, params->pbuf, params->pbuf_size);
+}
+
+static void packing_test_unpack(struct kunit *test)
+{
+ const struct packing_test_case *params = test->param_value;
+ u64 uval;
+ int err;
+
+ err = unpack(params->pbuf, &uval, params->start_bit, params->end_bit,
+ params->pbuf_size, params->quirks);
+ KUNIT_EXPECT_EQ_MSG(test, err, 0, "unpack() returned %pe\n", ERR_PTR(err));
+ KUNIT_EXPECT_EQ(test, uval, params->uval);
+}
+
+static struct kunit_case packing_test_cases[] = {
+ KUNIT_CASE_PARAM(packing_test_pack, packing_gen_params),
+ KUNIT_CASE_PARAM(packing_test_unpack, packing_gen_params),
+ {},
+};
+
+static struct kunit_suite packing_test_suite = {
+ .name = "packing",
+ .test_cases = packing_test_cases,
+};
+
+kunit_test_suite(packing_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests for packing library");
diff --git a/lib/random32.c b/lib/random32.c
index 0a5a0e3600c8..24e7acd9343f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -36,7 +36,7 @@
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/jiffies.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 41ae3c7570d3..8655a76d29a1 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -2,7 +2,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rbtree_augmented.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/slab.h>
#include <asm/timex.h>
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index fa5edd6ef7f7..2eed1ad958e9 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -14,7 +14,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/highmem.h>
#include <linux/sched.h>
diff --git a/lib/test_parman.c b/lib/test_parman.c
index 35e32243693c..f9b97426a337 100644
--- a/lib/test_parman.c
+++ b/lib/test_parman.c
@@ -39,7 +39,7 @@
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/err.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/parman.h>
#define TEST_PARMAN_PRIO_SHIFT 7 /* defines number of prios for testing */
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 8448b6d02bd9..59dbe4f9a4cb 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -386,6 +386,66 @@ kernel_ptr(void)
static void __init
struct_resource(void)
{
+ struct resource test_resource = {
+ .start = 0xc0ffee00,
+ .end = 0xc0ffee00,
+ .flags = IORESOURCE_MEM,
+ };
+
+ test("[mem 0xc0ffee00 flags 0x200]",
+ "%pr", &test_resource);
+
+ test_resource = (struct resource) {
+ .start = 0xc0ffee,
+ .end = 0xba5eba11,
+ .flags = IORESOURCE_MEM,
+ };
+ test("[mem 0x00c0ffee-0xba5eba11 flags 0x200]",
+ "%pr", &test_resource);
+
+ test_resource = (struct resource) {
+ .start = 0xba5eba11,
+ .end = 0xc0ffee,
+ .flags = IORESOURCE_MEM,
+ };
+ test("[mem 0xba5eba11-0x00c0ffee flags 0x200]",
+ "%pr", &test_resource);
+
+ test_resource = (struct resource) {
+ .start = 0xba5eba11,
+ .end = 0xba5eca11,
+ .flags = IORESOURCE_MEM,
+ };
+
+ test("[mem 0xba5eba11-0xba5eca11 flags 0x200]",
+ "%pr", &test_resource);
+
+ test_resource = (struct resource) {
+ .start = 0xba11,
+ .end = 0xca10,
+ .flags = IORESOURCE_IO |
+ IORESOURCE_DISABLED |
+ IORESOURCE_UNSET,
+ };
+
+ test("[io size 0x1000 disabled]",
+ "%pR", &test_resource);
+}
+
+static void __init
+struct_range(void)
+{
+ struct range test_range = DEFINE_RANGE(0xc0ffee00ba5eba11,
+ 0xc0ffee00ba5eba11);
+ test("[range 0xc0ffee00ba5eba11]", "%pra", &test_range);
+
+ test_range = DEFINE_RANGE(0xc0ffee, 0xba5eba11);
+ test("[range 0x0000000000c0ffee-0x00000000ba5eba11]",
+ "%pra", &test_range);
+
+ test_range = DEFINE_RANGE(0xba5eba11, 0xc0ffee);
+ test("[range 0x00000000ba5eba11-0x0000000000c0ffee]",
+ "%pra", &test_range);
}
static void __init
@@ -763,6 +823,7 @@ test_pointer(void)
symbol_ptr();
kernel_ptr();
struct_resource();
+ struct_range();
addr();
escaped_str();
hex_string();
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index 7257b1768545..44f8508c9d88 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/printk.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index c5e2ec9303c5..6ac02bbb7df1 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1040,6 +1040,20 @@ static const struct printf_spec default_dec04_spec = {
};
static noinline_for_stack
+char *hex_range(char *buf, char *end, u64 start_val, u64 end_val,
+ struct printf_spec spec)
+{
+ buf = number(buf, end, start_val, spec);
+ if (start_val == end_val)
+ return buf;
+
+ if (buf < end)
+ *buf = '-';
+ ++buf;
+ return number(buf, end, end_val, spec);
+}
+
+static noinline_for_stack
char *resource_string(char *buf, char *end, struct resource *res,
struct printf_spec spec, const char *fmt)
{
@@ -1115,11 +1129,7 @@ char *resource_string(char *buf, char *end, struct resource *res,
p = string_nocheck(p, pend, "size ", str_spec);
p = number(p, pend, resource_size(res), *specp);
} else {
- p = number(p, pend, res->start, *specp);
- if (res->start != res->end) {
- *p++ = '-';
- p = number(p, pend, res->end, *specp);
- }
+ p = hex_range(p, pend, res->start, res->end, *specp);
}
if (decode) {
if (res->flags & IORESOURCE_MEM_64)
@@ -1141,6 +1151,31 @@ char *resource_string(char *buf, char *end, struct resource *res,
}
static noinline_for_stack
+char *range_string(char *buf, char *end, const struct range *range,
+ struct printf_spec spec, const char *fmt)
+{
+ char sym[sizeof("[range 0x0123456789abcdef-0x0123456789abcdef]")];
+ char *p = sym, *pend = sym + sizeof(sym);
+
+ struct printf_spec range_spec = {
+ .field_width = 2 + 2 * sizeof(range->start), /* 0x + 2 * 8 */
+ .flags = SPECIAL | SMALL | ZEROPAD,
+ .base = 16,
+ .precision = -1,
+ };
+
+ if (check_pointer(&buf, end, range, spec))
+ return buf;
+
+ p = string_nocheck(p, pend, "[range ", default_str_spec);
+ p = hex_range(p, pend, range->start, range->end, range_spec);
+ *p++ = ']';
+ *p = '\0';
+
+ return string_nocheck(buf, end, sym, spec);
+}
+
+static noinline_for_stack
char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
const char *fmt)
{
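
Editorial note: a usage sketch for the new specifier (values borrowed from the
test_printf cases above):

	struct range r = DEFINE_RANGE(0xc0ffee, 0xba5eba11);

	pr_info("%pra\n", &r);	/* [range 0x0000000000c0ffee-0x00000000ba5eba11] */
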
@@ -2229,6 +2264,15 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
return widen_string(buf, buf - buf_start, end, spec);
}
+static noinline_for_stack
+char *resource_or_range(const char *fmt, char *buf, char *end, void *ptr,
+ struct printf_spec spec)
+{
+ if (*fmt == 'r' && fmt[1] == 'a')
+ return range_string(buf, end, ptr, spec, fmt);
+ return resource_string(buf, end, ptr, spec, fmt);
+}
+
int __init no_hash_pointers_enable(char *str)
{
if (no_hash_pointers)
@@ -2277,6 +2321,7 @@ char *rust_fmt_argument(char *buf, char *end, void *ptr);
* - 'Bb' as above with module build ID (for use in backtraces)
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
+ * - 'ra' For struct ranges, e.g., [range 0x0000000000000000-0x00000000000000ff]
* - 'b[l]' For a bitmap, the number of bits is determined by the field
* width which must be explicitly specified either as part of the
* format string '%32b[l]' or through '%*b[l]', [l] selects
@@ -2401,7 +2446,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return symbol_string(buf, end, ptr, spec, fmt);
case 'R':
case 'r':
- return resource_string(buf, end, ptr, spec, fmt);
+ return resource_or_range(fmt, buf, end, ptr, spec);
case 'h':
return hex_string(buf, end, ptr, spec, fmt);
case 'b':