Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c              |   6
-rw-r--r--  kernel/crash_core.c           |   6
-rw-r--r--  kernel/events/core.c          |  15
-rw-r--r--  kernel/events/hw_breakpoint.c |   4
-rw-r--r--  kernel/fork.c                 |  26
-rw-r--r--  kernel/futex/core.c           |  22
-rw-r--r--  kernel/futex/futex.h          |  59
-rw-r--r--  kernel/hung_task.c            |  18
-rw-r--r--  kernel/kthread.c              |   2
-rw-r--r--  kernel/module/internal.h      |   7
-rw-r--r--  kernel/module/main.c          | 569
-rw-r--r--  kernel/notifier.c             |   8
-rw-r--r--  kernel/power/hibernate.c      |   5
-rw-r--r--  kernel/reboot.c               |  15
-rw-r--r--  kernel/resource.c             |  66
-rw-r--r--  kernel/trace/trace.c          |   2
-rw-r--r--  kernel/trace/trace_output.c   |  14
-rw-r--r--  kernel/watchdog.c             |   3
18 files changed, 611 insertions, 236 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 91afdd0d036e..279ba5c420a4 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2729,7 +2729,7 @@ void __audit_ptrace(struct task_struct *t)
 	context->target_uid = task_uid(t);
 	context->target_sessionid = audit_get_sessionid(t);
 	security_task_getlsmprop_obj(t, &context->target_ref);
-	memcpy(context->target_comm, t->comm, TASK_COMM_LEN);
+	strscpy(context->target_comm, t->comm);
 }
 
 /**
@@ -2756,7 +2756,7 @@ int audit_signal_info_syscall(struct task_struct *t)
 		ctx->target_uid = t_uid;
 		ctx->target_sessionid = audit_get_sessionid(t);
 		security_task_getlsmprop_obj(t, &ctx->target_ref);
-		memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN);
+		strscpy(ctx->target_comm, t->comm);
 		return 0;
 	}
 
@@ -2777,7 +2777,7 @@ int audit_signal_info_syscall(struct task_struct *t)
 	axp->target_uid[axp->pid_count] = t_uid;
 	axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
 	security_task_getlsmprop_obj(t, &axp->target_ref[axp->pid_count]);
-	memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN);
+	strscpy(axp->target_comm[axp->pid_count], t->comm);
 	axp->pid_count++;
 
 	return 0;
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index c1048893f4b6..078fe5bc5a74 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -505,7 +505,8 @@ int crash_check_hotplug_support(void)
 	crash_hotplug_lock();
 	/* Obtain lock while reading crash information */
 	if (!kexec_trylock()) {
-		pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
+		if (!kexec_in_progress)
+			pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
 		crash_hotplug_unlock();
 		return 0;
 	}
@@ -547,7 +548,8 @@ static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu,
 	crash_hotplug_lock();
 	/* Obtain lock while changing crash information */
 	if (!kexec_trylock()) {
-		pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
+		if (!kexec_in_progress)
+			pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
 		crash_hotplug_unlock();
 		return;
 	}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5d4a54f50826..065f9188b44a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3778,18 +3778,11 @@ static bool perf_less_group_idx(const void *l, const void *r, void __always_unus
 	return le->group_index < re->group_index;
 }
 
-static void swap_ptr(void *l, void *r, void __always_unused *args)
-{
-	void **lp = l, **rp = r;
-
-	swap(*lp, *rp);
-}
-
 DEFINE_MIN_HEAP(struct perf_event *, perf_event_min_heap);
 
 static const struct min_heap_callbacks perf_min_heap = {
 	.less = perf_less_group_idx,
-	.swp = swap_ptr,
+	.swp = NULL,
 };
 
 static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event)
@@ -3870,7 +3863,7 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
 			perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu);
 	}
 
-	min_heapify_all(&event_heap, &perf_min_heap, NULL);
+	min_heapify_all_inline(&event_heap, &perf_min_heap, NULL);
 
 	while (event_heap.nr) {
 		ret = func(*evt, data);
@@ -3879,9 +3872,9 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
 
 		*evt = perf_event_groups_next(*evt, pmu);
 		if (*evt)
-			min_heap_sift_down(&event_heap, 0, &perf_min_heap, NULL);
+			min_heap_sift_down_inline(&event_heap, 0, &perf_min_heap, NULL);
 		else
-			min_heap_pop(&event_heap, &perf_min_heap, NULL);
+			min_heap_pop_inline(&event_heap, &perf_min_heap, NULL);
 	}
 
 	return 0;
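The perf hunks switch to the *_inline min-heap helpers and set .swp = NULL, which selects the built-in default swap in linux/min_heap.h. A minimal sketch of the same pattern for an int heap (illustrative, not part of the patch):

#include <linux/min_heap.h>

DEFINE_MIN_HEAP(int, int_min_heap);

static bool int_less(const void *l, const void *r, void __always_unused *args)
{
	return *(const int *)l < *(const int *)r;
}

static const struct min_heap_callbacks int_heap_cb = {
	.less = int_less,
	.swp  = NULL,	/* NULL: fall back to the built-in default swap */
};

/* Turn @heap into a valid min-heap and pop its smallest element. */
static int pop_smallest(struct int_min_heap *heap)
{
	int smallest;

	min_heapify_all_inline(heap, &int_heap_cb, NULL);
	smallest = heap->data[0];
	min_heap_pop_inline(heap, &int_heap_cb, NULL);
	return smallest;
}

The _inline variants specialize on the element size at compile time, which is why the open-coded pointer swap above became unnecessary.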
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 6c2cb4e4f48d..bc4a61029b6d 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -849,7 +849,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
 	cpu_events = alloc_percpu(typeof(*cpu_events));
 	if (!cpu_events)
-		return (void __percpu __force *)ERR_PTR(-ENOMEM);
+		return ERR_PTR_PCPU(-ENOMEM);
 
 	cpus_read_lock();
 	for_each_online_cpu(cpu) {
@@ -868,7 +868,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
 	return cpu_events;
 
 	unregister_wide_hw_breakpoint(cpu_events);
-	return (void __percpu __force *)ERR_PTR(err);
+	return ERR_PTR_PCPU(err);
 }
 EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
diff --git a/kernel/fork.c b/kernel/fork.c
index f253e81d0c28..1450b461d196 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -621,6 +621,12 @@ static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
 
 	exe_file = get_mm_exe_file(oldmm);
 	RCU_INIT_POINTER(mm->exe_file, exe_file);
+	/*
+	 * We depend on the oldmm having properly denied write access to the
+	 * exe_file already.
+	 */
+	if (exe_file && deny_write_access(exe_file))
+		pr_warn_once("deny_write_access() failed in %s\n", __func__);
 }
 
 #ifdef CONFIG_MMU
@@ -1413,11 +1419,20 @@ int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
 	 */
 	old_exe_file = rcu_dereference_raw(mm->exe_file);
 
-	if (new_exe_file)
+	if (new_exe_file) {
+		/*
+		 * We expect the caller (i.e., sys_execve) to already denied
+		 * write access, so this is unlikely to fail.
+		 */
+		if (unlikely(deny_write_access(new_exe_file)))
+			return -EACCES;
 		get_file(new_exe_file);
+	}
 	rcu_assign_pointer(mm->exe_file, new_exe_file);
-	if (old_exe_file)
+	if (old_exe_file) {
+		allow_write_access(old_exe_file);
 		fput(old_exe_file);
+	}
 	return 0;
 }
 
@@ -1456,6 +1471,9 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
 			return ret;
 	}
 
+	ret = deny_write_access(new_exe_file);
+	if (ret)
+		return -EACCES;
 	get_file(new_exe_file);
 
 	/* set the new file */
@@ -1464,8 +1482,10 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
 	rcu_assign_pointer(mm->exe_file, new_exe_file);
 	mmap_write_unlock(mm);
 
-	if (old_exe_file)
+	if (old_exe_file) {
+		allow_write_access(old_exe_file);
 		fput(old_exe_file);
+	}
 	return 0;
 }
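The fork.c hunks bracket every mm->exe_file reference with deny_write_access()/allow_write_access(). Those helpers drive inode->i_writecount negative, the same mechanism that makes writes to a running binary fail with ETXTBSY; set_mm_exe_file() merely translates a failure into -EACCES. A simplified model of that protocol (illustrative only; the real helpers live in fs/ and operate on a struct file):

#include <linux/atomic.h>
#include <linux/errno.h>

/*
 * Model only: i_writecount > 0 means writers exist, < 0 means write
 * access is denied and would-be writers get -ETXTBSY.
 */
static int deny_write_access_model(atomic_t *i_writecount)
{
	return atomic_dec_unless_positive(i_writecount) ? 0 : -ETXTBSY;
}

static void allow_write_access_model(atomic_t *i_writecount)
{
	atomic_inc(i_writecount);
}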
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 6de57246760e..ebdd76b4ecbb 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -451,28 +451,6 @@ struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *
 	return NULL;
 }
 
-int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
-{
-	int ret;
-
-	pagefault_disable();
-	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
-	pagefault_enable();
-
-	return ret;
-}
-
-int futex_get_value_locked(u32 *dest, u32 __user *from)
-{
-	int ret;
-
-	pagefault_disable();
-	ret = __get_user(*dest, from);
-	pagefault_enable();
-
-	return ret ? -EFAULT : 0;
-}
-
 /**
  * wait_for_owner_exiting - Block until the owner has exited
  * @ret: owner's current futex lock status
diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h
index 8b195d06f4e8..618ce1fe870e 100644
--- a/kernel/futex/futex.h
+++ b/kernel/futex/futex.h
@@ -6,6 +6,7 @@
 #include <linux/rtmutex.h>
 #include <linux/sched/wake_q.h>
 #include <linux/compat.h>
+#include <linux/uaccess.h>
 
 #ifdef CONFIG_PREEMPT_RT
 #include <linux/rcuwait.h>
@@ -225,10 +226,64 @@ extern bool __futex_wake_mark(struct futex_q *q);
 extern void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q);
 
 extern int fault_in_user_writeable(u32 __user *uaddr);
-extern int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval);
-extern int futex_get_value_locked(u32 *dest, u32 __user *from);
 extern struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key);
 
+static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
+{
+	int ret;
+
+	pagefault_disable();
+	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
+	pagefault_enable();
+
+	return ret;
+}
+
+/*
+ * This does a plain atomic user space read, and the user pointer has
+ * already been verified earlier by get_futex_key() to be both aligned
+ * and actually in user space, just like futex_atomic_cmpxchg_inatomic().
+ *
+ * We still want to avoid any speculation, and while __get_user() is
+ * the traditional model for this, it's actually slower than doing
+ * this manually these days.
+ *
+ * We could just have a per-architecture special function for it,
+ * the same way we do futex_atomic_cmpxchg_inatomic(), but rather
+ * than force everybody to do that, write it out long-hand using
+ * the low-level user-access infrastructure.
+ *
+ * This looks a bit overkill, but generally just results in a couple
+ * of instructions.
+ */
+static __always_inline int futex_read_inatomic(u32 *dest, u32 __user *from)
+{
+	u32 val;
+
+	if (can_do_masked_user_access())
+		from = masked_user_access_begin(from);
+	else if (!user_read_access_begin(from, sizeof(*from)))
+		return -EFAULT;
+	unsafe_get_user(val, from, Efault);
+	user_access_end();
+	*dest = val;
+	return 0;
+Efault:
+	user_access_end();
+	return -EFAULT;
+}
+
+static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
+{
+	int ret;
+
+	pagefault_disable();
+	ret = futex_read_inatomic(dest, from);
+	pagefault_enable();
+
+	return ret;
+}
+
 extern void __futex_unqueue(struct futex_q *q);
 extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb);
 extern int futex_unqueue(struct futex_q *q);
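futex_get_value_locked() stays usable with the hash-bucket lock held because the pagefault_disable()/pagefault_enable() bracket turns a missing page into -EFAULT instead of a sleeping fault. Callers are expected to drop their locks, fault the page in with a sleeping access, and retry. A sketch of that caller pattern (the helper name is illustrative and the lock dance is elided):

/* Illustrative caller: atomic read first, sleeping fault-in on failure. */
static int futex_read_with_retry(u32 *val, u32 __user *uaddr)
{
	while (futex_get_value_locked(val, uaddr)) {
		u32 dummy;

		/* Here the real code would drop the hb lock first. */
		if (get_user(dummy, uaddr))
			return -EFAULT;	/* genuine fault: give up */
	}
	return 0;
}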
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 959d99583d1c..c18717189f32 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -31,6 +31,11 @@ static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
 
 /*
+ * Total number of tasks detected as hung since boot:
+ */
+static unsigned long __read_mostly sysctl_hung_task_detect_count;
+
+/*
  * Limit number of tasks checked in a batch.
  *
  * This value controls the preemptibility of khungtaskd since preemption
@@ -115,6 +120,12 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 	if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
 		return;
 
+	/*
+	 * This counter tracks the total number of tasks detected as hung
+	 * since boot.
+	 */
+	sysctl_hung_task_detect_count++;
+
 	trace_sched_process_hang(t);
 
 	if (sysctl_hung_task_panic) {
@@ -314,6 +325,13 @@ static struct ctl_table hung_task_sysctls[] = {
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= SYSCTL_NEG_ONE,
 	},
+	{
+		.procname	= "hung_task_detect_count",
+		.data		= &sysctl_hung_task_detect_count,
+		.maxlen		= sizeof(unsigned long),
+		.mode		= 0444,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
 };
 
 static void __init hung_task_sysctl_init(void)
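The new counter is a read-only (mode 0444) sysctl, so it appears as /proc/sys/kernel/hung_task_detect_count. A small user-space reader, assuming only that path:

#include <stdio.h>

int main(void)
{
	unsigned long count;
	FILE *f = fopen("/proc/sys/kernel/hung_task_detect_count", "r");

	if (!f || fscanf(f, "%lu", &count) != 1) {
		perror("hung_task_detect_count");
		return 1;
	}
	fclose(f);
	printf("tasks detected as hung since boot: %lu\n", count);
	return 0;
}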
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9bb36897b6c6..a5ac612b1609 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -101,7 +101,7 @@ void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
 	struct kthread *kthread = to_kthread(tsk);
 
 	if (!kthread || !kthread->full_name) {
-		__get_task_comm(buf, buf_size, tsk);
+		strscpy(buf, tsk->comm, buf_size);
 		return;
 	}
 
diff --git a/kernel/module/internal.h b/kernel/module/internal.h
index 2ebece8a789f..daef2be83902 100644
--- a/kernel/module/internal.h
+++ b/kernel/module/internal.h
@@ -80,7 +80,12 @@ struct load_info {
 	unsigned int used_pages;
 #endif
 	struct {
-		unsigned int sym, str, mod, vers, info, pcpu;
+		unsigned int sym;
+		unsigned int str;
+		unsigned int mod;
+		unsigned int vers;
+		unsigned int info;
+		unsigned int pcpu;
 	} index;
 };
diff --git a/kernel/module/main.c b/kernel/module/main.c
index d2e1b8976c7b..5399c182b3cb 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -195,6 +195,38 @@ static unsigned int find_sec(const struct load_info *info, const char *name)
 	return 0;
 }
 
+/**
+ * find_any_unique_sec() - Find a unique section index by name
+ * @info: Load info for the module to scan
+ * @name: Name of the section we're looking for
+ *
+ * Locates a unique section by name. Ignores SHF_ALLOC.
+ *
+ * Return: Section index if found uniquely, zero if absent, negative count
+ *         of total instances if multiple were found.
+ */
+static int find_any_unique_sec(const struct load_info *info, const char *name)
+{
+	unsigned int idx;
+	unsigned int count = 0;
+	int i;
+
+	for (i = 1; i < info->hdr->e_shnum; i++) {
+		if (strcmp(info->secstrings + info->sechdrs[i].sh_name,
+			   name) == 0) {
+			count++;
+			idx = i;
+		}
+	}
+	if (count == 1) {
+		return idx;
+	} else if (count == 0) {
+		return 0;
+	} else {
+		return -count;
+	}
+}
+
 /* Find a module section, or NULL. */
 static void *section_addr(const struct load_info *info, const char *name)
 {
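find_any_unique_sec() packs three outcomes into one int: a positive section index when the name is unique, zero when it is absent, and a negative total count when duplicated. A sketch of the branching a caller needs (the section name and caching target are illustrative, not from the patch):

/* Illustrative caller of the index / zero / negative-count convention. */
static int cache_example_index(struct load_info *info, unsigned int *out)
{
	int idx = find_any_unique_sec(info, ".example");

	if (idx == 0)
		return 0;		/* absent: treat the section as optional */
	if (idx < 0) {
		pr_err("found %d copies of .example, expected at most one\n", -idx);
		return -ENOEXEC;	/* duplicated: reject the module */
	}
	*out = idx;			/* unique: cache the index */
	return 0;
}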
@@ -1679,7 +1711,7 @@ bool __weak module_exit_section(const char *name)
 	return strstarts(name, ".exit");
 }
 
-static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
+static int validate_section_offset(const struct load_info *info, Elf_Shdr *shdr)
 {
 #if defined(CONFIG_64BIT)
 	unsigned long long secend;
@@ -1698,62 +1730,80 @@ static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
 	return 0;
 }
 
-/*
- * Check userspace passed ELF module against our expectations, and cache
- * useful variables for further processing as we go.
- *
- * This does basic validity checks against section offsets and sizes, the
- * section name string table, and the indices used for it (sh_name).
+/**
+ * elf_validity_ehdr() - Checks an ELF header for module validity
+ * @info: Load info containing the ELF header to check
  *
- * As a last step, since we're already checking the ELF sections we cache
- * useful variables which will be used later for our convenience:
+ * Checks whether an ELF header could belong to a valid module. Checks:
  *
- *	o pointers to section headers
- *	o cache the modinfo symbol section
- *	o cache the string symbol section
- *	o cache the module section
+ * * ELF header is within the data the user provided
+ * * ELF magic is present
+ * * It is relocatable (not final linked, not core file, etc.)
+ * * The header's machine type matches what the architecture expects.
+ * * Optional arch-specific hook for other properties
+ *   - module_elf_check_arch() is currently only used by PPC to check
+ *     ELF ABI version, but may be used by others in the future.
  *
- * As a last step we set info->mod to the temporary copy of the module in
- * info->hdr. The final one will be allocated in move_module(). Any
- * modifications we make to our copy of the module will be carried over
- * to the final minted module.
+ * Return: %0 if valid, %-ENOEXEC on failure.
  */
-static int elf_validity_cache_copy(struct load_info *info, int flags)
+static int elf_validity_ehdr(const struct load_info *info)
 {
-	unsigned int i;
-	Elf_Shdr *shdr, *strhdr;
-	int err;
-	unsigned int num_mod_secs = 0, mod_idx;
-	unsigned int num_info_secs = 0, info_idx;
-	unsigned int num_sym_secs = 0, sym_idx;
-
 	if (info->len < sizeof(*(info->hdr))) {
 		pr_err("Invalid ELF header len %lu\n", info->len);
-		goto no_exec;
+		return -ENOEXEC;
 	}
-
 	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) {
 		pr_err("Invalid ELF header magic: != %s\n", ELFMAG);
-		goto no_exec;
+		return -ENOEXEC;
 	}
 	if (info->hdr->e_type != ET_REL) {
 		pr_err("Invalid ELF header type: %u != %u\n",
 		       info->hdr->e_type, ET_REL);
-		goto no_exec;
+		return -ENOEXEC;
 	}
 	if (!elf_check_arch(info->hdr)) {
 		pr_err("Invalid architecture in ELF header: %u\n",
 		       info->hdr->e_machine);
-		goto no_exec;
+		return -ENOEXEC;
 	}
 	if (!module_elf_check_arch(info->hdr)) {
 		pr_err("Invalid module architecture in ELF header: %u\n",
 		       info->hdr->e_machine);
-		goto no_exec;
+		return -ENOEXEC;
 	}
+	return 0;
+}
+
+/**
+ * elf_validity_cache_sechdrs() - Cache section headers if valid
+ * @info: Load info to compute section headers from
+ *
+ * Checks:
+ *
+ * * ELF header is valid (see elf_validity_ehdr())
+ * * Section headers are the size we expect
+ * * Section array fits in the user provided data
+ * * Section index 0 is NULL
+ * * Section contents are inbounds
+ *
+ * Then updates @info with a &load_info->sechdrs pointer if valid.
+ *
+ * Return: %0 if valid, negative error code if validation failed.
+ */
+static int elf_validity_cache_sechdrs(struct load_info *info)
+{
+	Elf_Shdr *sechdrs;
+	Elf_Shdr *shdr;
+	int i;
+	int err;
+
+	err = elf_validity_ehdr(info);
+	if (err < 0)
+		return err;
+
 	if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) {
 		pr_err("Invalid ELF section header size\n");
-		goto no_exec;
+		return -ENOEXEC;
 	}
 
 	/*
@@ -1765,10 +1815,66 @@ static int elf_validity_cache_copy(struct load_info *info, int flags)
 	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
 		info->len - info->hdr->e_shoff)) {
 		pr_err("Invalid ELF section header overflow\n");
-		goto no_exec;
+		return -ENOEXEC;
 	}
 
-	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
+	sechdrs = (void *)info->hdr + info->hdr->e_shoff;
+
+	/*
+	 * The code assumes that section 0 has a length of zero and
+	 * an addr of zero, so check for it.
+	 */
+	if (sechdrs[0].sh_type != SHT_NULL
+	    || sechdrs[0].sh_size != 0
+	    || sechdrs[0].sh_addr != 0) {
+		pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n",
+		       sechdrs[0].sh_type);
+		return -ENOEXEC;
+	}
+
+	/* Validate contents are inbounds */
+	for (i = 1; i < info->hdr->e_shnum; i++) {
+		shdr = &sechdrs[i];
+		switch (shdr->sh_type) {
+		case SHT_NULL:
+		case SHT_NOBITS:
+			/* No contents, offset/size don't mean anything */
+			continue;
+		default:
+			err = validate_section_offset(info, shdr);
+			if (err < 0) {
+				pr_err("Invalid ELF section in module (section %u type %u)\n",
+				       i, shdr->sh_type);
+				return err;
+			}
+		}
+	}
+
+	info->sechdrs = sechdrs;
+
+	return 0;
+}
+
+/**
+ * elf_validity_cache_secstrings() - Caches section names if valid
+ * @info: Load info to cache section names from. Must have valid sechdrs.
+ *
+ * Specifically checks:
+ *
+ * * Section name table index is inbounds of section headers
+ * * Section name table is not empty
+ * * Section name table is NUL terminated
+ * * All section name offsets are inbounds of the section
+ *
+ * Then updates @info with a &load_info->secstrings pointer if valid.
+ *
+ * Return: %0 if valid, negative error code if validation failed.
+ */
+static int elf_validity_cache_secstrings(struct load_info *info)
+{
+	Elf_Shdr *strhdr, *shdr;
+	char *secstrings;
+	int i;
 
 	/*
 	 * Verify if the section name table index is valid.
@@ -1778,165 +1884,234 @@ static int elf_validity_cache_copy(struct load_info *info, int flags)
 		pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n",
 		       info->hdr->e_shstrndx, info->hdr->e_shstrndx,
 		       info->hdr->e_shnum);
-		goto no_exec;
+		return -ENOEXEC;
 	}
 
 	strhdr = &info->sechdrs[info->hdr->e_shstrndx];
-	err = validate_section_offset(info, strhdr);
-	if (err < 0) {
-		pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type);
-		return err;
-	}
 
 	/*
 	 * The section name table must be NUL-terminated, as required
 	 * by the spec. This makes strcmp and pr_* calls that access
 	 * strings in the section safe.
 	 */
-	info->secstrings = (void *)info->hdr + strhdr->sh_offset;
+	secstrings = (void *)info->hdr + strhdr->sh_offset;
 	if (strhdr->sh_size == 0) {
 		pr_err("empty section name table\n");
-		goto no_exec;
+		return -ENOEXEC;
 	}
-	if (info->secstrings[strhdr->sh_size - 1] != '\0') {
+	if (secstrings[strhdr->sh_size - 1] != '\0') {
 		pr_err("ELF Spec violation: section name table isn't null terminated\n");
-		goto no_exec;
-	}
-
-	/*
-	 * The code assumes that section 0 has a length of zero and
-	 * an addr of zero, so check for it.
-	 */
-	if (info->sechdrs[0].sh_type != SHT_NULL
-	    || info->sechdrs[0].sh_size != 0
-	    || info->sechdrs[0].sh_addr != 0) {
-		pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n",
-		       info->sechdrs[0].sh_type);
-		goto no_exec;
+		return -ENOEXEC;
 	}
 
-	for (i = 1; i < info->hdr->e_shnum; i++) {
+	for (i = 0; i < info->hdr->e_shnum; i++) {
 		shdr = &info->sechdrs[i];
-		switch (shdr->sh_type) {
-		case SHT_NULL:
-		case SHT_NOBITS:
+		/* SHT_NULL means sh_name has an undefined value */
+		if (shdr->sh_type == SHT_NULL)
 			continue;
-		case SHT_SYMTAB:
-			if (shdr->sh_link == SHN_UNDEF
-			    || shdr->sh_link >= info->hdr->e_shnum) {
-				pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n",
-				       shdr->sh_link, shdr->sh_link,
-				       info->hdr->e_shnum);
-				goto no_exec;
-			}
-			num_sym_secs++;
-			sym_idx = i;
-			fallthrough;
-		default:
-			err = validate_section_offset(info, shdr);
-			if (err < 0) {
-				pr_err("Invalid ELF section in module (section %u type %u)\n",
-				       i, shdr->sh_type);
-				return err;
-			}
-
-			if (strcmp(info->secstrings + shdr->sh_name,
-				   ".gnu.linkonce.this_module") == 0) {
-				num_mod_secs++;
-				mod_idx = i;
-			} else if (strcmp(info->secstrings + shdr->sh_name,
-					  ".modinfo") == 0) {
-				num_info_secs++;
-				info_idx = i;
-			}
-
-			if (shdr->sh_flags & SHF_ALLOC) {
-				if (shdr->sh_name >= strhdr->sh_size) {
-					pr_err("Invalid ELF section name in module (section %u type %u)\n",
-					       i, shdr->sh_type);
-					return -ENOEXEC;
-				}
-			}
-			break;
+		if (shdr->sh_name >= strhdr->sh_size) {
+			pr_err("Invalid ELF section name in module (section %u type %u)\n",
+			       i, shdr->sh_type);
+			return -ENOEXEC;
 		}
 	}
 
-	if (num_info_secs > 1) {
+	info->secstrings = secstrings;
+	return 0;
+}
+
+/**
+ * elf_validity_cache_index_info() - Validate and cache modinfo section
+ * @info: Load info to populate the modinfo index on.
+ *        Must have &load_info->sechdrs and &load_info->secstrings populated
+ *
+ * Checks that if there is a .modinfo section, it is unique.
+ * Then, it caches its index in &load_info->index.info.
+ * Finally, it tries to populate the name to improve error messages.
+ *
+ * Return: %0 if valid, %-ENOEXEC if multiple modinfo sections were found.
+ */
+static int elf_validity_cache_index_info(struct load_info *info)
+{
+	int info_idx;
+
+	info_idx = find_any_unique_sec(info, ".modinfo");
+
+	if (info_idx == 0)
+		/* Early return, no .modinfo */
+		return 0;
+
+	if (info_idx < 0) {
 		pr_err("Only one .modinfo section must exist.\n");
-		goto no_exec;
-	} else if (num_info_secs == 1) {
-		/* Try to find a name early so we can log errors with a module name */
-		info->index.info = info_idx;
-		info->name = get_modinfo(info, "name");
+		return -ENOEXEC;
 	}
 
-	if (num_sym_secs != 1) {
-		pr_warn("%s: module has no symbols (stripped?)\n",
-			info->name ?: "(missing .modinfo section or name field)");
-		goto no_exec;
-	}
+	info->index.info = info_idx;
+	/* Try to find a name early so we can log errors with a module name */
+	info->name = get_modinfo(info, "name");
 
-	/* Sets internal symbols and strings. */
-	info->index.sym = sym_idx;
-	shdr = &info->sechdrs[sym_idx];
-	info->index.str = shdr->sh_link;
-	info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset;
+	return 0;
+}
 
-	/*
-	 * The ".gnu.linkonce.this_module" ELF section is special. It is
-	 * what modpost uses to refer to __this_module and let's use rely
-	 * on THIS_MODULE to point to &__this_module properly. The kernel's
-	 * modpost declares it on each modules's *.mod.c file. If the struct
-	 * module of the kernel changes a full kernel rebuild is required.
-	 *
-	 * We have a few expectaions for this special section, the following
-	 * code validates all this for us:
-	 *
-	 *   o Only one section must exist
-	 *   o We expect the kernel to always have to allocate it: SHF_ALLOC
-	 *   o The section size must match the kernel's run time's struct module
-	 *     size
-	 */
-	if (num_mod_secs != 1) {
-		pr_err("module %s: Only one .gnu.linkonce.this_module section must exist.\n",
+/**
+ * elf_validity_cache_index_mod() - Validates and caches this_module section
+ * @info: Load info to cache this_module on.
+ *        Must have &load_info->sechdrs and &load_info->secstrings populated
+ *
+ * The ".gnu.linkonce.this_module" ELF section is special. It is what modpost
+ * uses to refer to __this_module and let's use rely on THIS_MODULE to point
+ * to &__this_module properly. The kernel's modpost declares it on each
+ * modules's *.mod.c file. If the struct module of the kernel changes a full
+ * kernel rebuild is required.
+ *
+ * We have a few expectations for this special section, this function
+ * validates all this for us:
+ *
+ * * The section has contents
+ * * The section is unique
+ * * We expect the kernel to always have to allocate it: SHF_ALLOC
+ * * The section size must match the kernel's run time's struct module
+ *   size
+ *
+ * If all checks pass, the index will be cached in &load_info->index.mod
+ *
+ * Return: %0 on validation success, %-ENOEXEC on failure
+ */
+static int elf_validity_cache_index_mod(struct load_info *info)
+{
+	Elf_Shdr *shdr;
+	int mod_idx;
+
+	mod_idx = find_any_unique_sec(info, ".gnu.linkonce.this_module");
+	if (mod_idx <= 0) {
+		pr_err("module %s: Exactly one .gnu.linkonce.this_module section must exist.\n",
 		       info->name ?: "(missing .modinfo section or name field)");
-		goto no_exec;
+		return -ENOEXEC;
 	}
 
 	shdr = &info->sechdrs[mod_idx];
 
-	/*
-	 * This is already implied on the switch above, however let's be
-	 * pedantic about it.
-	 */
 	if (shdr->sh_type == SHT_NOBITS) {
 		pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n",
 		       info->name ?: "(missing .modinfo section or name field)");
-		goto no_exec;
+		return -ENOEXEC;
 	}
 
 	if (!(shdr->sh_flags & SHF_ALLOC)) {
 		pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n",
 		       info->name ?: "(missing .modinfo section or name field)");
-		goto no_exec;
+		return -ENOEXEC;
 	}
 
 	if (shdr->sh_size != sizeof(struct module)) {
 		pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n",
 		       info->name ?: "(missing .modinfo section or name field)");
-		goto no_exec;
+		return -ENOEXEC;
 	}
 
 	info->index.mod = mod_idx;
 
-	/* This is temporary: point mod into copy of data. */
-	info->mod = (void *)info->hdr + shdr->sh_offset;
+	return 0;
+}
 
-	/*
-	 * If we didn't load the .modinfo 'name' field earlier, fall back to
-	 * on-disk struct mod 'name' field.
-	 */
-	if (!info->name)
-		info->name = info->mod->name;
+/**
+ * elf_validity_cache_index_sym() - Validate and cache symtab index
+ * @info: Load info to cache symtab index in.
+ *        Must have &load_info->sechdrs and &load_info->secstrings populated.
+ *
+ * Checks that there is exactly one symbol table, then caches its index in
+ * &load_info->index.sym.
+ *
+ * Return: %0 if valid, %-ENOEXEC on failure.
+ */
+static int elf_validity_cache_index_sym(struct load_info *info)
+{
+	unsigned int sym_idx;
+	unsigned int num_sym_secs = 0;
+	int i;
+
+	for (i = 1; i < info->hdr->e_shnum; i++) {
+		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
+			num_sym_secs++;
+			sym_idx = i;
+		}
+	}
+
+	if (num_sym_secs != 1) {
+		pr_warn("%s: module has no symbols (stripped?)\n",
+			info->name ?: "(missing .modinfo section or name field)");
+		return -ENOEXEC;
+	}
+
+	info->index.sym = sym_idx;
+
+	return 0;
+}
+
+/**
+ * elf_validity_cache_index_str() - Validate and cache strtab index
+ * @info: Load info to cache strtab index in.
+ *        Must have &load_info->sechdrs and &load_info->secstrings populated.
+ *        Must have &load_info->index.sym populated.
+ *
+ * Looks at the symbol table's associated string table, makes sure it is
+ * in-bounds, and caches it.
+ *
+ * Return: %0 if valid, %-ENOEXEC on failure.
+ */
+static int elf_validity_cache_index_str(struct load_info *info)
+{
+	unsigned int str_idx = info->sechdrs[info->index.sym].sh_link;
+
+	if (str_idx == SHN_UNDEF || str_idx >= info->hdr->e_shnum) {
+		pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n",
+		       str_idx, str_idx, info->hdr->e_shnum);
+		return -ENOEXEC;
+	}
+
+	info->index.str = str_idx;
+	return 0;
+}
+
+/**
+ * elf_validity_cache_index() - Resolve, validate, cache section indices
+ * @info:  Load info to read from and update.
+ *         &load_info->sechdrs and &load_info->secstrings must be populated.
+ * @flags: Load flags, relevant to suppress version loading, see
+ *         uapi/linux/module.h
+ *
+ * Populates &load_info->index, validating as it goes.
+ * See child functions for per-field validation:
+ *
+ * * elf_validity_cache_index_info()
+ * * elf_validity_cache_index_mod()
+ * * elf_validity_cache_index_sym()
+ * * elf_validity_cache_index_str()
+ *
+ * If versioning is not suppressed via flags, load the version index from
+ * a section called "__versions" with no validation.
+ *
+ * If CONFIG_SMP is enabled, load the percpu section by name with no
+ * validation.
+ *
+ * Return: 0 on success, negative error code if an index failed validation.
+ */
+static int elf_validity_cache_index(struct load_info *info, int flags)
+{
+	int err;
+
+	err = elf_validity_cache_index_info(info);
+	if (err < 0)
+		return err;
+	err = elf_validity_cache_index_mod(info);
+	if (err < 0)
+		return err;
+	err = elf_validity_cache_index_sym(info);
+	if (err < 0)
+		return err;
+	err = elf_validity_cache_index_str(info);
+	if (err < 0)
+		return err;
 
 	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
 		info->index.vers = 0; /* Pretend no __versions section! */
@@ -1946,9 +2121,109 @@ static int elf_validity_cache_copy(struct load_info *info, int flags)
 	info->index.pcpu = find_pcpusec(info);
 
 	return 0;
+}
 
-no_exec:
-	return -ENOEXEC;
+/**
+ * elf_validity_cache_strtab() - Validate and cache symbol string table
+ * @info: Load info to read from and update.
+ *        Must have &load_info->sechdrs and &load_info->secstrings populated.
+ *        Must have &load_info->index populated.
+ *
+ * Checks:
+ *
+ * * The string table is not empty.
+ * * The string table starts and ends with NUL (required by ELF spec).
+ * * Every &Elf_Sym->st_name offset in the symbol table is inbounds of the
+ *   string table.
+ *
+ * And caches the pointer as &load_info->strtab in @info.
+ *
+ * Return: 0 on success, negative error code if a check failed.
+ */
+static int elf_validity_cache_strtab(struct load_info *info)
+{
+	Elf_Shdr *str_shdr = &info->sechdrs[info->index.str];
+	Elf_Shdr *sym_shdr = &info->sechdrs[info->index.sym];
+	char *strtab = (char *)info->hdr + str_shdr->sh_offset;
+	Elf_Sym *syms = (void *)info->hdr + sym_shdr->sh_offset;
+	int i;
+
+	if (str_shdr->sh_size == 0) {
+		pr_err("empty symbol string table\n");
+		return -ENOEXEC;
+	}
+	if (strtab[0] != '\0') {
+		pr_err("symbol string table missing leading NUL\n");
+		return -ENOEXEC;
+	}
+	if (strtab[str_shdr->sh_size - 1] != '\0') {
+		pr_err("symbol string table isn't NUL terminated\n");
+		return -ENOEXEC;
+	}
+
+	/*
+	 * Now that we know strtab is correctly structured, check symbol
+	 * starts are inbounds before they're used later.
+	 */
+	for (i = 0; i < sym_shdr->sh_size / sizeof(*syms); i++) {
+		if (syms[i].st_name >= str_shdr->sh_size) {
+			pr_err("symbol name out of bounds in string table");
+			return -ENOEXEC;
+		}
+	}
+
+	info->strtab = strtab;
+	return 0;
+}
+
+/*
+ * Check userspace passed ELF module against our expectations, and cache
+ * useful variables for further processing as we go.
+ *
+ * This does basic validity checks against section offsets and sizes, the
+ * section name string table, and the indices used for it (sh_name).
+ *
+ * As a last step, since we're already checking the ELF sections we cache
+ * useful variables which will be used later for our convenience:
+ *
+ *	o pointers to section headers
+ *	o cache the modinfo symbol section
+ *	o cache the string symbol section
+ *	o cache the module section
+ *
+ * As a last step we set info->mod to the temporary copy of the module in
+ * info->hdr. The final one will be allocated in move_module(). Any
+ * modifications we make to our copy of the module will be carried over
+ * to the final minted module.
+ */
+static int elf_validity_cache_copy(struct load_info *info, int flags)
+{
+	int err;
+
+	err = elf_validity_cache_sechdrs(info);
+	if (err < 0)
+		return err;
+	err = elf_validity_cache_secstrings(info);
+	if (err < 0)
+		return err;
+	err = elf_validity_cache_index(info, flags);
+	if (err < 0)
+		return err;
+	err = elf_validity_cache_strtab(info);
+	if (err < 0)
+		return err;
+
+	/* This is temporary: point mod into copy of data. */
+	info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;
+
+	/*
+	 * If we didn't load the .modinfo 'name' field earlier, fall back to
+	 * on-disk struct mod 'name' field.
+	 */
+	if (!info->name)
+		info->name = info->mod->name;
+
+	return 0;
 }
 
 #define COPY_CHUNK_SIZE (16*PAGE_SIZE)
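The string-table rules enforced by elf_validity_cache_strtab() (non-empty, first and last byte NUL, every st_name inside the table) are just as easy to check offline. A user-space sketch, again 64-bit only:

#include <elf.h>
#include <stddef.h>

/* User-space mirror of the strtab checks; returns 0 if valid. */
static int check_strtab(const char *strtab, size_t strtab_size,
			const Elf64_Sym *syms, size_t nsyms)
{
	size_t i;

	if (strtab_size == 0 || strtab[0] != '\0' ||
	    strtab[strtab_size - 1] != '\0')
		return -1;
	for (i = 0; i < nsyms; i++) {
		if (syms[i].st_name >= strtab_size)
			return -1;
	}
	return 0;
}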
diff --git a/kernel/notifier.c b/kernel/notifier.c
index b3ce28f39eb6..2f9fe7c30287 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -5,19 +5,11 @@
 #include <linux/notifier.h>
 #include <linux/rcupdate.h>
 #include <linux/vmalloc.h>
-#include <linux/reboot.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/notifier.h>
 
 /*
- * Notifier list for kernel code which wants to be called
- * at shutdown. This is used to stop any idling DMA operations
- * and the like.
- */
-BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
-
-/*
  * Notifier chain core routines. The exported routines below
  * are layered on top of these, with appropriate locking added.
 */
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index e35829d36039..1f87aa01ba44 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -685,8 +685,11 @@ static void power_down(void)
 		}
 		fallthrough;
 	case HIBERNATION_SHUTDOWN:
-		if (kernel_can_power_off())
+		if (kernel_can_power_off()) {
+			entering_platform_hibernation = true;
 			kernel_power_off();
+			entering_platform_hibernation = false;
+		}
 		break;
 	}
 	kernel_halt();
diff --git a/kernel/reboot.c b/kernel/reboot.c
index f05dbde2c93f..a701000bab34 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -72,6 +72,13 @@ static bool poweroff_fallback_to_halt;
  */
 void __weak (*pm_power_off)(void);
 
+/*
+ * Notifier list for kernel code which wants to be called
+ * at shutdown. This is used to stop any idling DMA operations
+ * and the like.
+ */
+static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
+
 /**
  *	emergency_restart - reboot the system
  *
@@ -1130,7 +1137,7 @@ static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char
 		val = REBOOT_UNDEFINED_STR;
 	}
 
-	return sprintf(buf, "%s\n", val);
+	return sysfs_emit(buf, "%s\n", val);
 }
 static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
@@ -1160,7 +1167,7 @@ static struct kobj_attribute reboot_mode_attr = __ATTR_RW(mode);
 #ifdef CONFIG_X86
 static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d\n", reboot_force);
+	return sysfs_emit(buf, "%d\n", reboot_force);
 }
 static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
@@ -1207,7 +1214,7 @@ static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char
 		val = REBOOT_UNDEFINED_STR;
 	}
 
-	return sprintf(buf, "%s\n", val);
+	return sysfs_emit(buf, "%s\n", val);
 }
 static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
@@ -1240,7 +1247,7 @@ static struct kobj_attribute reboot_type_attr = __ATTR_RW(type);
 #ifdef CONFIG_SMP
 static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d\n", reboot_cpu);
+	return sysfs_emit(buf, "%d\n", reboot_cpu);
 }
 static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
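The reboot.c conversions replace sprintf() with sysfs_emit(), which checks that the buffer really is the page sysfs passes to show() callbacks and never writes past PAGE_SIZE. The show-side pattern, sketched with a hypothetical attribute:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static int example_value;

static ssize_t example_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	/* sysfs_emit() validates buf and caps output at PAGE_SIZE */
	return sysfs_emit(buf, "%d\n", example_value);
}

static struct kobj_attribute example_attr = __ATTR_RO(example);

The store side is unaffected; only the formatted-output path changes.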
diff --git a/kernel/resource.c b/kernel/resource.c
index d2c8143ae4ff..c9fd26c06345 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -50,17 +50,35 @@ EXPORT_SYMBOL(iomem_resource);
 
 static DEFINE_RWLOCK(resource_lock);
 
-static struct resource *next_resource(struct resource *p, bool skip_children)
+/*
+ * Return the next node of @p in pre-order tree traversal. If
+ * @skip_children is true, skip the descendant nodes of @p in
+ * traversal. If @p is a descendant of @subtree_root, only traverse
+ * the subtree under @subtree_root.
+ */
+static struct resource *next_resource(struct resource *p, bool skip_children,
+				      struct resource *subtree_root)
 {
 	if (!skip_children && p->child)
 		return p->child;
-	while (!p->sibling && p->parent)
+	while (!p->sibling && p->parent) {
 		p = p->parent;
+		if (p == subtree_root)
+			return NULL;
+	}
 	return p->sibling;
 }
 
+/*
+ * Traverse the resource subtree under @_root in pre-order, excluding
+ * @_root itself.
+ *
+ * NOTE: '__p' is introduced to avoid shadowing '_p' outside of loop.
+ * And it is referenced to avoid unused variable warning.
+ */
 #define for_each_resource(_root, _p, _skip_children)		\
-	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))
+	for (typeof(_root) __root = (_root), __p = _p = __root->child; \
+	     __p && _p; _p = next_resource(_p, _skip_children, __root))
 
 #ifdef CONFIG_PROC_FS
 
@@ -88,7 +106,7 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
 
 	(*pos)++;
 
-	return (void *)next_resource(p, false);
+	return (void *)next_resource(p, false, NULL);
 }
 
 static void r_stop(struct seq_file *m, void *v)
@@ -297,6 +315,11 @@ int release_resource(struct resource *old)
 
 EXPORT_SYMBOL(release_resource);
 
+static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc)
+{
+	return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc);
+}
+
 /**
  * find_next_iomem_res - Finds the lowest iomem resource that covers part of
  *			 [@start..@end].
@@ -339,13 +362,9 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end,
 		if (p->end < start)
 			continue;
 
-		if ((p->flags & flags) != flags)
-			continue;
-		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
-			continue;
-
 		/* Found a match, break */
-		break;
+		if (is_type_match(p, flags, desc))
+			break;
 	}
 
 	if (p) {
@@ -537,21 +556,18 @@ static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
 {
-	resource_size_t ostart, oend;
 	int type = 0;
 	int other = 0;
 	struct resource *p, *dp;
-	bool is_type, covered;
-	struct resource res;
+	struct resource res, o;
+	bool covered;
 
 	res.start = start;
 	res.end = start + size - 1;
 
 	for (p = parent->child; p ; p = p->sibling) {
-		if (!resource_overlaps(p, &res))
+		if (!resource_intersection(p, &res, &o))
 			continue;
-		is_type = (p->flags & flags) == flags &&
-			(desc == IORES_DESC_NONE || desc == p->desc);
-		if (is_type) {
+		if (is_type_match(p, flags, desc)) {
 			type++;
 			continue;
 		}
@@ -568,27 +584,23 @@ static int __region_intersects(struct resource *parent, resource_size_t start,
		 * |-- "System RAM" --||-- "CXL Window 0a" --|
		 */
 		covered = false;
-		ostart = max(res.start, p->start);
-		oend = min(res.end, p->end);
 		for_each_resource(p, dp, false) {
 			if (!resource_overlaps(dp, &res))
 				continue;
-			is_type = (dp->flags & flags) == flags &&
-				(desc == IORES_DESC_NONE || desc == dp->desc);
-			if (is_type) {
+			if (is_type_match(dp, flags, desc)) {
 				type++;
 				/*
-				 * Range from 'ostart' to 'dp->start'
+				 * Range from 'o.start' to 'dp->start'
				 * isn't covered by matched resource.
				 */
-				if (dp->start > ostart)
+				if (dp->start > o.start)
 					break;
-				if (dp->end >= oend) {
+				if (dp->end >= o.end) {
 					covered = true;
 					break;
 				}
 				/* Remove covered range */
-				ostart = max(ostart, dp->end + 1);
+				o.start = max(o.start, dp->end + 1);
 			}
 		}
 		if (!covered)
@@ -744,7 +756,7 @@ EXPORT_SYMBOL_GPL(find_resource_space);
  * @root: root resource descriptor
  * @old: resource descriptor desired by caller
  * @newsize: new size of the resource descriptor
- * @constraint: the size and alignment constraints to be met.
+ * @constraint: the memory range and alignment constraints to be met.
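next_resource() is a pointer-chasing pre-order walk: descend to the first child unless children are skipped, otherwise climb toward the root until a sibling exists, bailing out when the climb reaches the subtree root. The same shape in self-contained form, assuming a node with parent/sibling/child links:

#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *parent;
	struct node *sibling;
	struct node *child;
};

/* Pre-order successor of @p, confined to the subtree under @root. */
static struct node *next_node(struct node *p, bool skip_children,
			      struct node *root)
{
	if (!skip_children && p->child)
		return p->child;		/* go deeper first */
	while (!p->sibling && p->parent) {
		p = p->parent;			/* climb until a sibling exists */
		if (p == root)
			return NULL;		/* never escape the subtree */
	}
	return p->sibling;
}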
 */
 static int reallocate_resource(struct resource *root, struct resource *old,
				resource_size_t newsize,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3ef047ed9705..be62f0ea1814 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2552,6 +2552,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
 	if (test_preempt_need_resched())
 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+	if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY))
+		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
 		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
 }
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index e08aee34ef63..da748b7cbc4d 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -462,17 +462,29 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 		bh_off ? 'b' :
 		'.';
 
-	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
+	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY |
				TRACE_FLAG_PREEMPT_RESCHED)) {
+	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
+		need_resched = 'B';
+		break;
 	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
 		need_resched = 'N';
 		break;
+	case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
+		need_resched = 'L';
+		break;
+	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY:
+		need_resched = 'b';
+		break;
 	case TRACE_FLAG_NEED_RESCHED:
 		need_resched = 'n';
 		break;
 	case TRACE_FLAG_PREEMPT_RESCHED:
 		need_resched = 'p';
 		break;
+	case TRACE_FLAG_NEED_RESCHED_LAZY:
+		need_resched = 'l';
+		break;
 	default:
 		need_resched = '.';
 		break;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5a93d4c446b8..41e0f7e9fa35 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -998,6 +998,7 @@ static int proc_watchdog_common(int which, const struct ctl_table *table, int wr
 
 	mutex_lock(&watchdog_mutex);
 
+	old = *param;
 	if (!write) {
 		/*
		 * On read synchronize the userspace interface. This is a
@@ -1005,8 +1006,8 @@ static int proc_watchdog_common(int which, const struct ctl_table *table, int wr
		 */
 		*param = (watchdog_enabled & which) != 0;
 		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+		*param = old;
 	} else {
-		old = READ_ONCE(*param);
 		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 		if (!err && old != READ_ONCE(*param))
 			proc_watchdog_update();
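With the lazy variants added, the latency-format need-resched column distinguishes all eight flag combinations. The full mapping encoded by the switch above:

NEED_RESCHED | NEED_RESCHED_LAZY | PREEMPT_RESCHED  ->  'B'
NEED_RESCHED | PREEMPT_RESCHED                      ->  'N'
NEED_RESCHED_LAZY | PREEMPT_RESCHED                 ->  'L'
NEED_RESCHED | NEED_RESCHED_LAZY                    ->  'b'
NEED_RESCHED                                        ->  'n'
PREEMPT_RESCHED                                     ->  'p'
NEED_RESCHED_LAZY                                   ->  'l'
(none)                                              ->  '.'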