Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c                 |   3
-rw-r--r--  kernel/dma/debug.c           |  20
-rw-r--r--  kernel/fork.c                |  26
-rw-r--r--  kernel/module/internal.h     |   7
-rw-r--r--  kernel/module/main.c         | 569
-rw-r--r--  kernel/signal.c              |   7
-rw-r--r--  kernel/time/ntp.c            |   2
-rw-r--r--  kernel/trace/trace.c         |   2
-rw-r--r--  kernel/trace/trace_output.c  |  14
9 files changed, 486 insertions, 164 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6e34b52cb5ce..b605334f8ee6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2866,7 +2866,6 @@ static struct attribute *cpuhp_cpu_attrs[] = {
static const struct attribute_group cpuhp_cpu_attr_group = {
.attrs = cpuhp_cpu_attrs,
.name = "hotplug",
- NULL
};
static ssize_t states_show(struct device *dev,
@@ -2898,7 +2897,6 @@ static struct attribute *cpuhp_cpu_root_attrs[] = {
static const struct attribute_group cpuhp_cpu_root_attr_group = {
.attrs = cpuhp_cpu_root_attrs,
.name = "hotplug",
- NULL
};
#ifdef CONFIG_HOTPLUG_SMT
@@ -3020,7 +3018,6 @@ static struct attribute *cpuhp_smt_attrs[] = {
static const struct attribute_group cpuhp_smt_attr_group = {
.attrs = cpuhp_smt_attrs,
.name = "smt",
- NULL
};
static int __init cpu_smt_sysfs_init(void)
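The three hunks above all remove the same mistake: a bare NULL at the end of a struct attribute_group initializer. The NULL sentinel belongs at the end of the attrs[] array, not in the group; in the group initializer a positional NULL only zero-fills a later member that is already zero. A minimal sketch of the intended sysfs pattern (attribute names assumed from the surrounding file):

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL			/* the sentinel terminates the array itself */
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name  = "hotplug",	/* no stray NULL: every member is designated */
};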
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 295396226f31..e43c6de2bce4 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -1219,7 +1219,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
entry->dev = dev;
entry->type = dma_debug_single;
- entry->paddr = page_to_phys(page);
+ entry->paddr = page_to_phys(page) + offset;
entry->dev_addr = dma_addr;
entry->size = size;
entry->direction = direction;
@@ -1377,6 +1377,18 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
}
}
+static phys_addr_t virt_to_paddr(void *virt)
+{
+ struct page *page;
+
+ if (is_vmalloc_addr(virt))
+ page = vmalloc_to_page(virt);
+ else
+ page = virt_to_page(virt);
+
+ return page_to_phys(page) + offset_in_page(virt);
+}
+
void debug_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t dma_addr, void *virt,
unsigned long attrs)
@@ -1399,8 +1411,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
entry->type = dma_debug_coherent;
entry->dev = dev;
- entry->paddr = page_to_phys((is_vmalloc_addr(virt) ?
- vmalloc_to_page(virt) : virt_to_page(virt)));
+ entry->paddr = virt_to_paddr(virt);
entry->size = size;
entry->dev_addr = dma_addr;
entry->direction = DMA_BIDIRECTIONAL;
@@ -1423,8 +1434,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
return;
- ref.paddr = page_to_phys((is_vmalloc_addr(virt) ?
- vmalloc_to_page(virt) : virt_to_page(virt)));
+ ref.paddr = virt_to_paddr(virt);
if (unlikely(dma_debug_disabled()))
return;
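With these changes debug_dma_map_page() records the physical address of the first mapped byte rather than of the containing page, and the new virt_to_paddr() helper applies the same offset handling to coherent allocations, so partial-page mappings are tracked where the device actually touches memory. A small userspace model of the difference, using made-up numbers and an assumed 4 KiB page:

#include <stdio.h>

int main(void)
{
	unsigned long page_phys = 0x100000;	/* stand-in for page_to_phys(page) */
	unsigned long offset = 0x200, size = 0x100;

	/* Old bookkeeping: the in-page offset was ignored. */
	printf("old range: [%#lx, %#lx)\n", page_phys, page_phys + size);
	/* New bookkeeping: paddr = page_to_phys(page) + offset. */
	printf("new range: [%#lx, %#lx)\n", page_phys + offset,
	       page_phys + offset + size);
	return 0;
}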
diff --git a/kernel/fork.c b/kernel/fork.c
index f253e81d0c28..1450b461d196 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -621,6 +621,12 @@ static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
exe_file = get_mm_exe_file(oldmm);
RCU_INIT_POINTER(mm->exe_file, exe_file);
+ /*
+ * We depend on the oldmm having properly denied write access to the
+ * exe_file already.
+ */
+ if (exe_file && deny_write_access(exe_file))
+ pr_warn_once("deny_write_access() failed in %s\n", __func__);
}
#ifdef CONFIG_MMU
@@ -1413,11 +1419,20 @@ int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
*/
old_exe_file = rcu_dereference_raw(mm->exe_file);
- if (new_exe_file)
+ if (new_exe_file) {
+ /*
+		 * We expect the caller (i.e., sys_execve) to have already denied
+ * write access, so this is unlikely to fail.
+ */
+ if (unlikely(deny_write_access(new_exe_file)))
+ return -EACCES;
get_file(new_exe_file);
+ }
rcu_assign_pointer(mm->exe_file, new_exe_file);
- if (old_exe_file)
+ if (old_exe_file) {
+ allow_write_access(old_exe_file);
fput(old_exe_file);
+ }
return 0;
}
@@ -1456,6 +1471,9 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
return ret;
}
+ ret = deny_write_access(new_exe_file);
+ if (ret)
+ return -EACCES;
get_file(new_exe_file);
/* set the new file */
@@ -1464,8 +1482,10 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
rcu_assign_pointer(mm->exe_file, new_exe_file);
mmap_write_unlock(mm);
- if (old_exe_file)
+ if (old_exe_file) {
+ allow_write_access(old_exe_file);
fput(old_exe_file);
+ }
return 0;
}
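These hunks reinstate the rule that mm->exe_file holds a write-denial on the file for as long as it is installed: deny_write_access() is taken when an exe_file is set or inherited at fork, and allow_write_access() releases it when the old file is dropped. For reference, a sketch of the i_writecount convention those helpers rely on, assuming their long-standing definitions in include/linux/fs.h:

static inline int deny_write_access(struct file *file)
{
	struct inode *inode = file_inode(file);

	/* i_writecount > 0 means writers exist: refuse with -ETXTBSY. */
	return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY;
}

static inline void allow_write_access(struct file *file)
{
	if (file)
		atomic_inc(&file_inode(file)->i_writecount);
}

A negative i_writecount marks the inode as write-denied, which is also what makes opening a running binary for writing fail with ETXTBSY.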
diff --git a/kernel/module/internal.h b/kernel/module/internal.h
index 2ebece8a789f..daef2be83902 100644
--- a/kernel/module/internal.h
+++ b/kernel/module/internal.h
@@ -80,7 +80,12 @@ struct load_info {
unsigned int used_pages;
#endif
struct {
- unsigned int sym, str, mod, vers, info, pcpu;
+ unsigned int sym;
+ unsigned int str;
+ unsigned int mod;
+ unsigned int vers;
+ unsigned int info;
+ unsigned int pcpu;
} index;
};
diff --git a/kernel/module/main.c b/kernel/module/main.c
index d2e1b8976c7b..5399c182b3cb 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -195,6 +195,38 @@ static unsigned int find_sec(const struct load_info *info, const char *name)
return 0;
}
+/**
+ * find_any_unique_sec() - Find a unique section index by name
+ * @info: Load info for the module to scan
+ * @name: Name of the section we're looking for
+ *
+ * Locates a unique section by name. Ignores SHF_ALLOC.
+ *
+ * Return: Section index if found uniquely, zero if absent, negative count
+ * of total instances if multiple were found.
+ */
+static int find_any_unique_sec(const struct load_info *info, const char *name)
+{
+ unsigned int idx;
+ unsigned int count = 0;
+ int i;
+
+ for (i = 1; i < info->hdr->e_shnum; i++) {
+ if (strcmp(info->secstrings + info->sechdrs[i].sh_name,
+ name) == 0) {
+ count++;
+ idx = i;
+ }
+ }
+ if (count == 1) {
+ return idx;
+ } else if (count == 0) {
+ return 0;
+ } else {
+ return -count;
+ }
+}
+
/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
@@ -1679,7 +1711,7 @@ bool __weak module_exit_section(const char *name)
return strstarts(name, ".exit");
}
-static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
+static int validate_section_offset(const struct load_info *info, Elf_Shdr *shdr)
{
#if defined(CONFIG_64BIT)
unsigned long long secend;
@@ -1698,62 +1730,80 @@ static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
return 0;
}
-/*
- * Check userspace passed ELF module against our expectations, and cache
- * useful variables for further processing as we go.
- *
- * This does basic validity checks against section offsets and sizes, the
- * section name string table, and the indices used for it (sh_name).
+/**
+ * elf_validity_ehdr() - Checks an ELF header for module validity
+ * @info: Load info containing the ELF header to check
*
- * As a last step, since we're already checking the ELF sections we cache
- * useful variables which will be used later for our convenience:
+ * Checks whether an ELF header could belong to a valid module. Checks:
*
- * o pointers to section headers
- * o cache the modinfo symbol section
- * o cache the string symbol section
- * o cache the module section
+ * * ELF header is within the data the user provided
+ * * ELF magic is present
+ * * It is relocatable (not final linked, not core file, etc.)
+ * * The header's machine type matches what the architecture expects.
+ * * Optional arch-specific hook for other properties
+ * - module_elf_check_arch() is currently only used by PPC to check
+ * ELF ABI version, but may be used by others in the future.
*
- * As a last step we set info->mod to the temporary copy of the module in
- * info->hdr. The final one will be allocated in move_module(). Any
- * modifications we make to our copy of the module will be carried over
- * to the final minted module.
+ * Return: %0 if valid, %-ENOEXEC on failure.
*/
-static int elf_validity_cache_copy(struct load_info *info, int flags)
+static int elf_validity_ehdr(const struct load_info *info)
{
- unsigned int i;
- Elf_Shdr *shdr, *strhdr;
- int err;
- unsigned int num_mod_secs = 0, mod_idx;
- unsigned int num_info_secs = 0, info_idx;
- unsigned int num_sym_secs = 0, sym_idx;
-
if (info->len < sizeof(*(info->hdr))) {
pr_err("Invalid ELF header len %lu\n", info->len);
- goto no_exec;
+ return -ENOEXEC;
}
-
if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) {
pr_err("Invalid ELF header magic: != %s\n", ELFMAG);
- goto no_exec;
+ return -ENOEXEC;
}
if (info->hdr->e_type != ET_REL) {
pr_err("Invalid ELF header type: %u != %u\n",
info->hdr->e_type, ET_REL);
- goto no_exec;
+ return -ENOEXEC;
}
if (!elf_check_arch(info->hdr)) {
pr_err("Invalid architecture in ELF header: %u\n",
info->hdr->e_machine);
- goto no_exec;
+ return -ENOEXEC;
}
if (!module_elf_check_arch(info->hdr)) {
pr_err("Invalid module architecture in ELF header: %u\n",
info->hdr->e_machine);
- goto no_exec;
+ return -ENOEXEC;
}
+ return 0;
+}
+
+/**
+ * elf_validity_cache_sechdrs() - Cache section headers if valid
+ * @info: Load info to compute section headers from
+ *
+ * Checks:
+ *
+ * * ELF header is valid (see elf_validity_ehdr())
+ * * Section headers are the size we expect
+ * * Section array fits in the user provided data
+ * * Section index 0 is NULL
+ * * Section contents are inbounds
+ *
+ * Then updates @info with a &load_info->sechdrs pointer if valid.
+ *
+ * Return: %0 if valid, negative error code if validation failed.
+ */
+static int elf_validity_cache_sechdrs(struct load_info *info)
+{
+ Elf_Shdr *sechdrs;
+ Elf_Shdr *shdr;
+ int i;
+ int err;
+
+ err = elf_validity_ehdr(info);
+ if (err < 0)
+ return err;
+
if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) {
pr_err("Invalid ELF section header size\n");
- goto no_exec;
+ return -ENOEXEC;
}
/*
@@ -1765,10 +1815,66 @@ static int elf_validity_cache_copy(struct load_info *info, int flags)
|| (info->hdr->e_shnum * sizeof(Elf_Shdr) >
info->len - info->hdr->e_shoff)) {
pr_err("Invalid ELF section header overflow\n");
- goto no_exec;
+ return -ENOEXEC;
}
- info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
+ sechdrs = (void *)info->hdr + info->hdr->e_shoff;
+
+ /*
+ * The code assumes that section 0 has a length of zero and
+ * an addr of zero, so check for it.
+ */
+ if (sechdrs[0].sh_type != SHT_NULL
+ || sechdrs[0].sh_size != 0
+ || sechdrs[0].sh_addr != 0) {
+ pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n",
+ sechdrs[0].sh_type);
+ return -ENOEXEC;
+ }
+
+ /* Validate contents are inbounds */
+ for (i = 1; i < info->hdr->e_shnum; i++) {
+ shdr = &sechdrs[i];
+ switch (shdr->sh_type) {
+ case SHT_NULL:
+ case SHT_NOBITS:
+ /* No contents, offset/size don't mean anything */
+ continue;
+ default:
+ err = validate_section_offset(info, shdr);
+ if (err < 0) {
+ pr_err("Invalid ELF section in module (section %u type %u)\n",
+ i, shdr->sh_type);
+ return err;
+ }
+ }
+ }
+
+ info->sechdrs = sechdrs;
+
+ return 0;
+}
+
+/**
+ * elf_validity_cache_secstrings() - Caches section names if valid
+ * @info: Load info to cache section names from. Must have valid sechdrs.
+ *
+ * Specifically checks:
+ *
+ * * Section name table index is inbounds of section headers
+ * * Section name table is not empty
+ * * Section name table is NUL terminated
+ * * All section name offsets are inbounds of the section
+ *
+ * Then updates @info with a &load_info->secstrings pointer if valid.
+ *
+ * Return: %0 if valid, negative error code if validation failed.
+ */
+static int elf_validity_cache_secstrings(struct load_info *info)
+{
+ Elf_Shdr *strhdr, *shdr;
+ char *secstrings;
+ int i;
/*
* Verify if the section name table index is valid.
@@ -1778,165 +1884,234 @@ static int elf_validity_cache_copy(struct load_info *info, int flags)
pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n",
info->hdr->e_shstrndx, info->hdr->e_shstrndx,
info->hdr->e_shnum);
- goto no_exec;
+ return -ENOEXEC;
}
strhdr = &info->sechdrs[info->hdr->e_shstrndx];
- err = validate_section_offset(info, strhdr);
- if (err < 0) {
- pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type);
- return err;
- }
/*
* The section name table must be NUL-terminated, as required
* by the spec. This makes strcmp and pr_* calls that access
* strings in the section safe.
*/
- info->secstrings = (void *)info->hdr + strhdr->sh_offset;
+ secstrings = (void *)info->hdr + strhdr->sh_offset;
if (strhdr->sh_size == 0) {
pr_err("empty section name table\n");
- goto no_exec;
+ return -ENOEXEC;
}
- if (info->secstrings[strhdr->sh_size - 1] != '\0') {
+ if (secstrings[strhdr->sh_size - 1] != '\0') {
pr_err("ELF Spec violation: section name table isn't null terminated\n");
- goto no_exec;
- }
-
- /*
- * The code assumes that section 0 has a length of zero and
- * an addr of zero, so check for it.
- */
- if (info->sechdrs[0].sh_type != SHT_NULL
- || info->sechdrs[0].sh_size != 0
- || info->sechdrs[0].sh_addr != 0) {
- pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n",
- info->sechdrs[0].sh_type);
- goto no_exec;
+ return -ENOEXEC;
}
- for (i = 1; i < info->hdr->e_shnum; i++) {
+ for (i = 0; i < info->hdr->e_shnum; i++) {
shdr = &info->sechdrs[i];
- switch (shdr->sh_type) {
- case SHT_NULL:
- case SHT_NOBITS:
+ /* SHT_NULL means sh_name has an undefined value */
+ if (shdr->sh_type == SHT_NULL)
continue;
- case SHT_SYMTAB:
- if (shdr->sh_link == SHN_UNDEF
- || shdr->sh_link >= info->hdr->e_shnum) {
- pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n",
- shdr->sh_link, shdr->sh_link,
- info->hdr->e_shnum);
- goto no_exec;
- }
- num_sym_secs++;
- sym_idx = i;
- fallthrough;
- default:
- err = validate_section_offset(info, shdr);
- if (err < 0) {
- pr_err("Invalid ELF section in module (section %u type %u)\n",
- i, shdr->sh_type);
- return err;
- }
- if (strcmp(info->secstrings + shdr->sh_name,
- ".gnu.linkonce.this_module") == 0) {
- num_mod_secs++;
- mod_idx = i;
- } else if (strcmp(info->secstrings + shdr->sh_name,
- ".modinfo") == 0) {
- num_info_secs++;
- info_idx = i;
- }
-
- if (shdr->sh_flags & SHF_ALLOC) {
- if (shdr->sh_name >= strhdr->sh_size) {
- pr_err("Invalid ELF section name in module (section %u type %u)\n",
- i, shdr->sh_type);
- return -ENOEXEC;
- }
- }
- break;
+ if (shdr->sh_name >= strhdr->sh_size) {
+ pr_err("Invalid ELF section name in module (section %u type %u)\n",
+ i, shdr->sh_type);
+ return -ENOEXEC;
}
}
- if (num_info_secs > 1) {
+ info->secstrings = secstrings;
+ return 0;
+}
+
+/**
+ * elf_validity_cache_index_info() - Validate and cache modinfo section
+ * @info: Load info to populate the modinfo index on.
+ * Must have &load_info->sechdrs and &load_info->secstrings populated
+ *
+ * Checks that if there is a .modinfo section, it is unique.
+ * Then, it caches its index in &load_info->index.info.
+ * Finally, it tries to populate the name to improve error messages.
+ *
+ * Return: %0 if valid, %-ENOEXEC if multiple modinfo sections were found.
+ */
+static int elf_validity_cache_index_info(struct load_info *info)
+{
+ int info_idx;
+
+ info_idx = find_any_unique_sec(info, ".modinfo");
+
+ if (info_idx == 0)
+ /* Early return, no .modinfo */
+ return 0;
+
+ if (info_idx < 0) {
pr_err("Only one .modinfo section must exist.\n");
- goto no_exec;
- } else if (num_info_secs == 1) {
- /* Try to find a name early so we can log errors with a module name */
- info->index.info = info_idx;
- info->name = get_modinfo(info, "name");
+ return -ENOEXEC;
}
- if (num_sym_secs != 1) {
- pr_warn("%s: module has no symbols (stripped?)\n",
- info->name ?: "(missing .modinfo section or name field)");
- goto no_exec;
- }
+ info->index.info = info_idx;
+ /* Try to find a name early so we can log errors with a module name */
+ info->name = get_modinfo(info, "name");
- /* Sets internal symbols and strings. */
- info->index.sym = sym_idx;
- shdr = &info->sechdrs[sym_idx];
- info->index.str = shdr->sh_link;
- info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset;
+ return 0;
+}
- /*
- * The ".gnu.linkonce.this_module" ELF section is special. It is
- * what modpost uses to refer to __this_module and let's use rely
- * on THIS_MODULE to point to &__this_module properly. The kernel's
- * modpost declares it on each modules's *.mod.c file. If the struct
- * module of the kernel changes a full kernel rebuild is required.
- *
- * We have a few expectaions for this special section, the following
- * code validates all this for us:
- *
- * o Only one section must exist
- * o We expect the kernel to always have to allocate it: SHF_ALLOC
- * o The section size must match the kernel's run time's struct module
- * size
- */
- if (num_mod_secs != 1) {
- pr_err("module %s: Only one .gnu.linkonce.this_module section must exist.\n",
+/**
+ * elf_validity_cache_index_mod() - Validates and caches this_module section
+ * @info: Load info to cache this_module on.
+ * Must have &load_info->sechdrs and &load_info->secstrings populated
+ *
+ * The ".gnu.linkonce.this_module" ELF section is special. It is what modpost
+ * uses to refer to __this_module, and it lets us rely on THIS_MODULE to point
+ * to &__this_module properly. The kernel's modpost declares it in each
+ * module's *.mod.c file. If the kernel's struct module changes, a full
+ * kernel rebuild is required.
+ *
+ * We have a few expectations for this special section, which this function
+ * validates for us:
+ *
+ * * The section has contents
+ * * The section is unique
+ * * We expect the kernel to always have to allocate it: SHF_ALLOC
+ * * The section size must match the kernel's run time's struct module
+ * size
+ *
+ * If all checks pass, the index will be cached in &load_info->index.mod
+ *
+ * Return: %0 on validation success, %-ENOEXEC on failure
+ */
+static int elf_validity_cache_index_mod(struct load_info *info)
+{
+ Elf_Shdr *shdr;
+ int mod_idx;
+
+ mod_idx = find_any_unique_sec(info, ".gnu.linkonce.this_module");
+ if (mod_idx <= 0) {
+ pr_err("module %s: Exactly one .gnu.linkonce.this_module section must exist.\n",
info->name ?: "(missing .modinfo section or name field)");
- goto no_exec;
+ return -ENOEXEC;
}
shdr = &info->sechdrs[mod_idx];
- /*
- * This is already implied on the switch above, however let's be
- * pedantic about it.
- */
if (shdr->sh_type == SHT_NOBITS) {
pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n",
info->name ?: "(missing .modinfo section or name field)");
- goto no_exec;
+ return -ENOEXEC;
}
if (!(shdr->sh_flags & SHF_ALLOC)) {
pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n",
info->name ?: "(missing .modinfo section or name field)");
- goto no_exec;
+ return -ENOEXEC;
}
if (shdr->sh_size != sizeof(struct module)) {
pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n",
info->name ?: "(missing .modinfo section or name field)");
- goto no_exec;
+ return -ENOEXEC;
}
info->index.mod = mod_idx;
- /* This is temporary: point mod into copy of data. */
- info->mod = (void *)info->hdr + shdr->sh_offset;
+ return 0;
+}
- /*
- * If we didn't load the .modinfo 'name' field earlier, fall back to
- * on-disk struct mod 'name' field.
- */
- if (!info->name)
- info->name = info->mod->name;
+/**
+ * elf_validity_cache_index_sym() - Validate and cache symtab index
+ * @info: Load info to cache symtab index in.
+ * Must have &load_info->sechdrs and &load_info->secstrings populated.
+ *
+ * Checks that there is exactly one symbol table, then caches its index in
+ * &load_info->index.sym.
+ *
+ * Return: %0 if valid, %-ENOEXEC on failure.
+ */
+static int elf_validity_cache_index_sym(struct load_info *info)
+{
+ unsigned int sym_idx;
+ unsigned int num_sym_secs = 0;
+ int i;
+
+ for (i = 1; i < info->hdr->e_shnum; i++) {
+ if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
+ num_sym_secs++;
+ sym_idx = i;
+ }
+ }
+
+ if (num_sym_secs != 1) {
+ pr_warn("%s: module has no symbols (stripped?)\n",
+ info->name ?: "(missing .modinfo section or name field)");
+ return -ENOEXEC;
+ }
+
+ info->index.sym = sym_idx;
+
+ return 0;
+}
+
+/**
+ * elf_validity_cache_index_str() - Validate and cache strtab index
+ * @info: Load info to cache strtab index in.
+ * Must have &load_info->sechdrs and &load_info->secstrings populated.
+ * Must have &load_info->index.sym populated.
+ *
+ * Looks at the symbol table's associated string table, makes sure it is
+ * in-bounds, and caches it.
+ *
+ * Return: %0 if valid, %-ENOEXEC on failure.
+ */
+static int elf_validity_cache_index_str(struct load_info *info)
+{
+ unsigned int str_idx = info->sechdrs[info->index.sym].sh_link;
+
+ if (str_idx == SHN_UNDEF || str_idx >= info->hdr->e_shnum) {
+ pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n",
+ str_idx, str_idx, info->hdr->e_shnum);
+ return -ENOEXEC;
+ }
+
+ info->index.str = str_idx;
+ return 0;
+}
+
+/**
+ * elf_validity_cache_index() - Resolve, validate, cache section indices
+ * @info: Load info to read from and update.
+ * &load_info->sechdrs and &load_info->secstrings must be populated.
+ * @flags: Load flags, relevant to suppress version loading, see
+ * uapi/linux/module.h
+ *
+ * Populates &load_info->index, validating as it goes.
+ * See child functions for per-field validation:
+ *
+ * * elf_validity_cache_index_info()
+ * * elf_validity_cache_index_mod()
+ * * elf_validity_cache_index_sym()
+ * * elf_validity_cache_index_str()
+ *
+ * If versioning is not suppressed via flags, load the version index from
+ * a section called "__versions" with no validation.
+ *
+ * If CONFIG_SMP is enabled, load the percpu section by name with no
+ * validation.
+ *
+ * Return: 0 on success, negative error code if an index failed validation.
+ */
+static int elf_validity_cache_index(struct load_info *info, int flags)
+{
+ int err;
+
+ err = elf_validity_cache_index_info(info);
+ if (err < 0)
+ return err;
+ err = elf_validity_cache_index_mod(info);
+ if (err < 0)
+ return err;
+ err = elf_validity_cache_index_sym(info);
+ if (err < 0)
+ return err;
+ err = elf_validity_cache_index_str(info);
+ if (err < 0)
+ return err;
if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
info->index.vers = 0; /* Pretend no __versions section! */
@@ -1946,9 +2121,109 @@ static int elf_validity_cache_copy(struct load_info *info, int flags)
info->index.pcpu = find_pcpusec(info);
return 0;
+}
-no_exec:
- return -ENOEXEC;
+/**
+ * elf_validity_cache_strtab() - Validate and cache symbol string table
+ * @info: Load info to read from and update.
+ * Must have &load_info->sechdrs and &load_info->secstrings populated.
+ * Must have &load_info->index populated.
+ *
+ * Checks:
+ *
+ * * The string table is not empty.
+ * * The string table starts and ends with NUL (required by ELF spec).
+ * * Every &Elf_Sym->st_name offset in the symbol table is inbounds of the
+ * string table.
+ *
+ * And caches the pointer as &load_info->strtab in @info.
+ *
+ * Return: 0 on success, negative error code if a check failed.
+ */
+static int elf_validity_cache_strtab(struct load_info *info)
+{
+ Elf_Shdr *str_shdr = &info->sechdrs[info->index.str];
+ Elf_Shdr *sym_shdr = &info->sechdrs[info->index.sym];
+ char *strtab = (char *)info->hdr + str_shdr->sh_offset;
+ Elf_Sym *syms = (void *)info->hdr + sym_shdr->sh_offset;
+ int i;
+
+ if (str_shdr->sh_size == 0) {
+ pr_err("empty symbol string table\n");
+ return -ENOEXEC;
+ }
+ if (strtab[0] != '\0') {
+ pr_err("symbol string table missing leading NUL\n");
+ return -ENOEXEC;
+ }
+ if (strtab[str_shdr->sh_size - 1] != '\0') {
+ pr_err("symbol string table isn't NUL terminated\n");
+ return -ENOEXEC;
+ }
+
+ /*
+ * Now that we know strtab is correctly structured, check symbol
+ * starts are inbounds before they're used later.
+ */
+ for (i = 0; i < sym_shdr->sh_size / sizeof(*syms); i++) {
+ if (syms[i].st_name >= str_shdr->sh_size) {
+			pr_err("symbol name out of bounds in string table\n");
+ return -ENOEXEC;
+ }
+ }
+
+ info->strtab = strtab;
+ return 0;
+}
+
+/*
+ * Check userspace passed ELF module against our expectations, and cache
+ * useful variables for further processing as we go.
+ *
+ * This does basic validity checks against section offsets and sizes, the
+ * section name string table, and the indices used for it (sh_name).
+ *
+ * As a last step, since we're already checking the ELF sections we cache
+ * useful variables which will be used later for our convenience:
+ *
+ * o pointers to section headers
+ * o cache the modinfo symbol section
+ * o cache the string symbol section
+ * o cache the module section
+ *
+ * As a last step we set info->mod to the temporary copy of the module in
+ * info->hdr. The final one will be allocated in move_module(). Any
+ * modifications we make to our copy of the module will be carried over
+ * to the final minted module.
+ */
+static int elf_validity_cache_copy(struct load_info *info, int flags)
+{
+ int err;
+
+ err = elf_validity_cache_sechdrs(info);
+ if (err < 0)
+ return err;
+ err = elf_validity_cache_secstrings(info);
+ if (err < 0)
+ return err;
+ err = elf_validity_cache_index(info, flags);
+ if (err < 0)
+ return err;
+ err = elf_validity_cache_strtab(info);
+ if (err < 0)
+ return err;
+
+ /* This is temporary: point mod into copy of data. */
+ info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;
+
+ /*
+ * If we didn't load the .modinfo 'name' field earlier, fall back to
+ * on-disk struct mod 'name' field.
+ */
+ if (!info->name)
+ info->name = info->mod->name;
+
+ return 0;
}
#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
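Most of the churn above splits the old monolithic elf_validity_cache_copy() into small helpers that each validate and cache one thing, with elf_validity_cache_copy() reduced to chaining them. The one new convention worth remembering is the return value of find_any_unique_sec(): a positive section index for a unique match, zero when the section is absent (index 0 is always the SHT_NULL section, so it can never be a real hit), and a negative count when duplicates exist. A hypothetical caller sketch, with a made-up section name:

static int cache_example_index(const struct load_info *info, unsigned int *out)
{
	int idx = find_any_unique_sec(info, ".example");	/* hypothetical section */

	if (idx < 0) {
		pr_err("expected at most one .example section, found %d\n", -idx);
		return -ENOEXEC;
	}
	*out = idx;	/* zero means "absent"; treat the section as optional */
	return 0;
}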
diff --git a/kernel/signal.c b/kernel/signal.c
index 98b65cb35830..989b1cc9116a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1959,14 +1959,15 @@ static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t,
*
* Where type is not PIDTYPE_PID, signals must be delivered to the
* process. In this case, prefer to deliver to current if it is in
- * the same thread group as the target process, which avoids
- * unnecessarily waking up a potentially idle task.
+ * the same thread group as the target process and its sighand is
+ * stable, which avoids unnecessarily waking up a potentially idle task.
*/
static inline struct task_struct *posixtimer_get_target(struct k_itimer *tmr)
{
struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type);
- if (t && tmr->it_pid_type != PIDTYPE_PID && same_thread_group(t, current))
+ if (t && tmr->it_pid_type != PIDTYPE_PID &&
+ same_thread_group(t, current) && !current->exit_state)
t = current;
return t;
}
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index b550ebe0f03b..163e7a2033b6 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -798,7 +798,7 @@ int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
txc->offset = shift_right(ntpdata->time_offset * NTP_INTERVAL_FREQ, NTP_SCALE_SHIFT);
if (!(ntpdata->time_status & STA_NANO))
- txc->offset = (u32)txc->offset / NSEC_PER_USEC;
+ txc->offset = div_s64(txc->offset, NSEC_PER_USEC);
}
result = ntpdata->time_state;
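The ntp change matters for negative offsets: casting the signed 64-bit offset to u32 before the division wraps it to a huge positive value, whereas div_s64() performs a proper signed division. A tiny userspace demonstration with a made-up offset:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000

int main(void)
{
	int64_t offset = -61000;	/* -61 us, expressed in nanoseconds */

	/* Old code: the (u32) cast wraps the negative value before dividing. */
	printf("old: %llu us\n",
	       (unsigned long long)((uint32_t)offset / NSEC_PER_USEC));
	/* New code: signed 64-bit division, as div_s64() does in the kernel. */
	printf("new: %lld us\n", (long long)(offset / NSEC_PER_USEC));
	return 0;
}

This prints "old: 4294906 us" and "new: -61 us".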
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3ef047ed9705..be62f0ea1814 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2552,6 +2552,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+ if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY))
+ trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index e08aee34ef63..da748b7cbc4d 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -462,17 +462,29 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
bh_off ? 'b' :
'.';
- switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
+ switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY |
TRACE_FLAG_PREEMPT_RESCHED)) {
+ case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
+ need_resched = 'B';
+ break;
case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
need_resched = 'N';
break;
+ case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
+ need_resched = 'L';
+ break;
+ case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY:
+ need_resched = 'b';
+ break;
case TRACE_FLAG_NEED_RESCHED:
need_resched = 'n';
break;
case TRACE_FLAG_PREEMPT_RESCHED:
need_resched = 'p';
break;
+ case TRACE_FLAG_NEED_RESCHED_LAZY:
+ need_resched = 'l';
+ break;
default:
need_resched = '.';
break;
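Together with the tracing_gen_ctx_irq_test() change above, the need-resched column of the latency format can now report eight states: 'B' (need + lazy + preempt requested), 'N' (need + preempt), 'L' (lazy + preempt), 'b' (need + lazy), 'n' (need only), 'p' (preempt only), 'l' (lazy only), and '.' (none set).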