Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/irq.c             |   1
 arch/arm/kernel/smp.c             | 140
 arch/arm/kernel/topology.c        |  26
 arch/arm/kernel/vmlinux-xip.lds.S |   8
 arch/arm/kernel/vmlinux.lds.S     |   8
 arch/arm/kernel/vmlinux.lds.h     | 127
 6 files changed, 104 insertions(+), 206 deletions(-)
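
Taken together, these hunks convert 32-bit ARM's IPIs from the private __smp_cross_call() hook into ordinary per-CPU interrupts: the irqchip hands the kernel a contiguous range of IRQs, smp.c requests a per-CPU handler for each, and the arch-private statistics, the irq_enter()/irq_exit() bracketing and the cross-call registration all fall away. The enum hunk below shows only its tail; combined with the ipi_types[] table, the resulting layout reconstructs roughly as follows (a sketch -- the entries above IPI_CPU_STOP are inferred, not shown in the diff):

        enum ipi_msg_type {
                IPI_WAKEUP,                     /* inferred from ipi_types[] */
                IPI_TIMER,
                IPI_RESCHEDULE,
                IPI_CALL_FUNC,
                IPI_CPU_STOP,
                IPI_IRQ_WORK,
                IPI_COMPLETION,
                NR_IPI,                         /* count of "normal", traceable IPIs */
                IPI_CPU_BACKTRACE = NR_IPI,     /* special: excluded from NR_IPI */
                MAX_IPI                         /* total IRQs requested from the irqchip */
        };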
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ee514034c0a1..698b6f636156 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -18,7 +18,6 @@
* IRQ's are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
-#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
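
The <linux/kernel_stat.h> include migrates to smp.c because, with each IPI now backed by a real irq_desc, the per-CPU counts printed by show_ipi_list() come from the generic kstat code rather than the arch-private ipi_irqs[] array. A minimal sketch of the accessor involved (irq and cpu are placeholders):

        #include <linux/kernel_stat.h>

        /* per-CPU delivery count for one interrupt, as used by show_ipi_list() */
        unsigned int count = kstat_irqs_cpu(irq, cpu);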
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5d9da61eff62..48099c6e1e4a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -26,6 +26,7 @@
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/bugs.h>
@@ -65,18 +66,26 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_IRQ_WORK,
IPI_COMPLETION,
+ NR_IPI,
/*
* CPU_BACKTRACE is special and not included in NR_IPI
* or tracable with trace_ipi_*
*/
- IPI_CPU_BACKTRACE,
+ IPI_CPU_BACKTRACE = NR_IPI,
/*
* SGI8-15 can be reserved by secure firmware, and thus may
* not be usable by the kernel. Please keep the above limited
* to at most 8 entries.
*/
+ MAX_IPI
};
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
static DECLARE_COMPLETION(cpu_running);
static struct smp_operations smp_ops __ro_after_init;
@@ -226,6 +235,17 @@ int platform_can_hotplug_cpu(unsigned int cpu)
return cpu != 0;
}
+static void ipi_teardown(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ disable_percpu_irq(ipi_irq_base + i);
+}
+
/*
* __cpu_disable runs on the processor to be shutdown.
*/
@@ -247,6 +267,7 @@ int __cpu_disable(void)
* and we must not schedule until we're ready to give up the cpu.
*/
set_cpu_online(cpu, false);
+ ipi_teardown(cpu);
/*
* OK - migrate IRQs away from this CPU
@@ -422,6 +443,8 @@ asmlinkage void secondary_start_kernel(void)
notify_cpu_starting(cpu);
+ ipi_setup(cpu);
+
calibrate_delay();
smp_store_cpu_info(cpu);
@@ -500,14 +523,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
- if (!__smp_cross_call)
- __smp_cross_call = fn;
-}
-
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -519,38 +534,28 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
S(IPI_COMPLETION, "completion interrupts"),
};
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
- trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
- __smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
+ unsigned int irq;
+
+ if (!ipi_desc[i])
+ continue;
+
+ irq = irq_desc_get_irq(ipi_desc[i]);
seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ",
- __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
seq_printf(p, " %s\n", ipi_types[i]);
}
}
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
- u64 sum = 0;
- int i;
-
- for (i = 0; i < NR_IPI; i++)
- sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
- return sum;
-}
-
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_CALL_FUNC);
@@ -627,15 +632,12 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
handle_IPI(ipinr, regs);
}
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
{
unsigned int cpu = smp_processor_id();
- struct pt_regs *old_regs = set_irq_regs(regs);
- if ((unsigned)ipinr < NR_IPI) {
+ if ((unsigned)ipinr < NR_IPI)
trace_ipi_entry_rcuidle(ipi_types[ipinr]);
- __inc_irq_stat(cpu, ipi_irqs[ipinr]);
- }
switch (ipinr) {
case IPI_WAKEUP:
@@ -643,9 +645,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
- irq_enter();
tick_receive_broadcast();
- irq_exit();
break;
#endif
@@ -654,36 +654,26 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
- irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
break;
case IPI_CPU_STOP:
- irq_enter();
ipi_cpu_stop(cpu);
- irq_exit();
break;
#ifdef CONFIG_IRQ_WORK
case IPI_IRQ_WORK:
- irq_enter();
irq_work_run();
- irq_exit();
break;
#endif
case IPI_COMPLETION:
- irq_enter();
ipi_complete(cpu);
- irq_exit();
break;
case IPI_CPU_BACKTRACE:
printk_nmi_enter();
- irq_enter();
- nmi_cpu_backtrace(regs);
- irq_exit();
+ nmi_cpu_backtrace(get_irq_regs());
printk_nmi_exit();
break;
@@ -695,9 +685,67 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
if ((unsigned)ipinr < NR_IPI)
trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ do_handle_IPI(ipinr);
+ irq_exit();
+
set_irq_regs(old_regs);
}
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+ do_handle_IPI(irq - ipi_irq_base);
+ return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+ __ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+ int i;
+
+ WARN_ON(n < MAX_IPI);
+ nr_ipi = min(n, MAX_IPI);
+
+ for (i = 0; i < nr_ipi; i++) {
+ int err;
+
+ err = request_percpu_irq(ipi_base + i, ipi_handler,
+ "IPI", &irq_stat);
+ WARN_ON(err);
+
+ ipi_desc[i] = irq_to_desc(ipi_base + i);
+ irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+ }
+
+ ipi_irq_base = ipi_base;
+
+ /* Setup the boot CPU immediately */
+ ipi_setup(smp_processor_id());
+}
+
void smp_send_reschedule(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
@@ -805,7 +853,7 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
- __smp_cross_call(mask, IPI_CPU_BACKTRACE);
+ __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
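
None of this takes effect until an irqchip driver actually allocates the per-CPU IRQs and reports them. A minimal sketch of the driver side, modelled on how a GIC-style driver might wire up its SGIs (the domain, the fwspec and the count of 8 are assumptions for illustration, not part of this diff):

        #include <linux/irq.h>
        #include <linux/irqdomain.h>
        #include <linux/numa.h>

        static void __init example_smp_init(struct irq_domain *d,
                                            struct irq_fwspec *sgi_fwspec)
        {
                int base_sgi;

                /* allocate a contiguous block of per-CPU IRQs for SGI0-7 */
                base_sgi = __irq_domain_alloc_irqs(d, -1, 8, NUMA_NO_NODE,
                                                   sgi_fwspec, false, NULL);
                if (WARN_ON(base_sgi <= 0))
                        return;

                /* smp.c request_percpu_irq()s each one and enables them
                 * on the boot CPU via ipi_setup() */
                set_smp_ipi_range(base_sgi, 8);
        }

Note that ipi_handler() recovers the IPI index as irq - ipi_irq_base, so the range handed to set_smp_ipi_range() must be contiguous.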
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index b5adaf744630..ef0058de432b 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -178,15 +178,6 @@ static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif
/*
- * The current assumption is that we can power gate each core independently.
- * This will be superseded by DT binding once available.
- */
-const struct cpumask *cpu_corepower_mask(int cpu)
-{
- return &cpu_topology[cpu].thread_sibling;
-}
-
-/*
* store_cpu_topology is called at boot when only one cpu is running
* and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
* which prevents simultaneous write access to cpu_topology array
@@ -241,20 +232,6 @@ topology_populated:
update_siblings_masks(cpuid);
}
-static inline int cpu_corepower_flags(void)
-{
- return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
-}
-
-static struct sched_domain_topology_level arm_topology[] = {
-#ifdef CONFIG_SCHED_MC
- { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
- { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
-#endif
- { cpu_cpu_mask, SD_INIT_NAME(DIE) },
- { NULL, },
-};
-
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
@@ -265,7 +242,4 @@ void __init init_cpu_topology(void)
smp_wmb();
parse_dt_topology();
-
- /* Set scheduler topology descriptor */
- set_sched_topology(arm_topology);
}
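
With arm_topology[] and the core-power-domain mask gone, the arch stops overriding the scheduler's topology table and the generic default applies. For reference, the default table in kernel/sched/topology.c has roughly this shape (quoted from memory; treat it as a sketch, not part of this diff):

        static struct sched_domain_topology_level default_topology[] = {
        #ifdef CONFIG_SCHED_SMT
                { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
        #endif
        #ifdef CONFIG_SCHED_MC
                { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
        #endif
                { cpu_cpu_mask, SD_INIT_NAME(DIE) },
                { NULL, },
        };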
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 6d2be994ae58..50136828f5b5 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -9,15 +9,13 @@
#include <linux/sizes.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>
-#include "vmlinux.lds.h"
-
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -152,6 +150,10 @@ SECTIONS
_end = .;
STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+
+ ARM_ASSERTS
}
/*
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 7f24bc08403e..5f4922e858d0 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -9,15 +9,13 @@
#else
#include <linux/pgtable.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>
-#include "vmlinux.lds.h"
-
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -151,6 +149,10 @@ SECTIONS
_end = .;
STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+
+ ARM_ASSERTS
}
#ifdef CONFIG_STRICT_KERNEL_RWX
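
Both linker scripts now include a single <asm/vmlinux.lds.h> in place of <asm-generic/vmlinux.lds.h> plus the local header deleted below, and both gain DWARF_DEBUG, ARM_DETAILS and ARM_ASSERTS at the end of SECTIONS. Only the include change is visible in this diff, so the following shape of the new header is an assumption:

        /* arch/arm/include/asm/vmlinux.lds.h -- hypothetical layout */
        #include <asm-generic/vmlinux.lds.h>    /* STABS_DEBUG, DWARF_DEBUG, ... */

        /* ...the PROC_INFO/IDMAP_TEXT/ARM_DISCARD/ARM_TEXT/ARM_UNWIND_SECTIONS/
         * ARM_VECTORS/ARM_TCM macros removed below, carried over unchanged,
         * plus the new ARM_DETAILS (extra ELF detail sections) and
         * ARM_ASSERTS (link-time sanity checks)... */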
diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
deleted file mode 100644
index 381a8e105fa5..000000000000
--- a/arch/arm/kernel/vmlinux.lds.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define ARM_CPU_DISCARD(x)
-#define ARM_CPU_KEEP(x) x
-#else
-#define ARM_CPU_DISCARD(x) x
-#define ARM_CPU_KEEP(x)
-#endif
-
-#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
- defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
-#define ARM_EXIT_KEEP(x) x
-#define ARM_EXIT_DISCARD(x)
-#else
-#define ARM_EXIT_KEEP(x)
-#define ARM_EXIT_DISCARD(x) x
-#endif
-
-#ifdef CONFIG_MMU
-#define ARM_MMU_KEEP(x) x
-#define ARM_MMU_DISCARD(x)
-#else
-#define ARM_MMU_KEEP(x)
-#define ARM_MMU_DISCARD(x) x
-#endif
-
-#define PROC_INFO \
- . = ALIGN(4); \
- __proc_info_begin = .; \
- *(.proc.info.init) \
- __proc_info_end = .;
-
-#define IDMAP_TEXT \
- ALIGN_FUNCTION(); \
- __idmap_text_start = .; \
- *(.idmap.text) \
- __idmap_text_end = .; \
-
-#define ARM_DISCARD \
- *(.ARM.exidx.exit.text) \
- *(.ARM.extab.exit.text) \
- *(.ARM.exidx.text.exit) \
- *(.ARM.extab.text.exit) \
- ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
- ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
- ARM_EXIT_DISCARD(EXIT_TEXT) \
- ARM_EXIT_DISCARD(EXIT_DATA) \
- EXIT_CALL \
- ARM_MMU_DISCARD(*(.text.fixup)) \
- ARM_MMU_DISCARD(*(__ex_table)) \
- *(.discard) \
- *(.discard.*)
-
-#define ARM_TEXT \
- IDMAP_TEXT \
- __entry_text_start = .; \
- *(.entry.text) \
- __entry_text_end = .; \
- IRQENTRY_TEXT \
- SOFTIRQENTRY_TEXT \
- TEXT_TEXT \
- SCHED_TEXT \
- CPUIDLE_TEXT \
- LOCK_TEXT \
- KPROBES_TEXT \
- *(.gnu.warning) \
- *(.glue_7) \
- *(.glue_7t) \
- . = ALIGN(4); \
- *(.got) /* Global offset table */ \
- ARM_CPU_KEEP(PROC_INFO)
-
-/* Stack unwinding tables */
-#define ARM_UNWIND_SECTIONS \
- . = ALIGN(8); \
- .ARM.unwind_idx : { \
- __start_unwind_idx = .; \
- *(.ARM.exidx*) \
- __stop_unwind_idx = .; \
- } \
- .ARM.unwind_tab : { \
- __start_unwind_tab = .; \
- *(.ARM.extab*) \
- __stop_unwind_tab = .; \
- }
-
-/*
- * The vectors and stubs are relocatable code, and the
- * only thing that matters is their relative offsets
- */
-#define ARM_VECTORS \
- __vectors_start = .; \
- .vectors 0xffff0000 : AT(__vectors_start) { \
- *(.vectors) \
- } \
- . = __vectors_start + SIZEOF(.vectors); \
- __vectors_end = .; \
- \
- __stubs_start = .; \
- .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) { \
- *(.stubs) \
- } \
- . = __stubs_start + SIZEOF(.stubs); \
- __stubs_end = .; \
- \
- PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
-
-#define ARM_TCM \
- __itcm_start = ALIGN(4); \
- .text_itcm ITCM_OFFSET : AT(__itcm_start - LOAD_OFFSET) { \
- __sitcm_text = .; \
- *(.tcm.text) \
- *(.tcm.rodata) \
- . = ALIGN(4); \
- __eitcm_text = .; \
- } \
- . = __itcm_start + SIZEOF(.text_itcm); \
- \
- __dtcm_start = .; \
- .data_dtcm DTCM_OFFSET : AT(__dtcm_start - LOAD_OFFSET) { \
- __sdtcm_data = .; \
- *(.tcm.data) \
- . = ALIGN(4); \
- __edtcm_data = .; \
- } \
- . = __dtcm_start + SIZEOF(.data_dtcm);
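
For context, the macros above were (and presumably still are, from their new home in <asm/vmlinux.lds.h>) consumed inside the SECTIONS block of the two linker scripts; an abridged, illustrative sketch of the call sites:

        SECTIONS
        {
                /* ... */
                .text : {
                        ARM_TEXT
                }
                ARM_UNWIND_SECTIONS             /* CONFIG_ARM_UNWIND */
                /* ... */
                STABS_DEBUG
                DWARF_DEBUG
                ARM_DETAILS
                ARM_ASSERTS
        }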