Diffstat (limited to 'drivers/irqchip/irq-apple-aic.c')
-rw-r--r--	drivers/irqchip/irq-apple-aic.c	214
1 file changed, 48 insertions(+), 166 deletions(-)
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index ae3437f03e6c..5c534d9fd2b0 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -210,7 +210,6 @@ FIELD_PREP(AIC_EVENT_NUM, x))
 #define AIC_HWIRQ_IRQ(x)	FIELD_GET(AIC_EVENT_NUM, x)
 #define AIC_HWIRQ_DIE(x)	FIELD_GET(AIC_EVENT_DIE, x)
-#define AIC_NR_FIQ		6
 #define AIC_NR_SWIPI		32
 
 /*
@@ -222,11 +221,18 @@
  * running at EL2 (with VHE). When the kernel is running at EL1, the
  * mapping differs and aic_irq_domain_translate() performs the remapping.
  */
-
-#define AIC_TMR_EL0_PHYS	AIC_TMR_HV_PHYS
-#define AIC_TMR_EL0_VIRT	AIC_TMR_HV_VIRT
-#define AIC_TMR_EL02_PHYS	AIC_TMR_GUEST_PHYS
-#define AIC_TMR_EL02_VIRT	AIC_TMR_GUEST_VIRT
+enum fiq_hwirq {
+	/* Must be ordered as in apple-aic.h */
+	AIC_TMR_EL0_PHYS	= AIC_TMR_HV_PHYS,
+	AIC_TMR_EL0_VIRT	= AIC_TMR_HV_VIRT,
+	AIC_TMR_EL02_PHYS	= AIC_TMR_GUEST_PHYS,
+	AIC_TMR_EL02_VIRT	= AIC_TMR_GUEST_VIRT,
+	AIC_CPU_PMU_Effi	= AIC_CPU_PMU_E,
+	AIC_CPU_PMU_Perf	= AIC_CPU_PMU_P,
+	/* No need for this to be discovered from DT */
+	AIC_VGIC_MI,
+	AIC_NR_FIQ
+};
 
 static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
 
@@ -292,7 +298,6 @@ struct aic_irq_chip {
 	void __iomem *base;
 	void __iomem *event;
 	struct irq_domain *hw_domain;
-	struct irq_domain *ipi_domain;
 	struct {
 		cpumask_t aff;
 	} *fiq_aff[AIC_NR_FIQ];
@@ -307,9 +312,6 @@ struct aic_irq_chip {
 
 static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
 
-static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
-static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);
-
 static struct aic_irq_chip *aic_irqc;
 
 static void aic_handle_ipi(struct pt_regs *regs);
@@ -384,14 +386,20 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
 
 	/*
 	 * vGIC maintenance interrupts end up here too, so we need to check
-	 * for them separately. This should never trigger if KVM is working
-	 * properly, because it will have already taken care of clearing it
-	 * on guest exit before this handler runs.
+	 * for them separately. It should however only trigger when NV is
+	 * in use, and be cleared when coming back from the handler.
 	 */
-	if (is_kernel_in_hyp_mode() && (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
-	    read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
-		pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
-		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
+	if (is_kernel_in_hyp_mode() &&
+	    (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
+	    read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
+		generic_handle_domain_irq(aic_irqc->hw_domain,
+					  AIC_FIQ_HWIRQ(AIC_VGIC_MI));
+
+		if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
+			     read_sysreg_s(SYS_ICH_MISR_EL2))) {
+			pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
+			sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
+		}
 	}
 }
 
@@ -751,98 +759,8 @@ static void aic_ipi_send_fast(int cpu)
 	isb();
 }
 
-static void aic_ipi_mask(struct irq_data *d)
-{
-	u32 irq_bit = BIT(irqd_to_hwirq(d));
-
-	/* No specific ordering requirements needed here. */
-	atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-}
-
-static void aic_ipi_unmask(struct irq_data *d)
-{
-	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
-	u32 irq_bit = BIT(irqd_to_hwirq(d));
-
-	atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-
-	/*
-	 * The atomic_or() above must complete before the atomic_read()
-	 * below to avoid racing aic_ipi_send_mask().
-	 */
-	smp_mb__after_atomic();
-
-	/*
-	 * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
-	 * No barriers needed here since this is a self-IPI.
-	 */
-	if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
-		if (static_branch_likely(&use_fast_ipi))
-			aic_ipi_send_fast(smp_processor_id());
-		else
-			aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
-	}
-}
-
-static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
-{
-	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
-	u32 irq_bit = BIT(irqd_to_hwirq(d));
-	u32 send = 0;
-	int cpu;
-	unsigned long pending;
-
-	for_each_cpu(cpu, mask) {
-		/*
-		 * This sequence is the mirror of the one in aic_ipi_unmask();
-		 * see the comment there. Additionally, release semantics
-		 * ensure that the vIPI flag set is ordered after any shared
-		 * memory accesses that precede it. This therefore also pairs
-		 * with the atomic_fetch_andnot in aic_handle_ipi().
-		 */
-		pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));
-
-		/*
-		 * The atomic_fetch_or_release() above must complete before the
-		 * atomic_read() below to avoid racing aic_ipi_unmask().
-		 */
-		smp_mb__after_atomic();
-
-		if (!(pending & irq_bit) &&
-		    (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
-			if (static_branch_likely(&use_fast_ipi))
-				aic_ipi_send_fast(cpu);
-			else
-				send |= AIC_IPI_SEND_CPU(cpu);
-		}
-	}
-
-	/*
-	 * The flag writes must complete before the physical IPI is issued
-	 * to another CPU. This is implied by the control dependency on
-	 * the result of atomic_read_acquire() above, which is itself
-	 * already ordered after the vIPI flag write.
-	 */
-	if (send)
-		aic_ic_write(ic, AIC_IPI_SEND, send);
-}
-
-static struct irq_chip ipi_chip = {
-	.name = "AIC-IPI",
-	.irq_mask = aic_ipi_mask,
-	.irq_unmask = aic_ipi_unmask,
-	.ipi_send_mask = aic_ipi_send_mask,
-};
-
-/*
- * IPI IRQ domain
- */
-
 static void aic_handle_ipi(struct pt_regs *regs)
 {
-	int i;
-	unsigned long enabled, firing;
-
 	/*
 	 * Ack the IPI. We need to order this after the AIC event read, but
 	 * that is enforced by normal MMIO ordering guarantees.
@@ -857,27 +775,7 @@ static void aic_handle_ipi(struct pt_regs *regs)
 		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
 	}
 
-	/*
-	 * The mask read does not need to be ordered. Only we can change
-	 * our own mask anyway, so no races are possible here, as long as
-	 * we are properly in the interrupt handler (which is covered by
-	 * the barrier that is part of the top-level AIC handler's readl()).
-	 */
-	enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));
-
-	/*
-	 * Clear the IPIs we are about to handle. This pairs with the
-	 * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
-	 * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
-	 * before IPI handling code (to avoid races handling vIPIs before they
-	 * are signaled). The former is taken care of by the release semantics
-	 * of the write portion, while the latter is taken care of by the
-	 * acquire semantics of the read portion.
-	 */
-	firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;
-
-	for_each_set_bit(i, &firing, AIC_NR_SWIPI)
-		generic_handle_domain_irq(aic_irqc->ipi_domain, i);
+	ipi_mux_process();
 
 	/*
 	 * No ordering needed here; at worst this just changes the timing of
@@ -887,55 +785,24 @@ static void aic_handle_ipi(struct pt_regs *regs)
 	aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
 }
 
-static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
-			 unsigned int nr_irqs, void *args)
-{
-	int i;
-
-	for (i = 0; i < nr_irqs; i++) {
-		irq_set_percpu_devid(virq + i);
-		irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
-				    handle_percpu_devid_irq, NULL, NULL);
-	}
-
-	return 0;
-}
-
-static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
+static void aic_ipi_send_single(unsigned int cpu)
 {
-	/* Not freeing IPIs */
+	if (static_branch_likely(&use_fast_ipi))
+		aic_ipi_send_fast(cpu);
+	else
+		aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
 }
 
-static const struct irq_domain_ops aic_ipi_domain_ops = {
-	.alloc = aic_ipi_alloc,
-	.free = aic_ipi_free,
-};
-
 static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
 {
-	struct irq_domain *ipi_domain;
 	int base_ipi;
 
-	ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
-					      &aic_ipi_domain_ops, irqc);
-	if (WARN_ON(!ipi_domain))
+	base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
+	if (WARN_ON(base_ipi <= 0))
 		return -ENODEV;
 
-	ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
-	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
-
-	base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
-					   NUMA_NO_NODE, NULL, false, NULL);
-
-	if (WARN_ON(!base_ipi)) {
-		irq_domain_remove(ipi_domain);
-		return -ENODEV;
-	}
-
 	set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);
 
-	irqc->ipi_domain = ipi_domain;
-
 	return 0;
 }
 
@@ -1178,6 +1045,21 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
 				  "irqchip/apple-aic/ipi:starting",
 				  aic_init_cpu, NULL);
 
+	if (is_kernel_in_hyp_mode()) {
+		struct irq_fwspec mi = {
+			.fwnode		= of_node_to_fwnode(node),
+			.param_count	= 3,
+			.param		= {
+				[0]	= AIC_FIQ, /* This is a lie */
+				[1]	= AIC_VGIC_MI,
+				[2]	= IRQ_TYPE_LEVEL_HIGH,
+			},
+		};
+
+		vgic_info.maint_irq = irq_create_fwspec_mapping(&mi);
+		WARN_ON(!vgic_info.maint_irq);
+	}
+
 	vgic_set_kvm_info(&vgic_info);
 
 	pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
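The bulk of the deletions come from replacing the driver's private vIPI multiplexer with the kernel's generic IPI mux (kernel/irq/ipi-mux.c, CONFIG_GENERIC_IRQ_IPI_MUX). The contract the patch relies on is small: hand ipi_mux_create() the number of virtual IPIs plus a callback that rings the single hardware IPI of a given CPU, and call ipi_mux_process() from the handler of that hardware IPI. Below is a minimal consumer sketch; ipi_mux_create(), ipi_mux_process() and set_smp_ipi_range() are the real kernel APIs used in the diff, while everything prefixed foo_ is a hypothetical irqchip, not a real driver.

#include <linux/irq.h>
#include <linux/smp.h>

#define FOO_NR_SWIPI	32

/* Hypothetical HW accessors, standing in for aic_ipi_send_fast() etc. */
static void foo_ring_hw_ipi(unsigned int cpu)
{
	/* poke the device's per-CPU IPI doorbell register here */
}

static void foo_ack_hw_ipi(void)
{
	/* ack/clear the hardware IPI source here */
}

/* Called by the mux core whenever some virtual IPI must reach @cpu. */
static void foo_ipi_send_single(unsigned int cpu)
{
	foo_ring_hw_ipi(cpu);
}

/* Called from the driver's handler for its single hardware IPI. */
static void foo_handle_hw_ipi(void)
{
	foo_ack_hw_ipi();
	ipi_mux_process();	/* demux and handle all pending virtual IPIs */
}

static int __init foo_init_smp(void)
{
	int base_ipi;

	/* Creates the IPI domain and FOO_NR_SWIPI virtual IPIs internally */
	base_ipi = ipi_mux_create(FOO_NR_SWIPI, foo_ipi_send_single);
	if (base_ipi <= 0)
		return -ENODEV;

	set_smp_ipi_range(base_ipi, FOO_NR_SWIPI);
	return 0;
}

ipi_mux_create() builds the IPI IRQ domain, the irq_chip and the per-CPU pending/enable bookkeeping itself, which is why aic_ipi_mask(), aic_ipi_unmask(), aic_ipi_send_mask(), ipi_chip, aic_ipi_domain_ops and irqc->ipi_domain can all be deleted above.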
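The ordering argument spelled out in the deleted comments survives inside the generic mux: a sender sets the pending bit with release semantics and only rings the hardware IPI on a 0-to-1 transition of an enabled bit, while the receiver claims every enabled pending bit with one acquire fetch-and-clear, so no edge is lost and no vIPI is handled before it is signalled. The following is a self-contained userspace model of that scheme, with C11 atomics standing in for the kernel's atomic_t helpers and printf() for the hardware doorbell; it illustrates the idea and is not kernel code.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint pending;	/* one bit per virtual IPI, like aic_vipi_flag */
static atomic_uint enabled;	/* unmasked virtual IPIs, like aic_vipi_enable */

/* Sender: set the bit; ring the doorbell only on a 0->1 enabled edge. */
static void vipi_send(unsigned int vipi)
{
	unsigned int bit = 1u << vipi;
	unsigned int old = atomic_fetch_or_explicit(&pending, bit,
						    memory_order_release);

	if (!(old & bit) && (atomic_load(&enabled) & bit))
		printf("ring hardware IPI\n");
}

/* Receiver: atomically claim all enabled pending bits, then handle them. */
static void vipi_handle(void)
{
	unsigned int en = atomic_load(&enabled);
	unsigned int firing = atomic_fetch_and_explicit(&pending, ~en,
							memory_order_acquire) & en;

	for (unsigned int i = 0; i < 32; i++)
		if (firing & (1u << i))
			printf("virtual IPI %u fired\n", i);
}

int main(void)
{
	atomic_store(&enabled, ~0u);
	vipi_send(3);	/* prints "ring hardware IPI": 0->1 edge on an enabled bit */
	vipi_handle();	/* prints "virtual IPI 3 fired" exactly once */
	return 0;
}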