Diffstat (limited to 'drivers/irqchip/irq-mips-gic.c')
-rw-r--r--	drivers/irqchip/irq-mips-gic.c	269
1 file changed, 226 insertions(+), 43 deletions(-)
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 76253e864f23..bca8053864b2 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -66,6 +66,87 @@ static struct gic_all_vpes_chip_data {
bool mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
+static int __gic_with_next_online_cpu(int prev)
+{
+ unsigned int cpu;
+
+ /* Discover the next online CPU */
+ cpu = cpumask_next(prev, cpu_online_mask);
+
+ /* If there isn't one, we're done */
+ if (cpu >= nr_cpu_ids)
+ return cpu;
+
+ /*
+ * Move the access lock to the next CPU's GIC local register block.
+ *
+ * On single-cluster systems it is sufficient to set GIC_VL_OTHER;
+ * since the caller holds gic_lock, nothing can clobber the written
+ * value. On multi-cluster systems we must also take the CM "other"
+ * lock, which pairs with the mips_cm_unlock_other() call made by
+ * gic_unlock_cluster() at the end of each loop iteration.
+ */
+ if (mips_cps_multicluster_cpus())
+ mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ else
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+
+ return cpu;
+}
+
+static inline void gic_unlock_cluster(void)
+{
+ if (mips_cps_multicluster_cpus())
+ mips_cm_unlock_other();
+}
+
+/**
+ * for_each_online_cpu_gic() - Iterate over online CPUs, access local registers
+ * @cpu: An integer variable to hold the current CPU number
+ * @gic_lock: A pointer to the raw spinlock used as a guard
+ *
+ * Iterate over online CPUs & configure the other/redirect register region to
+ * access each CPU's GIC local register block, which the loop body can access
+ * using the read_gic_vo_*() or write_gic_vo_*() accessor functions or their
+ * derivatives.
+ */
+#define for_each_online_cpu_gic(cpu, gic_lock) \
+ guard(raw_spinlock_irqsave)(gic_lock); \
+ for ((cpu) = __gic_with_next_online_cpu(-1); \
+ (cpu) < nr_cpu_ids; \
+ gic_unlock_cluster(), \
+ (cpu) = __gic_with_next_online_cpu(cpu))
+
+/**
+ * gic_irq_lock_cluster() - Lock redirect block access to IRQ's cluster
+ * @d: struct irq_data corresponding to the interrupt we're interested in
+ *
+ * Locks redirect register block access to the global register block of the GIC
+ * within the remote cluster that the IRQ corresponding to @d is affine to,
+ * returning true when this redirect block setup & locking has been performed.
+ *
+ * If @d is affine to the local cluster then no locking is performed and this
+ * function will return false, indicating to the caller that it should access
+ * the local cluster's registers without the overhead of indirection through the
+ * redirect block.
+ *
+ * In summary, if this function returns true then the caller should access GIC
+ * registers using redirect register block accessors & then call
+ * mips_cm_unlock_other() when done. If this function returns false then the
+ * caller should trivially access GIC registers in the local cluster.
+ *
+ * Returns true if locking performed, else false.
+ */
+static bool gic_irq_lock_cluster(struct irq_data *d)
+{
+ unsigned int cpu, cl;
+
+ cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
+ BUG_ON(cpu >= NR_CPUS);
+
+ cl = cpu_cluster(&cpu_data[cpu]);
+ if (cl == cpu_cluster(&current_cpu_data))
+ return false;
+ if (mips_cps_numcores(cl) == 0)
+ return false;
+ mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ return true;
+}
+
static void gic_clear_pcpu_masks(unsigned int intr)
{
unsigned int i;
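
A note on the locking in for_each_online_cpu_gic() above: guard(raw_spinlock_irqsave) comes from <linux/cleanup.h>, so the lock named by @gic_lock is taken at the point the macro is placed and released only when the enclosing scope is left, i.e. it is held across the entire loop. Roughly what a caller expands to, as a sketch (the function name is hypothetical and this is not the literal preprocessor output):

	/* Sketch of a for_each_online_cpu_gic() caller after expansion */
	static void example_all_vpes_write(unsigned int intr)
	{
		int cpu;

		/*
		 * From <linux/cleanup.h>: acquired here, released when this
		 * function returns, so held across the whole loop.
		 */
		guard(raw_spinlock_irqsave)(&gic_lock);

		for (cpu = __gic_with_next_online_cpu(-1);
		     cpu < nr_cpu_ids;
		     gic_unlock_cluster(), cpu = __gic_with_next_online_cpu(cpu))
			write_gic_vo_rmask(BIT(intr));	/* per-CPU local register access */
	}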
@@ -112,7 +193,12 @@ static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
- write_gic_wedge(GIC_WEDGE_RW | hwirq);
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_wedge(GIC_WEDGE_RW | hwirq);
+ mips_cm_unlock_other();
+ } else {
+ write_gic_wedge(GIC_WEDGE_RW | hwirq);
+ }
}
int gic_get_c0_compare_int(void)
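
gic_send_ipi() above is the first of several sites converted to the caller contract documented on gic_irq_lock_cluster(); the later hunks all repeat the same shape. A minimal sketch of that contract, where write_gic_foo()/write_gic_redir_foo() are hypothetical stand-ins for any local/redirect accessor pair:

	if (gic_irq_lock_cluster(d)) {
		/* @d is affine to a remote cluster: use the redirect block */
		write_gic_redir_foo(val);
		mips_cm_unlock_other();
	} else {
		/* @d is affine to the local cluster: access registers directly */
		write_gic_foo(val);
	}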
@@ -180,7 +266,13 @@ static void gic_mask_irq(struct irq_data *d)
{
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
- write_gic_rmask(intr);
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_rmask(intr);
+ mips_cm_unlock_other();
+ } else {
+ write_gic_rmask(intr);
+ }
+
gic_clear_pcpu_masks(intr);
}
@@ -189,7 +281,12 @@ static void gic_unmask_irq(struct irq_data *d)
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
unsigned int cpu;
- write_gic_smask(intr);
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_smask(intr);
+ mips_cm_unlock_other();
+ } else {
+ write_gic_smask(intr);
+ }
gic_clear_pcpu_masks(intr);
cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
@@ -200,7 +297,12 @@ static void gic_ack_irq(struct irq_data *d)
{
unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
- write_gic_wedge(irq);
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_wedge(irq);
+ mips_cm_unlock_other();
+ } else {
+ write_gic_wedge(irq);
+ }
}
static int gic_set_type(struct irq_data *d, unsigned int type)
@@ -240,9 +342,16 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
break;
}
- change_gic_pol(irq, pol);
- change_gic_trig(irq, trig);
- change_gic_dual(irq, dual);
+ if (gic_irq_lock_cluster(d)) {
+ change_gic_redir_pol(irq, pol);
+ change_gic_redir_trig(irq, trig);
+ change_gic_redir_dual(irq, dual);
+ mips_cm_unlock_other();
+ } else {
+ change_gic_pol(irq, pol);
+ change_gic_trig(irq, trig);
+ change_gic_dual(irq, dual);
+ }
if (trig == GIC_TRIG_EDGE)
irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
@@ -260,25 +369,72 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
bool force)
{
unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+ unsigned int cpu, cl, old_cpu, old_cl;
unsigned long flags;
- unsigned int cpu;
+ /*
+ * The GIC specifies that we can only route an interrupt to one VP(E),
+ * i.e. a CPU in Linux parlance, at a time. Therefore we always route to
+ * the first online CPU in the mask.
+ */
cpu = cpumask_first_and(cpumask, cpu_online_mask);
if (cpu >= NR_CPUS)
return -EINVAL;
- /* Assumption : cpumask refers to a single CPU */
- raw_spin_lock_irqsave(&gic_lock, flags);
+ old_cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
+ old_cl = cpu_cluster(&cpu_data[old_cpu]);
+ cl = cpu_cluster(&cpu_data[cpu]);
- /* Re-route this IRQ */
- write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
+ raw_spin_lock_irqsave(&gic_lock, flags);
- /* Update the pcpu_masks */
- gic_clear_pcpu_masks(irq);
- if (read_gic_mask(irq))
- set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
+ /*
+ * If we're moving affinity between clusters, stop routing the
+ * interrupt to any VP(E) in the old cluster.
+ */
+ if (cl != old_cl) {
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_map_vp(irq, 0);
+ mips_cm_unlock_other();
+ } else {
+ write_gic_map_vp(irq, 0);
+ }
+ }
+ /*
+ * Update effective affinity - after this gic_irq_lock_cluster() will
+ * begin operating on the new cluster.
+ */
irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ /*
+ * If we're moving affinity between clusters, configure the interrupt
+ * trigger type in the new cluster.
+ */
+ if (cl != old_cl)
+ gic_set_type(d, irqd_get_trigger_type(d));
+
+ /* Route the interrupt to its new VP(E) */
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_map_pin(irq,
+ GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
+
+ /* Update the pcpu_masks */
+ gic_clear_pcpu_masks(irq);
+ if (read_gic_redir_mask(irq))
+ set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
+
+ mips_cm_unlock_other();
+ } else {
+ write_gic_map_pin(irq, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
+
+ /* Update the pcpu_masks */
+ gic_clear_pcpu_masks(irq);
+ if (read_gic_mask(irq))
+ set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
+ }
+
raw_spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK;
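
The rewritten gic_set_affinity() above is order-sensitive, because gic_irq_lock_cluster() resolves its target cluster from the effective affinity mask. In outline:

	/*
	 * 1. write_gic_[redir_]map_vp(irq, 0): stop routing the interrupt
	 *    to any VP(E) in the old cluster (only when cl != old_cl).
	 * 2. irq_data_update_effective_affinity(): from this point on,
	 *    gic_irq_lock_cluster() targets the new cluster.
	 * 3. gic_set_type(): the trigger configuration lives in each
	 *    cluster's shared registers, so reprogram it in the new one.
	 * 4. write_gic_[redir_]map_pin()/map_vp(): route the interrupt to
	 *    its new VP(E) and rebuild the pcpu_masks.
	 */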
@@ -350,37 +506,33 @@ static struct irq_chip gic_local_irq_controller = {
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
struct gic_all_vpes_chip_data *cd;
- unsigned long flags;
int intr, cpu;
+ if (!mips_cps_multicluster_cpus())
+ return;
+
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
cd->mask = false;
- raw_spin_lock_irqsave(&gic_lock, flags);
- for_each_online_cpu(cpu) {
- write_gic_vl_other(mips_cm_vp_id(cpu));
+ for_each_online_cpu_gic(cpu, &gic_lock)
write_gic_vo_rmask(BIT(intr));
- }
- raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
struct gic_all_vpes_chip_data *cd;
- unsigned long flags;
int intr, cpu;
+ if (!mips_cps_multicluster_cpus())
+ return;
+
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
cd->mask = true;
- raw_spin_lock_irqsave(&gic_lock, flags);
- for_each_online_cpu(cpu) {
- write_gic_vl_other(mips_cm_vp_id(cpu));
+ for_each_online_cpu_gic(cpu, &gic_lock)
write_gic_vo_smask(BIT(intr));
- }
- raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_all_vpes_irq_cpu_online(void)
@@ -436,11 +588,21 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
unsigned long flags;
data = irq_get_irq_data(virq);
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
raw_spin_lock_irqsave(&gic_lock, flags);
- write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
- write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
- irq_data_update_effective_affinity(data, cpumask_of(cpu));
+
+ /* Route the interrupt to its VP(E) */
+ if (gic_irq_lock_cluster(data)) {
+ write_gic_redir_map_pin(intr,
+ GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ write_gic_redir_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
+ mips_cm_unlock_other();
+ } else {
+ write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
+ }
+
raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
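
Note the ordering in gic_shared_irq_domain_map() above: the effective affinity is now updated before gic_lock is taken, so that gic_irq_lock_cluster(), which reads the effective affinity mask, directs the map_pin/map_vp writes at the cluster that cpu belongs to.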
@@ -469,7 +631,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hwirq)
{
struct gic_all_vpes_chip_data *cd;
- unsigned long flags;
unsigned int intr;
int err, cpu;
u32 map;
@@ -533,12 +694,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
if (!gic_local_irq_is_routable(intr))
return -EPERM;
- raw_spin_lock_irqsave(&gic_lock, flags);
- for_each_online_cpu(cpu) {
- write_gic_vl_other(mips_cm_vp_id(cpu));
- write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
+ if (mips_cps_multicluster_cpus()) {
+ for_each_online_cpu_gic(cpu, &gic_lock)
+ write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
}
- raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@@ -621,6 +780,9 @@ static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
if (ret)
goto error;
+ /* Set affinity to cpu. */
+ irq_data_update_effective_affinity(irq_get_irq_data(virq + i),
+ cpumask_of(cpu));
ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
if (ret)
goto error;
@@ -734,7 +896,7 @@ static int gic_cpu_startup(unsigned int cpu)
static int __init gic_of_init(struct device_node *node,
struct device_node *parent)
{
- unsigned int cpu_vec, i, gicconfig;
+ unsigned int cpu_vec, i, gicconfig, cl, nclusters;
unsigned long reserved;
phys_addr_t gic_base;
struct resource res;
@@ -815,11 +977,32 @@ static int __init gic_of_init(struct device_node *node,
board_bind_eic_interrupt = &gic_bind_eic_interrupt;
- /* Setup defaults */
- for (i = 0; i < gic_shared_intrs; i++) {
- change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
- change_gic_trig(i, GIC_TRIG_LEVEL);
- write_gic_rmask(i);
+ /*
+ * Initialise each cluster's GIC shared registers to sane default
+ * values. Doing this once here, rather than in gic_cpu_startup()
+ * for each CPU, avoids erasing the IPI setup already performed.
+ */
+ nclusters = mips_cps_numclusters();
+ for (cl = 0; cl < nclusters; cl++) {
+ if (cl == cpu_cluster(&current_cpu_data)) {
+ for (i = 0; i < gic_shared_intrs; i++) {
+ change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
+ change_gic_trig(i, GIC_TRIG_LEVEL);
+ write_gic_rmask(i);
+ }
+ } else if (mips_cps_numcores(cl) != 0) {
+ mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ for (i = 0; i < gic_shared_intrs; i++) {
+ change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH);
+ change_gic_redir_trig(i, GIC_TRIG_LEVEL);
+ write_gic_redir_rmask(i);
+ }
+ mips_cm_unlock_other();
+ } else {
+ pr_warn("No CPU cores in cluster %d, skipping it\n", cl);
+ }
}
return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
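
A final note on the initialisation loop above: remote clusters are reached with mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL), pointing the redirect block at the remote cluster's shared (global) GIC registers, whereas the per-CPU iterator earlier in the patch targets individual VP(E) local register blocks. Clusters containing no CPU cores are skipped, matching the mips_cps_numcores() check in gic_irq_lock_cluster().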