mirror of
https://github.com/torvalds/linux.git
synced 2025-04-11 04:53:02 +00:00

Frank reported that the common irq_force_complete_move() breaks the out-of-tree build of ia64. The reason is that ia64 uses the migration code, but does not have hierarchical interrupt domains enabled. This went unnoticed in mainline as both x86 and RISC-V have hierarchical domains enabled. Not that it matters for mainline, but it's still inconsistent. Use irqd_get_parent_data() instead of accessing the parent_data field directly. The helper returns NULL when hierarchical domains are disabled; otherwise it accesses the parent_data field of the domain. No functional change. Fixes: 751dc837dabd ("genirq: Introduce common irq_force_complete_move() implementation") Reported-by: Frank Scheiner <frank.scheiner@web.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Frank Scheiner <frank.scheiner@web.de> Link: https://lore.kernel.org/all/87h634ugig.ffs@tglx
140 lines
3.6 KiB
C
140 lines
3.6 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
#include <linux/irq.h>
|
|
#include <linux/interrupt.h>
|
|
|
|
#include "internals.h"
|
|
|
|
/**
|
|
* irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
|
|
* @desc: Interrupt descriptor to clean up
|
|
* @force_clear: If set clear the move pending bit unconditionally.
|
|
* If not set, clear it only when the dying CPU is the
|
|
* last one in the pending mask.
|
|
*
|
|
* Returns true if the pending bit was set and the pending mask contains an
|
|
* online CPU other than the dying CPU.
|
|
*/
|
|
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
|
|
{
|
|
struct irq_data *data = irq_desc_get_irq_data(desc);
|
|
|
|
if (!irqd_is_setaffinity_pending(data))
|
|
return false;
|
|
|
|
/*
|
|
* The outgoing CPU might be the last online target in a pending
|
|
* interrupt move. If that's the case clear the pending move bit.
|
|
*/
|
|
if (!cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
|
|
irqd_clr_move_pending(data);
|
|
return false;
|
|
}
|
|
if (force_clear)
|
|
irqd_clr_move_pending(data);
|
|
return true;
|
|
}
|
|
|
|
void irq_force_complete_move(struct irq_desc *desc)
|
|
{
|
|
for (struct irq_data *d = irq_desc_get_irq_data(desc); d; d = irqd_get_parent_data(d)) {
|
|
if (d->chip && d->chip->irq_force_complete_move) {
|
|
d->chip->irq_force_complete_move(d);
|
|
return;
|
|
}
|
|
}
|
|
}
|
|
|
|
/*
 * irq_move_masked_irq - Apply a pending affinity change to a masked interrupt
 * @idata:	irq_data of the interrupt to move
 *
 * If an affinity change is pending, program desc->pending_mask as the new
 * affinity via the chip's irq_set_affinity() callback. Must be called with
 * desc->lock held and the interrupt masked by the caller (see
 * __irq_move_irq() for the masking wrapper).
 */
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	/* Clear the pending bit up front; it is re-set below on -EBUSY. */
	irqd_clr_move_pending(data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	/* Nothing to program when no target was recorded. */
	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	/* Chips without an affinity setter cannot be moved. */
	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in a edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If the there is a cleanup pending in the underlying
		 * vector management, reschedule the move for the next
		 * interrupt. Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	/* Move done, or no online target left: drop the stale pending mask. */
	cpumask_clear(desc->pending_mask);
}
|
|
|
|
void __irq_move_irq(struct irq_data *idata)
|
|
{
|
|
bool masked;
|
|
|
|
/*
|
|
* Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
|
|
* and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
|
|
* disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
|
|
*/
|
|
idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
|
|
|
|
if (unlikely(irqd_irq_disabled(idata)))
|
|
return;
|
|
|
|
/*
|
|
* Be careful vs. already masked interrupts. If this is a
|
|
* threaded interrupt with ONESHOT set, we can end up with an
|
|
* interrupt storm.
|
|
*/
|
|
masked = irqd_irq_masked(idata);
|
|
if (!masked)
|
|
idata->chip->irq_mask(idata);
|
|
irq_move_masked_irq(idata);
|
|
if (!masked)
|
|
idata->chip->irq_unmask(idata);
|
|
}
|
|
|
|
/*
 * irq_can_move_in_process_context - Check whether an affinity change can
 * be applied directly from process context
 * @data:	irq_data of the interrupt in question
 */
bool irq_can_move_in_process_context(struct irq_data *data)
{
	/*
	 * Operate on the top level irq_data of the hierarchy. The lookup
	 * compiles away when CONFIG_IRQ_DOMAIN_HIERARCHY is disabled.
	 */
	struct irq_data *top = irq_desc_get_irq_data(irq_data_to_desc(data));

	return irq_can_move_pcntxt(top);
}
|