mirror of
https://github.com/torvalds/linux.git
synced 2025-04-11 04:53:02 +00:00

- Support for hart indices on RISC-V. The hart index identifies a hart (core) within a specific interrupt domain in RISC-V's Privileged Architecture. - Rework of the RISC-V MSI driver. This moves the driver over to the generic MSI library and solves the affinity problem of unmaskable PCI/MSI controllers. Unmaskable PCI/MSI controllers are prone to lose interrupts when the MSI message is updated to change the affinity because the message write consists of three 32-bit subsequent writes, which update address and data. As these writes are non-atomic versus the device raising an interrupt, the device can observe a half written update and issue an interrupt on the wrong vector. This is mitigated by a carefully orchestrated step by step update and the observation of an eventually pending interrupt on the CPU which issues the update. The algorithm follows the well established method of the X86 MSI driver. - A new driver for the RISC-V Sophgo SG2042 MSI controller - Overhaul of the Renesas RZQ2L driver. Simplification of the probe function by using devm_*() mechanisms, which avoid the endless list of error prone gotos in the failure paths. - Expand the Renesas RZV2H driver to support RZ/G3E SoCs - A workaround for Rockchip 3568002 erratum in the GIC-V3 driver to ensure that the addressing is limited to the lower 32-bit of the physical address space. - Add support for the Allwinner A523 NMI controller - Expand the IMX irqsteer driver to handle up to 960 input interrupts - The usual small updates, cleanups and device tree changes. 
-----BEGIN PGP SIGNATURE----- iQJHBAABCgAxFiEEQp8+kY+LLUocC4bMphj1TA10mKEFAmff454THHRnbHhAbGlu dXRyb25peC5kZQAKCRCmGPVMDXSYoZqoD/4kdHzbxfLpf7vC3NnG8NWwTq5FpbSx 6grQC9hWNMAs4n2IFjJRFLrjeX3AcdAQXL/BWuM0LfW9tQDQaVmqlSIlB/bn69KB 7HyAR6ozbOgnHKGAqFUXSLf+4pq+6q3mOgGKIF289dy14HFu4ta0DqKgkPZeQnVs R/J8i7REUnn+YuxzSt5eOqyDPyt2EHJosSUABSWQZBlrM9jy1W7f6NqDFwawiVsa +tv4U/bz91vjzVxwTIgt7nJK+b2HVYdxoZYuKJwPaTsj26ANPp6ltjRTeOmZhb5h uKgw+OyzDnk6q+tjGcRqrqwl291VKxCvnRiqHFfu3CERdmI9qvpN9IRcEJqIbkcN cakekhAyt7OO7sEPcql5vBL97e9hpb7EcH78gYxwHf8Dy0rFZUvSC5v+L6VRFnJS XcKA1L+f9B6u5qxnBtLan9IW08HYNdvmPq6AuVjk+ndKioPUFqB2q6AtXpuA3Rmu Y3XH/wh/q5wk0pgeByxQW6swsfpMN3OYK3mpLx475wFh2NKzcdGlwGhDFhiw8DKX m1AESy3UZatj1a0qGaFS/M+mm9KGrDYIMrje832Wf4Yf1LGmTsDkd3/V99oazSsq Jm4qhDASXChJXd0imQICX9hPw0aHTlLYNs54obUXVULH4HivQKIgWhUXrjG0dBDL +tttjuv5FJxr3A== =jPHa -----END PGP SIGNATURE----- Merge tag 'irq-drivers-2025-03-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip Pull irq driver updates from Thomas Gleixner: - Support for hard indices on RISC-V. The hart index identifies a hart (core) within a specific interrupt domain in RISC-V's Priviledged Architecture. - Rework of the RISC-V MSI driver This moves the driver over to the generic MSI library and solves the affinity problem of unmaskable PCI/MSI controllers. Unmaskable PCI/MSI controllers are prone to lose interrupts when the MSI message is updated to change the affinity because the message write consists of three 32-bit subsequent writes, which update address and data. As these writes are non-atomic versus the device raising an interrupt, the device can observe a half written update and issue an interrupt on the wrong vector. This is mitiated by a carefully orchestrated step by step update and the observation of an eventually pending interrupt on the CPU which issues the update. The algorithm follows the well established method of the X86 MSI driver. 
- A new driver for the RISC-V Sophgo SG2042 MSI controller - Overhaul of the Renesas RZQ2L driver Simplification of the probe function by using devm_*() mechanisms, which avoid the endless list of error prone gotos in the failure paths. - Expand the Renesas RZV2H driver to support RZ/G3E SoCs - A workaround for Rockchip 3568002 erratum in the GIC-V3 driver to ensure that the addressing is limited to the lower 32-bit of the physical address space. - Add support for the Allwinner AS23 NMI controller - Expand the IMX irqsteer driver to handle up to 960 input interrupts - The usual small updates, cleanups and device tree changes * tag 'irq-drivers-2025-03-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits) irqchip/imx-irqsteer: Support up to 960 input interrupts irqchip/sunxi-nmi: Support Allwinner A523 NMI controller dt-bindings: irq: sun7i-nmi: Document the Allwinner A523 NMI controller irqchip/davinci-cp-intc: Remove public header irqchip/renesas-rzv2h: Add RZ/G3E support irqchip/renesas-rzv2h: Update macros ICU_TSSR_TSSEL_{MASK,PREP} irqchip/renesas-rzv2h: Update TSSR_TIEN macro irqchip/renesas-rzv2h: Add field_width to struct rzv2h_hw_info irqchip/renesas-rzv2h: Add max_tssel to struct rzv2h_hw_info irqchip/renesas-rzv2h: Add struct rzv2h_hw_info with t_offs variable irqchip/renesas-rzv2h: Use devm_pm_runtime_enable() irqchip/renesas-rzv2h: Use devm_reset_control_get_exclusive_deasserted() irqchip/renesas-rzv2h: Simplify rzv2h_icu_init() irqchip/renesas-rzv2h: Drop irqchip from struct rzv2h_icu_priv irqchip/renesas-rzv2h: Fix wrong variable usage in rzv2h_tint_set_type() dt-bindings: interrupt-controller: renesas,rzv2h-icu: Document RZ/G3E SoC riscv: sophgo: dts: Add msi controller for SG2042 irqchip: Add the Sophgo SG2042 MSI interrupt controller dt-bindings: interrupt-controller: Add Sophgo SG2042 MSI arm64: dts: rockchip: rk356x: Move PCIe MSI to use GIC ITS instead of MBI ...
524 lines
15 KiB
C
524 lines
15 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* IRQ subsystem internal functions and variables:
|
|
*
|
|
* Do not ever include this file from anything else than
|
|
* kernel/irq/. Do not even think about using any information outside
|
|
* of this file for your non core code.
|
|
*/
|
|
#include <linux/irqdesc.h>
|
|
#include <linux/kernel_stat.h>
|
|
#include <linux/pm_runtime.h>
|
|
#include <linux/sched/clock.h>
|
|
|
|
/*
 * Upper bound on interrupt numbers: with sparse descriptors the only
 * limit is the integer space, otherwise the statically sized NR_IRQS
 * array is the limit.
 */
#ifdef CONFIG_SPARSE_IRQ
# define MAX_SPARSE_IRQS	INT_MAX
#else
# define MAX_SPARSE_IRQS	NR_IRQS
#endif
|
|
|
|
/*
 * Short alias for the deliberately unwieldy field name in struct
 * irq_desc; the long name discourages access from outside kernel/irq/.
 */
#define istate core_internal_state__do_not_mess_with_it

/* Disables spurious interrupt debugging — presumably a boot option; see spurious irq handling code */
extern bool noirqdebug;

/* Marker action installed for chained interrupts (see irq_desc_is_chained()) */
extern struct irqaction chained_action;
|
|
|
|
/*
 * Bits used by threaded handlers. Note these are bit *positions*
 * (plain enum values 0..4), not masks — presumably used with the
 * atomic bitops on a per-action thread flags word.
 *
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY - irq thread is requested to adjust affinity
 * IRQTF_FORCED_THREAD - irq action is force threaded
 * IRQTF_READY - signals that irq thread is ready
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
	IRQTF_FORCED_THREAD,
	IRQTF_READY,
};
|
|
|
|
/*
 * Bit masks for desc->core_internal_state__do_not_mess_with_it
 *
 * IRQS_AUTODETECT - autodetection in progress
 * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt
 * detection
 * IRQS_POLL_INPROGRESS - polling in progress
 * IRQS_ONESHOT - irq is not unmasked in primary handler
 * IRQS_REPLAY - irq has been resent and will not be resent
 * again until the handler has run and cleared
 * this flag.
 * IRQS_WAITING - irq is waiting
 * IRQS_PENDING - irq needs to be resent and should be resent
 * at the next available opportunity.
 * IRQS_SUSPENDED - irq is suspended
 * IRQS_TIMINGS - timing measurements are enabled for this irq
 * (set by irq_setup_timings(), tested in record_irq_time())
 * IRQS_NMI - irq line is used to deliver NMIs
 * IRQS_SYSFS - descriptor has been added to sysfs
 */
enum {
	IRQS_AUTODETECT = 0x00000001,
	IRQS_SPURIOUS_DISABLED = 0x00000002,
	IRQS_POLL_INPROGRESS = 0x00000008,
	IRQS_ONESHOT = 0x00000020,
	IRQS_REPLAY = 0x00000040,
	IRQS_WAITING = 0x00000080,
	IRQS_PENDING = 0x00000200,
	IRQS_SUSPENDED = 0x00000800,
	IRQS_TIMINGS = 0x00001000,
	IRQS_NMI = 0x00002000,
	IRQS_SYSFS = 0x00004000,
};
|
|
|
|
#include "debug.h"
|
|
#include "settings.h"
|
|
|
|
/* Trigger-type configuration and lazy disable/enable of a descriptor */
extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
extern void __disable_irq(struct irq_desc *desc);
extern void __enable_irq(struct irq_desc *desc);

/* Self-documenting values for the 'resend' argument of irq_startup() */
#define IRQ_RESEND	true
#define IRQ_NORESEND	false

/* Self-documenting values for the 'force' argument of irq_startup() */
#define IRQ_START_FORCE	true
#define IRQ_START_COND	false
|
|
|
|
/* Activation/startup of a descriptor in the irqdomain hierarchy */
extern int irq_activate(struct irq_desc *desc);
extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);

/* Shutdown/disable counterparts, including per-CPU variants */
extern void irq_shutdown(struct irq_desc *desc);
extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);

/* Mask/unmask primitives operating through the irq chip */
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
extern void unmask_threaded_irq(struct irq_desc *desc);
|
|
|
|
/*
 * With sparse irqs there is nothing to mark in a static map, so the
 * helper compiles away; otherwise it is provided out of line.
 */
#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
#else
extern void irq_mark_irq(unsigned int irq);
#endif
|
|
|
|
/* Core interrupt event handling entry points */
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);

/* Resending of interrupts :*/
int check_irq_resend(struct irq_desc *desc, bool inject);
void clear_irq_resend(struct irq_desc *desc);
void irq_resend_init(struct irq_desc *desc);

/* Spurious interrupt poll support and threaded-handler wakeup */
bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);

void wake_threads_waitq(struct irq_desc *desc);
|
|
|
|
/*
 * /proc/irq/ registration hooks; empty stubs when procfs support is
 * not configured so callers need no #ifdefs.
 */
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif
|
|
|
|
/* True when user space is allowed to change the affinity of @irq */
extern bool irq_can_set_affinity_usr(unsigned int irq);

/* Low-level affinity setter operating directly on irq_data */
extern int irq_do_set_affinity(struct irq_data *data,
			       const struct cpumask *dest, bool force);

/* Initial affinity spreading; a no-op returning 0 on UP kernels */
#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
#else
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif
|
|
|
|
/* Inline functions for support of irq chips on slow busses */
|
|
static inline void chip_bus_lock(struct irq_desc *desc)
|
|
{
|
|
if (unlikely(desc->irq_data.chip->irq_bus_lock))
|
|
desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
|
|
}
|
|
|
|
static inline void chip_bus_sync_unlock(struct irq_desc *desc)
|
|
{
|
|
if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
|
|
desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
|
|
}
|
|
|
|
/* Validation flags for __irq_get_desc_lock()'s 'check' argument */
#define _IRQ_DESC_CHECK		(1 << 0)
#define _IRQ_DESC_PERCPU	(1 << 1)

/* Check that the descriptor is (not) a per-CPU one, respectively */
#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)

/* Iterate over the singly linked list of actions hanging off @desc */
#define for_each_action_of_desc(desc, act)			\
	for (act = desc->action; act; act = act->next)
|
|
|
|
/*
 * Resolve @irq to its descriptor and lock it; @check takes the
 * IRQ_GET_DESC_CHECK_* flags above, @bus selects whether the chip bus
 * lock is taken as well. Must be paired with __irq_put_desc_unlock()
 * using the same @bus value.
 */
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
|
|
|
|
/*
 * Convenience wrappers around __irq_get_desc_lock()/__irq_put_desc_unlock().
 * The *_buslock/*_busunlock pair additionally takes the slow-bus chip
 * lock (bus=true); lock and unlock variants must be matched.
 */
static inline struct irq_desc *
irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, true, check);
}

static inline void
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, true);
}

static inline struct irq_desc *
irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, false, check);
}

static inline void
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, false);
}
|
|
|
|
/*
 * Private accessor for irq_data state bits; #undef'ed again further
 * down so only the helpers below can use it.
 */
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

/* Read the raw state word of an irq_data */
static inline unsigned int irqd_get(struct irq_data *d)
{
	return __irqd_to_state(d);
}
|
|
|
|
/*
 * Manipulation functions for irq_data.state
 */
static inline void irqd_set_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_clr_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_set_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clr_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_MANAGED_SHUTDOWN;
}

/* Generic clear/set/test of arbitrary IRQD_* state bits */
static inline void irqd_clear(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) &= ~mask;
}

static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) |= mask;
}

static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
{
	return __irqd_to_state(d) & mask;
}

/* Descriptor-level shorthands for the two most common state updates */
static inline void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static inline void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

/* Keep the accessor scoped strictly to the helpers above */
#undef __irqd_to_state
|
|
|
|
/* Bump the per-CPU count for this interrupt and the per-CPU total */
static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__this_cpu_inc(desc->kstat_irqs->cnt);
	__this_cpu_inc(kstat.irqs_sum);
}

/*
 * Same as above plus the descriptor-global total. tot_count++ is a
 * plain increment — presumably serialized by the descriptor lock at
 * the call sites; verify before adding new callers.
 */
static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__kstat_incr_irqs_this_cpu(desc);
	desc->tot_count++;
}
|
|
|
|
/* NUMA node associated with this descriptor */
static inline int irq_desc_get_node(struct irq_desc *desc)
{
	return irq_common_data_get_node(&desc->irq_common_data);
}

/* True when the descriptor carries the chained-interrupt marker action */
static inline int irq_desc_is_chained(struct irq_desc *desc)
{
	return (desc->action && desc->action == &chained_action);
}

/* True when this interrupt line delivers NMIs (IRQS_NMI state bit) */
static inline bool irq_is_nmi(struct irq_desc *desc)
{
	return desc->istate & IRQS_NMI;
}
|
|
|
|
/*
 * Suspend/resume (wakeup) integration; compiles to stubs when
 * CONFIG_PM_SLEEP is disabled.
 */
#ifdef CONFIG_PM_SLEEP
bool irq_pm_check_wakeup(struct irq_desc *desc);
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
#else
static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
static inline void
irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif
|
|
|
|
#ifdef CONFIG_IRQ_TIMINGS

/* Circular buffer geometry: 2^5 = 32 entries, MASK wraps the index */
#define IRQ_TIMINGS_SHIFT	5
#define IRQ_TIMINGS_SIZE	(1 << IRQ_TIMINGS_SHIFT)
#define IRQ_TIMINGS_MASK	(IRQ_TIMINGS_SIZE - 1)
|
|
|
|
/**
 * struct irq_timings - irq timings storing structure
 * @values: a circular buffer of u64 encoded <timestamp,irq> values
 * @count: the number of elements in the array
 */
struct irq_timings {
	u64 values[IRQ_TIMINGS_SIZE];
	int count;
};

/* One timings ring per CPU */
DECLARE_PER_CPU(struct irq_timings, irq_timings);
|
|
|
|
/* Allocate/free the per-irq timing statistics storage */
extern void irq_timings_free(int irq);
extern int irq_timings_alloc(int irq);

/* Stop recording timings for @desc and release its storage */
static inline void irq_remove_timings(struct irq_desc *desc)
{
	desc->istate &= ~IRQS_TIMINGS;

	irq_timings_free(irq_desc_get_irq(desc));
}
|
|
|
|
static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act)
|
|
{
|
|
int irq = irq_desc_get_irq(desc);
|
|
int ret;
|
|
|
|
/*
|
|
* We don't need the measurement because the idle code already
|
|
* knows the next expiry event.
|
|
*/
|
|
if (act->flags & __IRQF_TIMER)
|
|
return;
|
|
|
|
/*
|
|
* In case the timing allocation fails, we just want to warn,
|
|
* not fail, so letting the system boot anyway.
|
|
*/
|
|
ret = irq_timings_alloc(irq);
|
|
if (ret) {
|
|
pr_warn("Failed to allocate irq timing stats for irq%d (%d)",
|
|
irq, ret);
|
|
return;
|
|
}
|
|
|
|
desc->istate |= IRQS_TIMINGS;
|
|
}
|
|
|
|
/* Global on/off switch for timing recording, gated by a static key */
extern void irq_timings_enable(void);
extern void irq_timings_disable(void);

DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);
|
|
|
|
/*
|
|
* The interrupt number and the timestamp are encoded into a single
|
|
* u64 variable to optimize the size.
|
|
* 48 bit time stamp and 16 bit IRQ number is way sufficient.
|
|
* Who cares an IRQ after 78 hours of idle time?
|
|
*/
|
|
static inline u64 irq_timing_encode(u64 timestamp, int irq)
|
|
{
|
|
return (timestamp << 16) | irq;
|
|
}
|
|
|
|
/*
 * Inverse of irq_timing_encode(): extract the 48-bit timestamp into
 * *timestamp and return the 16-bit interrupt number.
 */
static inline int irq_timing_decode(u64 value, u64 *timestamp)
{
	u64 irq = value & U16_MAX;

	*timestamp = value >> 16;
	return irq;
}
|
|
|
|
/*
 * Append one encoded <timestamp,irq> event to this CPU's ring buffer.
 * count grows monotonically; IRQ_TIMINGS_MASK wraps the write index so
 * old entries are overwritten once the ring is full.
 */
static __always_inline void irq_timings_push(u64 ts, int irq)
{
	struct irq_timings *timings = this_cpu_ptr(&irq_timings);

	timings->values[timings->count & IRQ_TIMINGS_MASK] =
		irq_timing_encode(ts, irq);

	timings->count++;
}
|
|
|
|
/*
 * The function record_irq_time is only called in one place in the
 * interrupts handler. We want this function always inline so the code
 * inside is embedded in the function and the static key branching
 * code can act at the higher level. Without the explicit
 * __always_inline we can end up with a function call and a small
 * overhead in the hotpath for nothing.
 */
static __always_inline void record_irq_time(struct irq_desc *desc)
{
	/* Fast path out unless timings are globally enabled */
	if (!static_branch_likely(&irq_timing_enabled))
		return;

	/* Record only for interrupts opted in via irq_setup_timings() */
	if (desc->istate & IRQS_TIMINGS)
		irq_timings_push(local_clock(), irq_desc_get_irq(desc));
}
|
|
#else
/*
 * CONFIG_IRQ_TIMINGS=n: the timings support compiles away to empty
 * stubs. Note: the stray ';' after the irq_setup_timings() stub body
 * was removed — an extra top-level semicolon is not valid ISO C and
 * triggers a -Wpedantic warning.
 */
static inline void irq_remove_timings(struct irq_desc *desc) {}
static inline void irq_setup_timings(struct irq_desc *desc,
				     struct irqaction *act) {}
static inline void record_irq_time(struct irq_desc *desc) {}
#endif /* CONFIG_IRQ_TIMINGS */
|
|
|
|
|
|
/*
 * Generic irq chip initializer; an empty stub when the generic chip
 * infrastructure is not configured.
 */
#ifdef CONFIG_GENERIC_IRQ_CHIP
void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler);
#else
static inline void
irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
		      int num_ct, unsigned int irq_base,
		      void __iomem *reg_base, irq_flow_handler_t handler) { }
#endif /* CONFIG_GENERIC_IRQ_CHIP */
|
|
|
|
#ifdef CONFIG_GENERIC_PENDING_IRQ
/* True when affinity can be changed directly, i.e. not deferred by the chip */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return !(data->chip->flags & IRQCHIP_MOVE_DEFERRED);
}
/* True when an affinity change has been recorded but not yet applied */
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
/* Store the requested target mask for a deferred affinity change */
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
/* Retrieve the pending target mask into @mask */
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return desc->pending_mask;
}
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
void irq_force_complete_move(struct irq_desc *desc);
|
|
#else /* CONFIG_GENERIC_PENDING_IRQ */
/*
 * Without deferred affinity support, moves are always possible in
 * context, nothing is ever pending and the stubs compile away.
 */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return true;
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return false;
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return NULL;
}
static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
{
	return false;
}
static inline void irq_force_complete_move(struct irq_desc *desc) { }
#endif /* !CONFIG_GENERIC_PENDING_IRQ */
|
|
|
|
/*
 * Fallbacks when there is no hierarchical irq domain support: just
 * toggle the activated state bit. The 'reserve' argument is accepted
 * for interface compatibility but unused here.
 */
#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
{
	irqd_set_activated(data);
	return 0;
}
static inline void irq_domain_deactivate_irq(struct irq_data *data)
{
	irqd_clr_activated(data);
}
#endif
|
|
|
|
/*
 * Parent irq_data in the domain hierarchy, or NULL when hierarchical
 * domains are not configured (parent_data does not exist then).
 */
static inline struct irq_data *irqd_get_parent_data(struct irq_data *irqd)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	return irqd->parent_data;
#else
	return NULL;
#endif
}
|
|
|
|
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>

/* Maps a single state bit to its human readable name for debugfs output */
struct irq_bit_descr {
	unsigned int mask;
	char *name;
};

/* Build an irq_bit_descr from a mask macro, stringifying its name */
#define BIT_MASK_DESCR(m)	{ .mask = m, .name = #m }

/* Dump the bits of @state that are described in the @sd table */
void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
			 const struct irq_bit_descr *sd, int size);

void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
/* Remove the debugfs file and release the device-name copy */
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
	debugfs_remove(desc->debugfs_file);
	kfree(desc->dev_name);
}
void irq_debugfs_copy_devname(int irq, struct device *dev);
# ifdef CONFIG_IRQ_DOMAIN
void irq_domain_debugfs_init(struct dentry *root);
# else
static inline void irq_domain_debugfs_init(struct dentry *root)
{
}
# endif
|
|
#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
/* Debugfs disabled: all hooks compile away to empty stubs */
static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
{
}
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
{
}
static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
{
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
|