linux/drivers/gpu/drm/v3d/v3d_irq.c
Maíra Canal 6e64d6b3a3
drm/v3d: Assign job pointer to NULL before signaling the fence
In commit e4b5ccd392b9 ("drm/v3d: Ensure job pointer is set to NULL
after job completion"), we introduced a change that assigns the job
pointer to NULL after a job completes, to indicate that no job is
currently running.

However, this approach created a race condition between the DRM
scheduler workqueue and the IRQ execution thread. As soon as the fence is
signaled in the IRQ execution thread, a new job starts to be executed, so
the IRQ execution thread may set the job pointer to NULL at the same time
that `run_job()` assigns a new job to that pointer.

This race condition can lead to a NULL pointer dereference if the IRQ
execution thread sets the job pointer to NULL after `run_job()` assigns
it to the new job. When the new job completes and the GPU emits an
interrupt, `v3d_irq()` is triggered, potentially causing a crash.
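
Schematically, with the old ordering (fence signaled before the job
pointer is cleared), the interleaving looks roughly like this, using
the bin queue as an example:

  IRQ execution thread (v3d_irq)     DRM scheduler workqueue (run_job)
  ------------------------------     ---------------------------------
  dma_fence_signal(&fence->base)
                                     v3d->bin_job = <new job>
                                     <new job starts on the GPU>
  v3d->bin_job = NULL                <- overwrites the new job
  ...
  <new job finishes, FLDONE fires>
  v3d->bin_job->base.irq_fence       <- NULL pointer dereference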

[  466.310099] Unable to handle kernel NULL pointer dereference at virtual address 00000000000000c0
[  466.318928] Mem abort info:
[  466.321723]   ESR = 0x0000000096000005
[  466.325479]   EC = 0x25: DABT (current EL), IL = 32 bits
[  466.330807]   SET = 0, FnV = 0
[  466.333864]   EA = 0, S1PTW = 0
[  466.337010]   FSC = 0x05: level 1 translation fault
[  466.341900] Data abort info:
[  466.344783]   ISV = 0, ISS = 0x00000005, ISS2 = 0x00000000
[  466.350285]   CM = 0, WnR = 0, TnD = 0, TagAccess = 0
[  466.355350]   GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0
[  466.360677] user pgtable: 4k pages, 39-bit VAs, pgdp=0000000089772000
[  466.367140] [00000000000000c0] pgd=0000000000000000, p4d=0000000000000000, pud=0000000000000000
[  466.375875] Internal error: Oops: 0000000096000005 [#1] PREEMPT SMP
[  466.382163] Modules linked in: rfcomm snd_seq_dummy snd_hrtimer snd_seq snd_seq_device algif_hash algif_skcipher af_alg bnep binfmt_misc vc4 snd_soc_hdmi_codec drm_display_helper cec brcmfmac_wcc spidev rpivid_hevc(C) drm_client_lib brcmfmac hci_uart drm_dma_helper pisp_be btbcm brcmutil snd_soc_core aes_ce_blk v4l2_mem2mem bluetooth aes_ce_cipher snd_compress videobuf2_dma_contig ghash_ce cfg80211 gf128mul snd_pcm_dmaengine videobuf2_memops ecdh_generic sha2_ce ecc videobuf2_v4l2 snd_pcm v3d sha256_arm64 rfkill videodev snd_timer sha1_ce libaes gpu_sched snd videobuf2_common sha1_generic drm_shmem_helper mc rp1_pio drm_kms_helper raspberrypi_hwmon spi_bcm2835 gpio_keys i2c_brcmstb rp1 raspberrypi_gpiomem rp1_mailbox rp1_adc nvmem_rmem uio_pdrv_genirq uio i2c_dev drm ledtrig_pattern drm_panel_orientation_quirks backlight fuse dm_mod ip_tables x_tables ipv6
[  466.458429] CPU: 0 UID: 1000 PID: 2008 Comm: chromium Tainted: G         C         6.13.0-v8+ #18
[  466.467336] Tainted: [C]=CRAP
[  466.470306] Hardware name: Raspberry Pi 5 Model B Rev 1.0 (DT)
[  466.476157] pstate: 404000c9 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[  466.483143] pc : v3d_irq+0x118/0x2e0 [v3d]
[  466.487258] lr : __handle_irq_event_percpu+0x60/0x228
[  466.492327] sp : ffffffc080003ea0
[  466.495646] x29: ffffffc080003ea0 x28: ffffff80c0c94200 x27: 0000000000000000
[  466.502807] x26: ffffffd08dd81d7b x25: ffffff80c0c94200 x24: ffffff8003bdc200
[  466.509969] x23: 0000000000000001 x22: 00000000000000a7 x21: 0000000000000000
[  466.517130] x20: ffffff8041bb0000 x19: 0000000000000001 x18: 0000000000000000
[  466.524291] x17: ffffffafadfb0000 x16: ffffffc080000000 x15: 0000000000000000
[  466.531452] x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000
[  466.538613] x11: 0000000000000000 x10: 0000000000000000 x9 : ffffffd08c527eb0
[  466.545777] x8 : 0000000000000000 x7 : 0000000000000000 x6 : 0000000000000000
[  466.552941] x5 : ffffffd08c4100d0 x4 : ffffffafadfb0000 x3 : ffffffc080003f70
[  466.560102] x2 : ffffffc0829e8058 x1 : 0000000000000001 x0 : 0000000000000000
[  466.567263] Call trace:
[  466.569711]  v3d_irq+0x118/0x2e0 [v3d] (P)
[  466.573826]  __handle_irq_event_percpu+0x60/0x228
[  466.578546]  handle_irq_event+0x54/0xb8
[  466.582391]  handle_fasteoi_irq+0xac/0x240
[  466.586498]  generic_handle_domain_irq+0x34/0x58
[  466.591128]  gic_handle_irq+0x48/0xd8
[  466.594798]  call_on_irq_stack+0x24/0x58
[  466.598730]  do_interrupt_handler+0x88/0x98
[  466.602923]  el0_interrupt+0x44/0xc0
[  466.606508]  __el0_irq_handler_common+0x18/0x28
[  466.611050]  el0t_64_irq_handler+0x10/0x20
[  466.615156]  el0t_64_irq+0x198/0x1a0
[  466.618740] Code: 52800035 3607faf3 f9442e80 52800021 (f9406018)
[  466.624853] ---[ end trace 0000000000000000 ]---
[  466.629483] Kernel panic - not syncing: Oops: Fatal exception in interrupt
[  466.636384] SMP: stopping secondary CPUs
[  466.640320] Kernel Offset: 0x100c400000 from 0xffffffc080000000
[  466.646259] PHYS_OFFSET: 0x0
[  466.649141] CPU features: 0x100,00000170,00901250,0200720b
[  466.654644] Memory Limit: none
[  466.657706] ---[ end Kernel panic - not syncing: Oops: Fatal exception in interrupt ]---

Fix the crash by assigning the job pointer to NULL before signaling the
fence. This ensures that the job pointer is cleared before any new job
starts execution, preventing the race condition and the NULL pointer
dereference crash.
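
With this ordering, the next `run_job()` can only execute after the
pointer has already been cleared, e.g. in the FLDONE handler (the
render, CSD, and TFU handlers follow the same pattern):

    v3d->bin_job = NULL;
    dma_fence_signal(&fence->base);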

Cc: stable@vger.kernel.org
Fixes: e4b5ccd392b9 ("drm/v3d: Ensure job pointer is set to NULL after job completion")
Signed-off-by: Maíra Canal <mcanal@igalia.com>
Reviewed-by: Jose Maria Casanova Crespo <jmcasanova@igalia.com>
Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
Tested-by: Phil Elwell <phil@raspberrypi.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250123012403.20447-1-mcanal@igalia.com
2025-01-23 16:15:01 -03:00


// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a bin, render, TFU done, or CSD done interrupt, we
 * need to signal the fence for that job so that the scheduler can
 * queue up the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include <linux/platform_device.h>
#include <linux/sched/clock.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
#define V3D_CORE_IRQS(ver) ((u32)(V3D_INT_OUTOMEM |	\
				  V3D_INT_FLDONE |	\
				  V3D_INT_FRDONE |	\
				  V3D_INT_CSDDONE(ver) |	\
				  (ver < 71 ? V3D_INT_GMPV : 0)))

#define V3D_HUB_IRQS(ver) ((u32)(V3D_HUB_INT_MMU_WRV |	\
				 V3D_HUB_INT_MMU_PTI |	\
				 V3D_HUB_INT_MMU_CAP |	\
				 V3D_HUB_INT_TFUC |	\
				 (ver >= 71 ? V3D_V7_HUB_INT_GMPV : 0)))

static irqreturn_t
v3d_hub_irq(int irq, void *arg);

static void
v3d_overflow_mem_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, overflow_mem_work);
	struct drm_device *dev = &v3d->drm;
	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
	struct drm_gem_object *obj;
	unsigned long irqflags;

	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}
	obj = &bo->base.base;

	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}

	drm_gem_object_get(obj);
	list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	v3d_mmu_flush_all(v3d);

	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << V3D_MMU_PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);

out:
	drm_gem_object_put(obj);
}

static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.  Also, as of V3D 4.1, FLDONE won't
		 * be reported until any OOM state has been cleared.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}
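
	/* For each job-done interrupt below, clear the job pointer before
	 * signaling the fence: signaling lets the DRM scheduler's run_job()
	 * install the next job, so a later write of NULL would race with
	 * that assignment.
	 */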
	if (intsts & V3D_INT_FLDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->bin_job->base.irq_fence);

		v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);

		v3d->bin_job = NULL;
		dma_fence_signal(&fence->base);

		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FRDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->render_job->base.irq_fence);

		v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);

		v3d->render_job = NULL;
		dma_fence_signal(&fence->base);

		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_CSDDONE(v3d->ver)) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->csd_job->base.irq_fence);

		v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
		trace_v3d_csd_irq(&v3d->drm, fence->seqno);

		v3d->csd_job = NULL;
		dma_fence_signal(&fence->base);

		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (v3d->ver < 71 && (intsts & V3D_INT_GMPV))
		dev_err(v3d->drm.dev, "GMP violation\n");

	/* V3D 4.2 wires the hub and core IRQs together, so if we
	 * didn't see the common one then check hub for MMU IRQs.
	 */
	if (v3d->single_irq_line && status == IRQ_NONE)
		return v3d_hub_irq(irq, arg);

	return status;
}

static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & V3D_HUB_INT_TFUC) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->tfu_job->base.irq_fence);

		v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);

		v3d->tfu_job = NULL;
		dma_fence_signal(&fence->base);

		status = IRQ_HANDLED;
	}

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
		u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
				(v3d->va_width - 32));
		static const char *const v3d41_axi_ids[] = {
			"L2T",
			"PTB",
			"PSE",
			"TLB",
			"CLE",
			"TFU",
			"MMU",
			"GMP",
		};
		const char *client = "?";

		V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
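
		/* On V3D 4.1+ the client is encoded in the upper bits of
		 * the AXI ID, so shift it down before indexing the table.
		 */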
		if (v3d->ver >= 41) {
			axi_id = axi_id >> 5;
			if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
				client = v3d41_axi_ids[axi_id];
		}

		dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
			client, axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));

		status = IRQ_HANDLED;
	}

	if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
		dev_err(v3d->drm.dev, "GMP Violation\n");
		status = IRQ_HANDLED;
	}

	return status;
}

int
v3d_irq_init(struct v3d_dev *v3d)
{
	int irq1, ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver));
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver));
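
	/* A second platform IRQ means the core and hub interrupts are on
	 * separate lines; otherwise they share a single line and v3d_irq()
	 * hands off to v3d_hub_irq() when it finds nothing to handle.
	 */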
	irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
	if (irq1 == -EPROBE_DEFER)
		return irq1;
	if (irq1 > 0) {
		ret = devm_request_irq(v3d->drm.dev, irq1,
				       v3d_irq, IRQF_SHARED,
				       "v3d_core0", v3d);
		if (ret)
			goto fail;
		ret = devm_request_irq(v3d->drm.dev,
				       platform_get_irq(v3d_to_pdev(v3d), 0),
				       v3d_hub_irq, IRQF_SHARED,
				       "v3d_hub", v3d);
		if (ret)
			goto fail;
	} else {
		v3d->single_irq_line = true;

		ret = devm_request_irq(v3d->drm.dev,
				       platform_get_irq(v3d_to_pdev(v3d), 0),
				       v3d_irq, IRQF_SHARED,
				       "v3d", v3d);
		if (ret)
			goto fail;
	}

	v3d_irq_enable(v3d);
	return 0;

fail:
	if (ret != -EPROBE_DEFER)
		dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
	return ret;
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS(v3d->ver));
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS(v3d->ver));
	}

	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS(v3d->ver));
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS(v3d->ver));
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver));
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver));

	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}