Merge tag 'drm-misc-fixes-2025-02-20' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

A reset signal polarity fix for the jd9365da-h3 panel, a folio handling
fix and a config fix in nouveau, a dmem cgroup descendant pool handling
fix, and a missing header for amdxdna.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250220-glorious-cockle-of-might-5b35f7@houat
commit 395436f3bd (Dave Airlie, 2025-02-21 09:16:18 +10:00)

 6 files changed, 24 insertions(+), 48 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS

@@ -7425,7 +7425,6 @@ F:	Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
 F:	drivers/gpu/drm/panel/panel-novatek-nt36672a.c
 
 DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
-M:	Karol Herbst <kherbst@redhat.com>
 M:	Lyude Paul <lyude@redhat.com>
 M:	Danilo Krummrich <dakr@kernel.org>
 L:	dri-devel@lists.freedesktop.org
@@ -24064,7 +24063,6 @@ F:	tools/testing/selftests/ftrace/
 TRACING MMIO ACCESSES (MMIOTRACE)
 M:	Steven Rostedt <rostedt@goodmis.org>
 M:	Masami Hiramatsu <mhiramat@kernel.org>
-R:	Karol Herbst <karolherbst@gmail.com>
 R:	Pekka Paalanen <ppaalanen@gmail.com>
 L:	linux-kernel@vger.kernel.org
 L:	nouveau@lists.freedesktop.org

diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c

@@ -8,6 +8,7 @@
 #include <linux/bitfield.h>
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
+#include <linux/slab.h>
 #include <linux/xarray.h>
 
 #define CREATE_TRACE_POINTS
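
Note: the mailbox code uses the slab allocator (kzalloc()/kfree() and
friends), which is declared in <linux/slab.h>; until now the header was
only reaching this file transitively through other includes. A minimal
sketch of the dependency, with alloc_msg_record() as a hypothetical
stand-in for the driver's real helpers:

#include <linux/slab.h>		/* kzalloc(), kfree() */

/* Without the explicit <linux/slab.h> include this only compiles while
 * some other header happens to drag slab.h in transitively, and breaks
 * on configurations where that include chain changes. */
static void *alloc_msg_record(size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}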

diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c

@@ -590,6 +590,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
 	unsigned long timeout =
 		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
 	struct mm_struct *mm = svmm->notifier.mm;
+	struct folio *folio;
 	struct page *page;
 	unsigned long start = args->p.addr;
 	unsigned long notifier_seq;
@@ -616,12 +617,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
 			ret = -EINVAL;
 			goto out;
 		}
+		folio = page_folio(page);
 
 		mutex_lock(&svmm->mutex);
 		if (!mmu_interval_read_retry(&notifier->notifier,
 					     notifier_seq))
 			break;
 		mutex_unlock(&svmm->mutex);
+
+		folio_unlock(folio);
+		folio_put(folio);
 	}
 
 	/* Map the page on the GPU. */
@@ -637,8 +642,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
 	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
 	mutex_unlock(&svmm->mutex);
 
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 
 out:
 	mmu_interval_notifier_remove(&notifier->notifier);
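
Note: the exclusive-access page comes back locked and with a reference
held, and that lock/reference really belongs to the folio backing the
page. The bug was twofold: the retry path looped back without dropping
either, and the final release used the page API. A minimal sketch of
the corrected loop shape; grab_exclusive_page() and seq_retry_needed()
are hypothetical stand-ins for the driver specifics:

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>

struct page *grab_exclusive_page(void);	/* hypothetical */
bool seq_retry_needed(void);		/* hypothetical */

static int fault_and_map(struct mutex *lock)
{
	struct folio *folio;
	struct page *page;

	for (;;) {
		page = grab_exclusive_page();	/* locked + referenced */
		if (!page)
			return -EINVAL;
		folio = page_folio(page);

		mutex_lock(lock);
		if (!seq_retry_needed())
			break;	/* keep the folio locked for the update */
		mutex_unlock(lock);

		/* retry path: this unlock/put pair is what was missing */
		folio_unlock(folio);
		folio_put(folio);
	}

	/* ... program the mapping under the lock ... */
	mutex_unlock(lock);
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}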

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c

@@ -75,7 +75,7 @@ gp10b_pmu_acr = {
 	.bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
 };
 
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
 MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
 MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
 MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");

diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c

@@ -109,13 +109,13 @@ static int jadard_prepare(struct drm_panel *panel)
 	if (jadard->desc->lp11_to_reset_delay_ms)
 		msleep(jadard->desc->lp11_to_reset_delay_ms);
 
-	gpiod_set_value(jadard->reset, 1);
+	gpiod_set_value(jadard->reset, 0);
 	msleep(5);
 
-	gpiod_set_value(jadard->reset, 0);
+	gpiod_set_value(jadard->reset, 1);
 	msleep(10);
 
-	gpiod_set_value(jadard->reset, 1);
+	gpiod_set_value(jadard->reset, 0);
 	msleep(130);
 
 	ret = jadard->desc->init(jadard);
@@ -1130,7 +1130,7 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
 	dsi->format = desc->format;
 	dsi->lanes = desc->lanes;
 
-	jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(jadard->reset)) {
 		DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
 		return PTR_ERR(jadard->reset);
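
Note: gpiod_set_value() takes the *logical* line state. With the reset
line described as active-low in the device tree, 1 means "assert reset"
(pin driven low) and 0 means "release reset"; the old code was written
against the physical level and so had every value inverted, including
requesting the GPIO with GPIOD_OUT_LOW instead of starting with reset
asserted. A sketch of the fixed sequence under that assumption
(panel_hw_reset() is illustrative, not the driver's function):

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

/* Logical values: 1 = assert reset (physically low on an active-low
 * line), 0 = deassert. */
static void panel_hw_reset(struct gpio_desc *reset)
{
	gpiod_set_value(reset, 0);	/* release reset */
	msleep(5);
	gpiod_set_value(reset, 1);	/* assert reset */
	msleep(10);
	gpiod_set_value(reset, 0);	/* release; panel controller boots */
	msleep(130);
}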

diff --git a/kernel/cgroup/dmem.c b/kernel/cgroup/dmem.c

@@ -220,60 +220,32 @@ dmem_cgroup_calculate_protection(struct dmem_cgroup_pool_state *limit_pool,
 				 struct dmem_cgroup_pool_state *test_pool)
 {
 	struct page_counter *climit;
-	struct cgroup_subsys_state *css, *next_css;
+	struct cgroup_subsys_state *css;
 	struct dmemcg_state *dmemcg_iter;
-	struct dmem_cgroup_pool_state *pool, *parent_pool;
-	bool found_descendant;
+	struct dmem_cgroup_pool_state *pool, *found_pool;
 
 	climit = &limit_pool->cnt;
 
 	rcu_read_lock();
 
-	parent_pool = pool = limit_pool;
-	css = &limit_pool->cs->css;
-	/*
-	 * This logic is roughly equivalent to css_foreach_descendant_pre,
-	 * except we also track the parent pool to find out which pool we need
-	 * to calculate protection values for.
-	 *
-	 * We can stop the traversal once we find test_pool among the
-	 * descendants since we don't really care about any others.
-	 */
-	while (pool != test_pool) {
-		next_css = css_next_child(NULL, css);
-		if (next_css) {
-			parent_pool = pool;
-		} else {
-			while (css != &limit_pool->cs->css) {
-				next_css = css_next_child(css, css->parent);
-				if (next_css)
-					break;
-				css = css->parent;
-				parent_pool = pool_parent(parent_pool);
-			}
-			/*
-			 * We can only hit this when test_pool is not a
-			 * descendant of limit_pool.
-			 */
-			if (WARN_ON_ONCE(css == &limit_pool->cs->css))
-				break;
-		}
-		css = next_css;
-		found_descendant = false;
+	css_for_each_descendant_pre(css, &limit_pool->cs->css) {
 		dmemcg_iter = container_of(css, struct dmemcg_state, css);
+		found_pool = NULL;
 
 		list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_node) {
-			if (pool_parent(pool) == parent_pool) {
-				found_descendant = true;
+			if (pool->region == limit_pool->region) {
+				found_pool = pool;
 				break;
 			}
 		}
-		if (!found_descendant)
+		if (!found_pool)
 			continue;
 
 		page_counter_calculate_protection(
-			climit, &pool->cnt, true);
+			climit, &found_pool->cnt, true);
+		if (found_pool == test_pool)
+			break;
 	}
 
 	rcu_read_unlock();
 }
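
Note: the rewrite replaces the open-coded walk with
css_for_each_descendant_pre(), which visits limit_pool's own cgroup
first and then every descendant with parents always seen before their
children, under rcu_read_lock(). That pre-order guarantee is what makes
the early break on test_pool safe: every ancestor pool between
limit_pool and test_pool has already had its protection calculated. A
minimal sketch of the iterator's contract (walk_subtree() is
illustrative, not dmem code):

#include <linux/cgroup.h>
#include <linux/rcupdate.h>

static void walk_subtree(struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css_for_each_descendant_pre(css, root) {
		/* `root` is visited first; every other css is visited
		 * only after its parent, so values can be propagated
		 * top-down in a single pass. */
	}
	rcu_read_unlock();
}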