Mirror of https://github.com/torvalds/linux.git (synced 2025-04-12 06:49:52 +00:00)
Merge branch 'for-6.15/extended-linear-cache' into cxl-for-next2
Add support for Extended Linear Cache for CXL. Add enumeration support of the cache. Add MCE notification of the aliased memory address.
commit 763e15d047
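For orientation, an editor's sketch (not part of the merge): the series assumes a direct-mapped DRAM cache sits immediately in front of the CXL portion of a region, so every system physical address in one half of the region has exactly one alias in the other half, offset by the cache size. The user-space C program below mirrors the arithmetic that cxl_port_get_spa_cache_alias() in the region.c hunks performs; the region layout values are made-up examples, not values from this merge.

/*
 * Illustrative sketch of extended-linear-cache address aliasing.
 * Hypothetical layout: a 2 GB DRAM cache fronting 2 GB of CXL capacity,
 * both covered by one 4 GB region resource.
 */
#include <stdint.h>
#include <stdio.h>

#define INVALID_ALIAS (~0ULL)

struct region {
	uint64_t start;		/* region resource start (DRAM cache base) */
	uint64_t end;		/* region resource end (inclusive) */
	uint64_t cache_size;	/* extended linear cache size, 0 if none */
};

static uint64_t spa_cache_alias(const struct region *r, uint64_t spa)
{
	if (spa < r->start || spa > r->end || !r->cache_size)
		return INVALID_ALIAS;

	/* addresses past the DRAM cache alias back into the cache portion */
	if (spa > r->start + r->cache_size)
		return spa - r->cache_size;

	/* addresses in the DRAM cache alias forward into the CXL portion */
	return spa + r->cache_size;
}

int main(void)
{
	struct region r = {
		.start = 0x100000000ULL,
		.end = 0x1ffffffffULL,
		.cache_size = 0x80000000ULL,
	};

	/* 0x1c0000000 sits in the CXL half; its alias is in the DRAM cache */
	printf("alias of 0x1c0000000: %#llx\n",
	       (unsigned long long)spa_cache_alias(&r, 0x1c0000000ULL));
	/* 0x140000000 sits in the DRAM cache; its alias is in the CXL half */
	printf("alias of 0x140000000: %#llx\n",
	       (unsigned long long)spa_cache_alias(&r, 0x140000000ULL));
	return 0;
}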
@@ -177,6 +177,12 @@ Description:
 		The cache write policy: 0 for write-back, 1 for write-through,
 		other or unknown.
 
+What:		/sys/devices/system/node/nodeX/memory_side_cache/indexY/address_mode
+Date:		March 2025
+Contact:	Dave Jiang <dave.jiang@intel.com>
+Description:
+		The address mode: 0 for reserved, 1 for extended-linear.
+
 What:		/sys/devices/system/node/nodeX/x86/sgx_total_bytes
 Date:		November 2021
 Contact:	Jarkko Sakkinen <jarkko@kernel.org>
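A minimal user-space sketch (an editor's illustration, not part of the merge) of reading the new attribute; the node and cache-index numbers are hypothetical, and the value is parsed with %i because the node.c hunk below emits it in "%#x" form.

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/node/node0/memory_side_cache/index1/address_mode";
	FILE *f = fopen(path, "r");
	int mode;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%i", &mode) != 1)
		mode = -1;
	fclose(f);

	/* 0: reserved, 1: extended-linear, per the ABI entry above */
	printf("address_mode=%d (%s)\n", mode,
	       mode == 1 ? "extended-linear" : "reserved/unknown");
	return 0;
}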
@@ -2081,6 +2081,7 @@ int set_mce_nospec(unsigned long pfn)
 		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
 	return rc;
 }
+EXPORT_SYMBOL_GPL(set_mce_nospec);
 
 /* Restore full speculative operation to the pfn. */
 int clear_mce_nospec(unsigned long pfn)
@@ -108,6 +108,45 @@ static struct memory_target *find_mem_target(unsigned int mem_pxm)
 	return NULL;
 }
 
+/**
+ * hmat_get_extended_linear_cache_size - Retrieve the extended linear cache size
+ * @backing_res: resource from the backing media
+ * @nid: node id for the memory region
+ * @cache_size: (Output) size of extended linear cache.
+ *
+ * Return: 0 on success. Errno on failure.
+ *
+ */
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+					resource_size_t *cache_size)
+{
+	unsigned int pxm = node_to_pxm(nid);
+	struct memory_target *target;
+	struct target_cache *tcache;
+	struct resource *res;
+
+	target = find_mem_target(pxm);
+	if (!target)
+		return -ENOENT;
+
+	list_for_each_entry(tcache, &target->caches, node) {
+		if (tcache->cache_attrs.address_mode !=
+				NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR)
+			continue;
+
+		res = &target->memregions;
+		if (!resource_contains(res, backing_res))
+			continue;
+
+		*cache_size = tcache->cache_attrs.size;
+		return 0;
+	}
+
+	*cache_size = 0;
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hmat_get_extended_linear_cache_size, "CXL");
+
 static struct memory_target *acpi_find_genport_target(u32 uid)
 {
 	struct memory_target *target;
@@ -506,6 +545,11 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
 	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
 	case ACPI_HMAT_CA_DIRECT_MAPPED:
 		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+		/* Extended Linear mode is only valid if cache is direct mapped */
+		if (cache->address_mode == ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR) {
+			tcache->cache_attrs.address_mode =
+				NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR;
+		}
 		break;
 	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
 		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
@@ -244,12 +244,14 @@ CACHE_ATTR(size, "%llu")
 CACHE_ATTR(line_size, "%u")
 CACHE_ATTR(indexing, "%u")
 CACHE_ATTR(write_policy, "%u")
+CACHE_ATTR(address_mode, "%#x")
 
 static struct attribute *cache_attrs[] = {
 	&dev_attr_indexing.attr,
 	&dev_attr_size.attr,
 	&dev_attr_line_size.attr,
 	&dev_attr_write_policy.attr,
+	&dev_attr_address_mode.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(cache);
@@ -146,4 +146,8 @@ config CXL_REGION_INVALIDATION_TEST
 	  If unsure, or if this kernel is meant for production environments,
 	  say N.
 
+config CXL_MCE
+	def_bool y
+	depends on X86_MCE && MEMORY_FAILURE
+
 endif
@@ -15,5 +15,7 @@ cxl_core-y += hdm.o
 cxl_core-y += pmu.o
 cxl_core-y += cdat.o
 cxl_core-y += ras.o
+cxl_core-y += acpi.o
 cxl_core-$(CONFIG_TRACING) += trace.o
 cxl_core-$(CONFIG_CXL_REGION) += region.o
+cxl_core-$(CONFIG_CXL_MCE) += mce.o
drivers/cxl/core/acpi.c (new file, 11 lines)
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#include <linux/acpi.h>
+#include "cxl.h"
+#include "core.h"
+
+int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
+					    int nid, resource_size_t *size)
+{
+	return hmat_get_extended_linear_cache_size(backing_res, nid, size);
+}
@@ -118,5 +118,7 @@ int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
 int cxl_ras_init(void);
 void cxl_ras_exit(void);
 int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port);
+int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
+					    int nid, resource_size_t *size);
 
 #endif /* __CXL_CORE_H__ */
@@ -11,6 +11,7 @@
 
 #include "core.h"
 #include "trace.h"
+#include "mce.h"
 
 static bool cxl_raw_allow_all;
 
@@ -871,7 +872,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
 	}
 
 	if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
-		u64 dpa, hpa = ULLONG_MAX;
+		u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
 		struct cxl_region *cxlr;
 
 		/*
@@ -884,14 +885,20 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
 
 		dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
 		cxlr = cxl_dpa_to_region(cxlmd, dpa);
-		if (cxlr)
+		if (cxlr) {
+			u64 cache_size = cxlr->params.cache_size;
+
 			hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
+			if (cache_size)
+				hpa_alias = hpa - cache_size;
+		}
 
 		if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
 			trace_cxl_general_media(cxlmd, type, cxlr, hpa,
-						&evt->gen_media);
+						hpa_alias, &evt->gen_media);
 		else if (event_type == CXL_CPER_EVENT_DRAM)
-			trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
+			trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
+				       &evt->dram);
 	}
 }
 EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
@@ -1451,6 +1458,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL")
 struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
 {
 	struct cxl_memdev_state *mds;
+	int rc;
 
 	mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
 	if (!mds) {
@@ -1464,6 +1472,10 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
 	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
 	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
 
+	rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
+	if (rc)
+		return ERR_PTR(rc);
+
 	return mds;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL");
drivers/cxl/core/mce.c (new file, 65 lines)
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/set_memory.h>
+#include <asm/mce.h>
+#include <cxlmem.h>
+#include "mce.h"
+
+static int cxl_handle_mce(struct notifier_block *nb, unsigned long val,
+			  void *data)
+{
+	struct cxl_memdev_state *mds = container_of(nb, struct cxl_memdev_state,
+						    mce_notifier);
+	struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
+	struct cxl_port *endpoint = cxlmd->endpoint;
+	struct mce *mce = data;
+	u64 spa, spa_alias;
+	unsigned long pfn;
+
+	if (!mce || !mce_usable_address(mce))
+		return NOTIFY_DONE;
+
+	if (!endpoint)
+		return NOTIFY_DONE;
+
+	spa = mce->addr & MCI_ADDR_PHYSADDR;
+
+	pfn = spa >> PAGE_SHIFT;
+	if (!pfn_valid(pfn))
+		return NOTIFY_DONE;
+
+	spa_alias = cxl_port_get_spa_cache_alias(endpoint, spa);
+	if (spa_alias == ~0ULL)
+		return NOTIFY_DONE;
+
+	pfn = spa_alias >> PAGE_SHIFT;
+
+	/*
+	 * Take down the aliased memory page. The original memory page flagged
+	 * by the MCE will be taken care of by the standard MCE handler.
+	 */
+	dev_emerg(mds->cxlds.dev, "Offlining aliased SPA address0: %#llx\n",
+		  spa_alias);
+	if (!memory_failure(pfn, 0))
+		set_mce_nospec(pfn);
+
+	return NOTIFY_OK;
+}
+
+static void cxl_unregister_mce_notifier(void *mce_notifier)
+{
+	mce_unregister_decode_chain(mce_notifier);
+}
+
+int devm_cxl_register_mce_notifier(struct device *dev,
+				   struct notifier_block *mce_notifier)
+{
+	mce_notifier->notifier_call = cxl_handle_mce;
+	mce_notifier->priority = MCE_PRIO_UC;
+	mce_register_decode_chain(mce_notifier);
+
+	return devm_add_action_or_reset(dev, cxl_unregister_mce_notifier,
+					mce_notifier);
+}
drivers/cxl/core/mce.h (new file, 20 lines)
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#ifndef _CXL_CORE_MCE_H_
+#define _CXL_CORE_MCE_H_
+
+#include <linux/notifier.h>
+
+#ifdef CONFIG_CXL_MCE
+int devm_cxl_register_mce_notifier(struct device *dev,
+				   struct notifier_block *mce_notifer);
+#else
+static inline int
+devm_cxl_register_mce_notifier(struct device *dev,
+			       struct notifier_block *mce_notifier)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+#endif
@@ -832,6 +832,21 @@ static int match_free_decoder(struct device *dev, const void *data)
 	return 1;
 }
 
+static bool region_res_match_cxl_range(const struct cxl_region_params *p,
+					struct range *range)
+{
+	if (!p->res)
+		return false;
+
+	/*
+	 * If an extended linear cache region then the CXL range is assumed
+	 * to be fronted by the DRAM range in current known implementation.
+	 * This assumption will be made until a variant implementation exists.
+	 */
+	return p->res->start + p->cache_size == range->start &&
+		p->res->end == range->end;
+}
+
 static int match_auto_decoder(struct device *dev, const void *data)
 {
 	const struct cxl_region_params *p = data;
@@ -844,7 +859,7 @@ static int match_auto_decoder(struct device *dev, const void *data)
 	cxld = to_cxl_decoder(dev);
 	r = &cxld->hpa_range;
 
-	if (p->res && p->res->start == r->start && p->res->end == r->end)
+	if (region_res_match_cxl_range(p, r))
 		return 1;
 
 	return 0;
@@ -1432,8 +1447,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
 	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
 		if (cxld->interleave_ways != iw ||
 		    cxld->interleave_granularity != ig ||
-		    cxld->hpa_range.start != p->res->start ||
-		    cxld->hpa_range.end != p->res->end ||
+		    !region_res_match_cxl_range(p, &cxld->hpa_range) ||
 		    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
 			dev_err(&cxlr->dev,
 				"%s:%s %s expected iw: %d ig: %d %pr\n",
@@ -1960,13 +1974,13 @@ static int cxl_region_attach(struct cxl_region *cxlr,
 		return -ENXIO;
 	}
 
-	if (resource_size(cxled->dpa_res) * p->interleave_ways !=
+	if (resource_size(cxled->dpa_res) * p->interleave_ways + p->cache_size !=
 	    resource_size(p->res)) {
 		dev_dbg(&cxlr->dev,
-			"%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
+			"%s:%s-size-%#llx * ways-%d + cache-%#llx != region-size-%#llx\n",
 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
 			(u64)resource_size(cxled->dpa_res), p->interleave_ways,
-			(u64)resource_size(p->res));
+			(u64)p->cache_size, (u64)resource_size(p->res));
 		return -EINVAL;
 	}
 
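To make the adjusted check above concrete, a small sketch (an editor's illustration with made-up numbers, not part of the merge): the region resource now spans the DRAM cache plus the interleaved CXL capacity, so the attach-time size check becomes dpa_size * interleave_ways + cache_size == region_size.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t dpa_size = 0x40000000ULL;	/* 1 GB per endpoint decoder */
	int interleave_ways = 2;		/* two endpoint decoders */
	uint64_t cache_size = 0x80000000ULL;	/* 2 GB DRAM cache in front */
	uint64_t region_size = 0x100000000ULL;	/* 4 GB region resource */

	/* mirrors the check in the cxl_region_attach() hunk above */
	assert(dpa_size * (uint64_t)interleave_ways + cache_size == region_size);
	return 0;
}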
@@ -2920,7 +2934,7 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
 	hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
 
 	/* Apply the hpa_offset to the region base address */
-	hpa = hpa_offset + p->res->start;
+	hpa = hpa_offset + p->res->start + p->cache_size;
 
 	/* Root decoder translation overrides typical modulo decode */
 	if (cxlrd->hpa_to_spa)
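A worked instance of the adjusted translation above (editor's illustration, made-up numbers): because the region resource now starts at the DRAM cache base, the decoded hpa_offset is shifted by cache_size so the result lands in the CXL-backed half of the region.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t res_start = 0x100000000ULL;	/* region resource start */
	uint64_t cache_size = 0x80000000ULL;	/* 2 GB DRAM cache in front */
	uint64_t hpa_offset = 0x1000ULL;	/* offset decoded from the DPA */

	uint64_t hpa = hpa_offset + res_start + cache_size;

	/* the resulting HPA falls past the DRAM cache, in the CXL portion */
	assert(hpa >= res_start + cache_size);
	return 0;
}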
@@ -3215,6 +3229,52 @@ static int match_region_by_range(struct device *dev, const void *data)
 	return 0;
 }
 
+static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
+					    struct resource *res)
+{
+	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+	struct cxl_region_params *p = &cxlr->params;
+	int nid = phys_to_target_node(res->start);
+	resource_size_t size, cache_size, start;
+	int rc;
+
+	size = resource_size(res);
+	if (!size)
+		return -EINVAL;
+
+	rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size);
+	if (rc)
+		return rc;
+
+	if (!cache_size)
+		return 0;
+
+	if (size != cache_size) {
+		dev_warn(&cxlr->dev,
+			 "Extended Linear Cache size %lld != CXL size %lld. No Support!",
+			 cache_size, size);
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Move the start of the range to where the cache range starts. The
+	 * implementation assumes that the cache range is in front of the
+	 * CXL range. This is not dictated by the HMAT spec but is how the
+	 * current known implementation is configured.
+	 *
+	 * The cache range is expected to be within the CFMWS. The adjusted
+	 * res->start should not be less than cxlrd->res->start.
+	 */
+	start = res->start - cache_size;
+	if (start < cxlrd->res->start)
+		return -ENXIO;
+
+	res->start = start;
+	p->cache_size = cache_size;
+
+	return 0;
+}
+
 static int __construct_region(struct cxl_region *cxlr,
 			      struct cxl_root_decoder *cxlrd,
 			      struct cxl_endpoint_decoder *cxled)
@@ -3243,6 +3303,18 @@ static int __construct_region(struct cxl_region *cxlr,
 
 	*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
 				    dev_name(&cxlr->dev));
 
+	rc = cxl_extended_linear_cache_resize(cxlr, res);
+	if (rc) {
+		/*
+		 * Failing to support extended linear cache region resize does not
+		 * prevent the region from functioning. Only causes cxl list showing
+		 * incorrect region size.
+		 */
+		dev_warn(cxlmd->dev.parent,
+			 "Extended linear cache calculation failed.\n");
+	}
+
 	rc = insert_resource(cxlrd->res, res);
 	if (rc) {
 		/*
@@ -3374,6 +3446,34 @@ out:
 }
 EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL");
 
+u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa)
+{
+	struct cxl_region_ref *iter;
+	unsigned long index;
+
+	if (!endpoint)
+		return ~0ULL;
+
+	guard(rwsem_write)(&cxl_region_rwsem);
+
+	xa_for_each(&endpoint->regions, index, iter) {
+		struct cxl_region_params *p = &iter->region->params;
+
+		if (p->res->start <= spa && spa <= p->res->end) {
+			if (!p->cache_size)
+				return ~0ULL;
+
+			if (spa > p->res->start + p->cache_size)
+				return spa - p->cache_size;
+
+			return spa + p->cache_size;
+		}
+	}
+
+	return ~0ULL;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_port_get_spa_cache_alias, "CXL");
+
 static int is_system_ram(struct resource *res, void *arg)
 {
 	struct cxl_region *cxlr = arg;
@@ -439,9 +439,10 @@ TRACE_EVENT(cxl_generic_event,
 TRACE_EVENT(cxl_general_media,
 
 	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
-		 struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec),
+		 struct cxl_region *cxlr, u64 hpa, u64 hpa_alias0,
+		 struct cxl_event_gen_media *rec),
 
-	TP_ARGS(cxlmd, log, cxlr, hpa, rec),
+	TP_ARGS(cxlmd, log, cxlr, hpa, hpa_alias0, rec),
 
 	TP_STRUCT__entry(
 		CXL_EVT_TP_entry
@@ -455,6 +456,7 @@ TRACE_EVENT(cxl_general_media,
 		__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
 		/* Following are out of order to pack trace record */
 		__field(u64, hpa)
+		__field(u64, hpa_alias0)
 		__field_struct(uuid_t, region_uuid)
 		__field(u16, validity_flags)
 		__field(u8, rank)
@@ -485,6 +487,7 @@ TRACE_EVENT(cxl_general_media,
 		       CXL_EVENT_GEN_MED_COMP_ID_SIZE);
 		__entry->validity_flags = get_unaligned_le16(&rec->media_hdr.validity_flags);
 		__entry->hpa = hpa;
+		__entry->hpa_alias0 = hpa_alias0;
 		if (cxlr) {
 			__assign_str(region_name);
 			uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
@@ -502,7 +505,7 @@ TRACE_EVENT(cxl_general_media,
 		"device=%x validity_flags='%s' " \
 		"comp_id=%s comp_id_pldm_valid_flags='%s' " \
 		"pldm_entity_id=%s pldm_resource_id=%s " \
-		"hpa=%llx region=%s region_uuid=%pUb " \
+		"hpa=%llx hpa_alias0=%llx region=%s region_uuid=%pUb " \
 		"cme_threshold_ev_flags='%s' cme_count=%u",
 		__entry->dpa, show_dpa_flags(__entry->dpa_flags),
 		show_event_desc_flags(__entry->descriptor),
@@ -517,7 +520,7 @@ TRACE_EVENT(cxl_general_media,
 			CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
 		show_pldm_resource_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
 			CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
-		__entry->hpa, __get_str(region_name), &__entry->region_uuid,
+		__entry->hpa, __entry->hpa_alias0, __get_str(region_name), &__entry->region_uuid,
 		show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags), __entry->cme_count
 	)
 );
@@ -576,9 +579,10 @@ TRACE_EVENT(cxl_general_media,
 TRACE_EVENT(cxl_dram,
 
 	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
-		 struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec),
+		 struct cxl_region *cxlr, u64 hpa, u64 hpa_alias0,
+		 struct cxl_event_dram *rec),
 
-	TP_ARGS(cxlmd, log, cxlr, hpa, rec),
+	TP_ARGS(cxlmd, log, cxlr, hpa, hpa_alias0, rec),
 
 	TP_STRUCT__entry(
 		CXL_EVT_TP_entry
@@ -594,6 +598,7 @@ TRACE_EVENT(cxl_dram,
 		__field(u32, row)
 		__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
 		__field(u64, hpa)
+		__field(u64, hpa_alias0)
 		__field_struct(uuid_t, region_uuid)
 		__field(u8, rank) /* Out of order to pack trace record */
 		__field(u8, bank_group) /* Out of order to pack trace record */
@@ -631,6 +636,7 @@ TRACE_EVENT(cxl_dram,
 		memcpy(__entry->cor_mask, &rec->correction_mask,
 		       CXL_EVENT_DER_CORRECTION_MASK_SIZE);
 		__entry->hpa = hpa;
+		__entry->hpa_alias0 = hpa_alias0;
 		if (cxlr) {
 			__assign_str(region_name);
 			uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
@@ -651,7 +657,7 @@ TRACE_EVENT(cxl_dram,
 		"validity_flags='%s' " \
 		"comp_id=%s comp_id_pldm_valid_flags='%s' " \
 		"pldm_entity_id=%s pldm_resource_id=%s " \
-		"hpa=%llx region=%s region_uuid=%pUb " \
+		"hpa=%llx hpa_alias0=%llx region=%s region_uuid=%pUb " \
 		"sub_channel=%u cme_threshold_ev_flags='%s' cvme_count=%u",
 		__entry->dpa, show_dpa_flags(__entry->dpa_flags),
 		show_event_desc_flags(__entry->descriptor),
@@ -669,7 +675,7 @@ TRACE_EVENT(cxl_dram,
 			CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
 		show_pldm_resource_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
 			CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
-		__entry->hpa, __get_str(region_name), &__entry->region_uuid,
+		__entry->hpa, __entry->hpa_alias0, __get_str(region_name), &__entry->region_uuid,
 		__entry->sub_channel, show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags),
 		__entry->cvme_count
 	)
@@ -917,6 +923,7 @@ TRACE_EVENT(cxl_poison,
 		__string(region, cxlr ? dev_name(&cxlr->dev) : "")
 		__field(u64, overflow_ts)
 		__field(u64, hpa)
+		__field(u64, hpa_alias0)
 		__field(u64, dpa)
 		__field(u32, dpa_length)
 		__array(char, uuid, 16)
@@ -939,16 +946,22 @@ TRACE_EVENT(cxl_poison,
 			memcpy(__entry->uuid, &cxlr->params.uuid, 16);
 			__entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd,
 						      __entry->dpa);
+			if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size)
+				__entry->hpa_alias0 = __entry->hpa +
+						      cxlr->params.cache_size;
+			else
+				__entry->hpa_alias0 = ULLONG_MAX;
 		} else {
 			__assign_str(region);
 			memset(__entry->uuid, 0, 16);
 			__entry->hpa = ULLONG_MAX;
+			__entry->hpa_alias0 = ULLONG_MAX;
 		}
 	),
 
 	TP_printk("memdev=%s host=%s serial=%lld trace_type=%s region=%s " \
-		"region_uuid=%pU hpa=0x%llx dpa=0x%llx dpa_length=0x%x " \
-		"source=%s flags=%s overflow_time=%llu",
+		"region_uuid=%pU hpa=0x%llx hpa_alias0=0x%llx dpa=0x%llx " \
+		"dpa_length=0x%x source=%s flags=%s overflow_time=%llu",
 		__get_str(memdev),
 		__get_str(host),
 		__entry->serial,
@@ -956,6 +969,7 @@ TRACE_EVENT(cxl_poison,
 		__get_str(region),
 		__entry->uuid,
 		__entry->hpa,
+		__entry->hpa_alias0,
 		__entry->dpa,
 		__entry->dpa_length,
 		show_poison_source(__entry->source),
@@ -467,6 +467,7 @@ enum cxl_config_state {
  * @res: allocated iomem capacity for this region
  * @targets: active ordered targets in current decoder configuration
  * @nr_targets: number of targets
+ * @cache_size: extended linear cache size if exists, otherwise zero.
  *
  * State transitions are protected by the cxl_region_rwsem
  */
@@ -478,6 +479,7 @@ struct cxl_region_params {
 	struct resource *res;
 	struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
 	int nr_targets;
+	resource_size_t cache_size;
 };
 
 enum cxl_partition_mode {
@@ -857,6 +859,7 @@ struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
 int cxl_add_to_region(struct cxl_port *root,
 		      struct cxl_endpoint_decoder *cxled);
 struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
+u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
 #else
 static inline bool is_cxl_pmem_region(struct device *dev)
 {
@@ -875,6 +878,11 @@ static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
 {
 	return NULL;
 }
+static inline u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint,
+					       u64 spa)
+{
+	return 0;
+}
 #endif
 
 void cxl_endpoint_parse_cdat(struct cxl_port *port);
@@ -510,6 +510,7 @@ static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
  * @poison: poison driver state info
  * @security: security driver state info
  * @fw: firmware upload / activation state
+ * @mce_notifier: MCE notifier
  *
  * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
  * details on capacity parameters.
@@ -531,6 +532,7 @@ struct cxl_memdev_state {
 	struct cxl_poison_state poison;
 	struct cxl_security_state security;
 	struct cxl_fw_state fw;
+	struct notifier_block mce_notifier;
 };
 
 static inline struct cxl_memdev_state *
@@ -1095,6 +1095,17 @@ static inline acpi_handle acpi_get_processor_handle(int cpu)
 
 #endif	/* !CONFIG_ACPI */
 
+#ifdef CONFIG_ACPI_HMAT
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+					resource_size_t *size);
+#else
+static inline int hmat_get_extended_linear_cache_size(struct resource *backing_res,
+						      int nid, resource_size_t *size)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
 extern void arch_post_acpi_subsys_init(void);
 
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
@@ -57,6 +57,11 @@ enum cache_write_policy {
 	NODE_CACHE_WRITE_OTHER,
 };
 
+enum cache_mode {
+	NODE_CACHE_ADDR_MODE_RESERVED,
+	NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR,
+};
+
 /**
  * struct node_cache_attrs - system memory caching attributes
  *
@@ -65,6 +70,7 @@ enum cache_write_policy {
  * @size: Total size of cache in bytes
  * @line_size: Number of bytes fetched on a cache miss
  * @level: The cache hierarchy level
+ * @address_mode: The address mode
  */
 struct node_cache_attrs {
 	enum cache_indexing indexing;
@@ -72,6 +78,7 @@ struct node_cache_attrs {
 	u64 size;
 	u16 line_size;
 	u8 level;
+	u16 address_mode;
 };
 
 #ifdef CONFIG_HMEM_REPORTING
@@ -62,8 +62,10 @@ cxl_core-y += $(CXL_CORE_SRC)/hdm.o
 cxl_core-y += $(CXL_CORE_SRC)/pmu.o
 cxl_core-y += $(CXL_CORE_SRC)/cdat.o
 cxl_core-y += $(CXL_CORE_SRC)/ras.o
+cxl_core-y += $(CXL_CORE_SRC)/acpi.o
 cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
 cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
+cxl_core-$(CONFIG_CXL_MCE) += $(CXL_CORE_SRC)/mce.o
 cxl_core-y += config_check.o
 cxl_core-y += cxl_core_test.o
 cxl_core-y += cxl_core_exports.o