Mirror of https://github.com/torvalds/linux.git (synced 2025-04-11 04:53:02 +00:00)

On ARMv7 / v7M machines, read the CTR and CLIDR registers to provide information regarding the cache topology. Earlier machines should describe the full cache topology in the device tree.

Note: this follows the ARM64 cacheinfo support and provides only the minimal support required to bootstrap cache info. All useful properties should be described in the Device Tree.

Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
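As a minimal, hedged illustration of the registers the commit message above refers to, the sketch below decodes a CTR value (smallest I-cache and D-cache line sizes) and a CLIDR value (number of implemented cache levels) according to the ARMv7 register layouts in the ARM Architecture Reference Manual. It is not the kernel's cacheinfo code: in the kernel these registers are read at PL1 (CTR via "mrc p15, 0, Rt, c0, c0, 1", CLIDR via "mrc p15, 1, Rt, c0, c0, 1"), so this example instead uses assumed, Cortex-A9-like register values to keep the decode logic runnable anywhere.

#include <stdio.h>

/*
 * Illustrative sketch only (not the kernel implementation): decode the
 * ARMv7 CTR and CLIDR field layouts from the ARM ARM.
 */

/* CTR.DminLine, bits [19:16]: log2 of the words in the smallest D-cache line. */
static unsigned int ctr_dcache_line_bytes(unsigned int ctr)
{
	return 4U << ((ctr >> 16) & 0xf);
}

/* CTR.IminLine, bits [3:0]: log2 of the words in the smallest I-cache line. */
static unsigned int ctr_icache_line_bytes(unsigned int ctr)
{
	return 4U << (ctr & 0xf);
}

/*
 * CLIDR encodes one 3-bit Ctype field per cache level (up to seven levels);
 * a value of 0 means no cache is implemented at that level, so decoding stops.
 */
static unsigned int clidr_cache_levels(unsigned int clidr)
{
	unsigned int level;

	for (level = 0; level < 7; level++)
		if (((clidr >> (level * 3)) & 0x7) == 0)
			break;
	return level;
}

int main(void)
{
	/* Assumed example values, roughly Cortex-A9-like. */
	unsigned int ctr = 0x83338003;
	unsigned int clidr = 0x09200003;

	printf("D-line %u bytes, I-line %u bytes, %u cache level(s)\n",
	       ctr_dcache_line_bytes(ctr), ctr_icache_line_bytes(ctr),
	       clidr_cache_levels(clidr));
	return 0;
}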
36 lines | 896 B | C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/arm/include/asm/cache.h
 */
#ifndef __ASMARM_CACHE_H
#define __ASMARM_CACHE_H

#define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

/*
 * Memory returned by kmalloc() may be used for DMA, so we must make
 * sure that all such allocations are cache aligned. Otherwise,
 * unrelated code may cause parts of the buffer to be read into the
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES

/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN 8
#endif

#define __read_mostly __section(".data..read_mostly")

#ifndef __ASSEMBLY__
#ifdef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
int cache_line_size(void);
#endif
#endif

#endif
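The cache_line_size() declaration above only takes effect when CONFIG_ARCH_HAS_CACHE_LINE_SIZE is selected; otherwise the generic fallback in include/linux/cache.h expands cache_line_size() to L1_CACHE_BYTES. Below is a hedged sketch of how such a function could be backed by CTR.DminLine. It is an assumption for illustration, not necessarily the implementation this commit adds elsewhere in the tree; the only helper it relies on is the existing arch/arm read_cpuid_cachetype(), which returns the CTR value.

#include <linux/cache.h>
#include <asm/cputype.h>

/*
 * Sketch only: derive the smallest D-cache line size from CTR.DminLine
 * when the register uses the ARMv7 format (bits [31:29] == 0b100), and
 * fall back to the compile-time L1_CACHE_BYTES otherwise. Assumes
 * CONFIG_ARCH_HAS_CACHE_LINE_SIZE is enabled so no generic macro clashes.
 */
int cache_line_size(void)
{
	unsigned int ctr = read_cpuid_cachetype();

	if ((ctr >> 29) == 0x4)                  /* ARMv7 CTR format */
		return 4 << ((ctr >> 16) & 0xf); /* 4 bytes per word */

	return L1_CACHE_BYTES;
}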