linux/mm/ioremap.c
Ryan Roberts 86758b5048 mm/ioremap: pass pgprot_t to ioremap_prot() instead of unsigned long
ioremap_prot() currently accepts its pgprot_val parameter as an unsigned
long, implicitly assuming that pgprot_val and pgprot_t can never be wider
than unsigned long.  This assumption will soon stop being true on arm64
when using D128 pgtables: in the 128-bit page table configuration,
unsigned long is 64 bits, but pgprot_t is 128 bits.

Passing the platform-abstracted pgprot_t type is preferable to a
fixed-size data type.  Let's change the parameter to pass pgprot_t
directly, matching the similar helper generic_ioremap_prot().

Without this change in place, the D128 configuration does not work on
arm64, as the top 64 bits get silently stripped when passing the
protection value to this function.
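
For illustration, the resulting interface change looks roughly like this
(a condensed sketch of the prototype only, not the full diff):

	/* before: protection bits squeezed into an unsigned long */
	void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
				   unsigned long prot);

	/* after: the full pgprot_t is passed through, so no bits are lost */
	void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot);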

Link: https://lkml.kernel.org/r/20250218101954.415331-1-anshuman.khandual@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Co-developed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-03-16 22:06:23 -07:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/ioremap.h>

void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* An early platform driver might end up here */
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	/* Disallow wrap-around or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Page-align mappings */
	offset = phys_addr & (~PAGE_MASK);
	phys_addr -= offset;
	size = PAGE_ALIGN(size + offset);
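	/*
	 * Worked example of the alignment above (illustrative values,
	 * assuming 4K pages): phys_addr = 0x10001234, size = 0x100
	 *   offset    = 0x234
	 *   phys_addr = 0x10001000
	 *   size      = PAGE_ALIGN(0x100 + 0x234) = 0x1000
	 * The sub-page offset is added back to the returned pointer below.
	 */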
	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START,
				    IOREMAP_END, __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}

#ifndef ioremap_prot
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   pgprot_t prot)
{
	return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);
#endif
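
/*
 * Sketch of how an architecture overrides the generic fallback above
 * (illustrative only, not part of this file): in its asm/io.h the arch
 * declares its own implementation and defines the macro so that the
 * #ifndef block here is compiled out:
 *
 *	void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 *				   pgprot_t prot);
 *	#define ioremap_prot ioremap_prot
 */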

void generic_iounmap(volatile void __iomem *addr)
{
	void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);

	if (is_ioremap_addr(vaddr))
		vunmap(vaddr);
}

#ifndef iounmap
void iounmap(volatile void __iomem *addr)
{
	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
#endif
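
For reference, a minimal caller might look like the sketch below. The
physical address, offset, and register value are made up for
illustration, and pgprot_noncached(), PAGE_KERNEL and SZ_4K are assumed
to be available on the target architecture.

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/sizes.h>

	static void __iomem *regs;

	static int example_map(void)
	{
		/* Map 4 KiB of MMIO at a hypothetical device address, uncached */
		regs = ioremap_prot(0xfe200000, SZ_4K,
				    pgprot_noncached(PAGE_KERNEL));
		if (!regs)
			return -ENOMEM;

		writel(0x1, regs + 0x04);	/* hypothetical register write */

		iounmap(regs);
		return 0;
	}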