
Merge tag 'mm-stable-2025-03-30-16-52' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull MM updates from Andrew Morton:

- The series "Enable strict percpu address space checks" from Uros Bizjak uses x86 named address space qualifiers to provide compile-time checking of percpu area accesses. This has caused a small amount of fallout - two or three issues were reported. In all cases the calling code was found to be incorrect.
- The series "Some cleanup for memcg" from Chen Ridong implements some relatively monir cleanups for the memcontrol code. - The series "mm: fixes for device-exclusive entries (hmm)" from David Hildenbrand fixes a boatload of issues which David found then using device-exclusive PTE entries when THP is enabled. More work is needed, but this makes thins better - our own HMM selftests now succeed. - The series "mm: zswap: remove z3fold and zbud" from Yosry Ahmed remove the z3fold and zbud implementations. They have been deprecated for half a year and nobody has complained. - The series "mm: further simplify VMA merge operation" from Lorenzo Stoakes implements numerous simplifications in this area. No runtime effects are anticipated. - The series "mm/madvise: remove redundant mmap_lock operations from process_madvise()" from SeongJae Park rationalizes the locking in the madvise() implementation. Performance gains of 20-25% were observed in one MADV_DONTNEED microbenchmark. - The series "Tiny cleanup and improvements about SWAP code" from Baoquan He contains a number of touchups to issues which Baoquan noticed when working on the swap code. - The series "mm: kmemleak: Usability improvements" from Catalin Marinas implements a couple of improvements to the kmemleak user-visible output. - The series "mm/damon/paddr: fix large folios access and schemes handling" from Usama Arif provides a couple of fixes for DAMON's handling of large folios. - The series "mm/damon/core: fix wrong and/or useless damos_walk() behaviors" from SeongJae Park fixes a few issues with the accuracy of kdamond's walking of DAMON regions. - The series "expose mapping wrprotect, fix fb_defio use" from Lorenzo Stoakes changes the interaction between framebuffer deferred-io and core MM. No functional changes are anticipated - this is preparatory work for the future removal of page structure fields. - The series "mm/damon: add support for hugepage_size DAMOS filter" from Usama Arif adds a DAMOS filter which permits the filtering by huge page sizes. - The series "mm: permit guard regions for file-backed/shmem mappings" from Lorenzo Stoakes extends the guard region feature from its present "anon mappings only" state. The feature now covers shmem and file-backed mappings. - The series "mm: batched unmap lazyfree large folios during reclamation" from Barry Song cleans up and speeds up the unmapping for pte-mapped large folios. - The series "reimplement per-vma lock as a refcount" from Suren Baghdasaryan puts the vm_lock back into the vma. Our reasons for pulling it out were largely bogus and that change made the code more messy. This patchset provides small (0-10%) improvements on one microbenchmark. - The series "Docs/mm/damon: misc DAMOS filters documentation fixes and improves" from SeongJae Park does some maintenance work on the DAMON docs. - The series "hugetlb/CMA improvements for large systems" from Frank van der Linden addresses a pile of issues which have been observed when using CMA on large machines. - The series "mm/damon: introduce DAMOS filter type for unmapped pages" from SeongJae Park enables users of DMAON/DAMOS to filter my the page's mapped/unmapped status. - The series "zsmalloc/zram: there be preemption" from Sergey Senozhatsky teaches zram to run its compression and decompression operations preemptibly. - The series "selftests/mm: Some cleanups from trying to run them" from Brendan Jackman fixes a pile of unrelated issues which Brendan encountered while runnimg our selftests. 
- The series "fs/proc/task_mmu: add guard region bit to pagemap" from Lorenzo Stoakes permits userspace to use /proc/pid/pagemap to determine whether a particular page is a guard page. - The series "mm, swap: remove swap slot cache" from Kairui Song removes the swap slot cache from the allocation path - it simply wasn't being effective. - The series "mm: cleanups for device-exclusive entries (hmm)" from David Hildenbrand implements a number of unrelated cleanups in this code. - The series "mm: Rework generic PTDUMP configs" from Anshuman Khandual implements a number of preparatoty cleanups to the GENERIC_PTDUMP Kconfig logic. - The series "mm/damon: auto-tune aggregation interval" from SeongJae Park implements a feedback-driven automatic tuning feature for DAMON's aggregation interval tuning. - The series "Fix lazy mmu mode" from Ryan Roberts fixes some issues in powerpc, sparc and x86 lazy MMU implementations. Ryan did this in preparation for implementing lazy mmu mode for arm64 to optimize vmalloc. - The series "mm/page_alloc: Some clarifications for migratetype fallback" from Brendan Jackman reworks some commentary to make the code easier to follow. - The series "page_counter cleanup and size reduction" from Shakeel Butt cleans up the page_counter code and fixes a size increase which we accidentally added late last year. - The series "Add a command line option that enables control of how many threads should be used to allocate huge pages" from Thomas Prescher does that. It allows the careful operator to significantly reduce boot time by tuning the parallalization of huge page initialization. - The series "Fix calculations in trace_balance_dirty_pages() for cgwb" from Tang Yizhou fixes the tracing output from the dirty page balancing code. - The series "mm/damon: make allow filters after reject filters useful and intuitive" from SeongJae Park improves the handling of allow and reject filters. Behaviour is made more consistent and the documention is updated accordingly. - The series "Switch zswap to object read/write APIs" from Yosry Ahmed updates zswap to the new object read/write APIs and thus permits the removal of some legacy code from zpool and zsmalloc. - The series "Some trivial cleanups for shmem" from Baolin Wang does as it claims. - The series "fs/dax: Fix ZONE_DEVICE page reference counts" from Alistair Popple regularizes the weird ZONE_DEVICE page refcount handling in DAX, permittig the removal of a number of special-case checks. - The series "refactor mremap and fix bug" from Lorenzo Stoakes is a preparatoty refactoring and cleanup of the mremap() code. - The series "mm: MM owner tracking for large folios (!hugetlb) + CONFIG_NO_PAGE_MAPCOUNT" from David Hildenbrand reworks the manner in which we determine whether a large folio is known to be mapped exclusively into a single MM. - The series "mm/damon: add sysfs dirs for managing DAMOS filters based on handling layers" from SeongJae Park adds a couple of new sysfs directories to ease the management of DAMON/DAMOS filters. - The series "arch, mm: reduce code duplication in mem_init()" from Mike Rapoport consolidates many per-arch implementations of mem_init() into code generic code, where that is practical. - The series "mm/damon/sysfs: commit parameters online via damon_call()" from SeongJae Park continues the cleaning up of sysfs access to DAMON internal data. 
- The series "mm: page_ext: Introduce new iteration API" from Luiz Capitulino reworks the page_ext initialization to fix a boot-time crash which was observed with an unusual combination of compile and cmdline options. - The series "Buddy allocator like (or non-uniform) folio split" from Zi Yan reworks the code to split a folio into smaller folios. The main benefit is lessened memory consumption: fewer post-split folios are generated. - The series "Minimize xa_node allocation during xarry split" from Zi Yan reduces the number of xarray xa_nodes which are generated during an xarray split. - The series "drivers/base/memory: Two cleanups" from Gavin Shan performs some maintenance work on the drivers/base/memory code. - The series "Add tracepoints for lowmem reserves, watermarks and totalreserve_pages" from Martin Liu adds some more tracepoints to the page allocator code. - The series "mm/madvise: cleanup requests validations and classifications" from SeongJae Park cleans up some warts which SeongJae observed during his earlier madvise work. - The series "mm/hwpoison: Fix regressions in memory failure handling" from Shuai Xue addresses two quite serious regressions which Shuai has observed in the memory-failure implementation. - The series "mm: reliable huge page allocator" from Johannes Weiner makes huge page allocations cheaper and more reliable by reducing fragmentation. - The series "Minor memcg cleanups & prep for memdescs" from Matthew Wilcox is preparatory work for the future implementation of memdescs. - The series "track memory used by balloon drivers" from Nico Pache introduces a way to track memory used by our various balloon drivers. - The series "mm/damon: introduce DAMOS filter type for active pages" from Nhat Pham permits users to filter for active/inactive pages, separately for file and anon pages. - The series "Adding Proactive Memory Reclaim Statistics" from Hao Jia separates the proactive reclaim statistics from the direct reclaim statistics. - The series "mm/vmscan: don't try to reclaim hwpoison folio" from Jinjiang Tu fixes our handling of hwpoisoned pages within the reclaim code. * tag 'mm-stable-2025-03-30-16-52' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (431 commits) mm/page_alloc: remove unnecessary __maybe_unused in order_to_pindex() x86/mm: restore early initialization of high_memory for 32-bits mm/vmscan: don't try to reclaim hwpoison folio mm/hwpoison: introduce folio_contain_hwpoisoned_page() helper cgroup: docs: add pswpin and pswpout items in cgroup v2 doc mm: vmscan: split proactive reclaim statistics from direct reclaim statistics selftests/mm: speed up split_huge_page_test selftests/mm: uffd-unit-tests support for hugepages > 2M docs/mm/damon/design: document active DAMOS filter type mm/damon: implement a new DAMOS filter type for active pages fs/dax: don't disassociate zero page entries MM documentation: add "Unaccepted" meminfo entry selftests/mm: add commentary about 9pfs bugs fork: use __vmalloc_node() for stack allocation docs/mm: Physical Memory: Populate the "Zones" section xen: balloon: update the NR_BALLOON_PAGES state hv_balloon: update the NR_BALLOON_PAGES state balloon_compaction: update the NR_BALLOON_PAGES state meminfo: add a per node counter for balloon drivers mm: remove references to folio in __memcg_kmem_uncharge_page() ...
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/lockdep.h>
#include <linux/list_sort.h>

#include <asm/machine.h>
#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota, u8 *status)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc;

	fib.pba = base;
	/* Work around off by one in ISM virt device */
	if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
		fib.pal = limit + (1 << 12);
	else
		fib.pal = limit;
	fib.iota = iota;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, *status);
	return cc;
}
EXPORT_SYMBOL_GPL(zpci_register_ioat);

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_iommu_ctrs *ctrs;
	struct zpci_fib fib = {0};
	unsigned long flags;
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	spin_lock_irqsave(&zdev->dom_lock, flags);
	ctrs = zpci_get_iommu_ctrs(zdev);
	if (ctrs) {
		atomic64_set(&ctrs->mapped_pages, 0);
		atomic64_set(&ctrs->unmapped_pages, 0);
		atomic64_set(&ctrs->global_rpcits, 0);
		atomic64_set(&ctrs->sync_map_rpcits, 0);
		atomic64_set(&ctrs->sync_rpcits, 0);
	}
	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	fib.gd = zdev->gisa;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   pgprot_t prot)
{
	/*
	 * When PCI MIO instructions are unavailable the "physical" address
	 * encodes a hint for accessing the PCI memory space it represents.
	 * Just pass it unchanged such that ioread/iowrite can decode it.
	 */
	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *)phys_addr;

	return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
	int bar, idx;

	spin_lock(&zpci_iomap_lock);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!zdev->bars[bar].size)
			continue;
		idx = zdev->bars[bar].map_idx;
		if (!zpci_iomap_start[idx].count)
			continue;
		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
	}
	spin_unlock(&zpci_iomap_lock);
}

void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
	if (!fh || zdev->fh == fh)
		return;

	zdev->fh = fh;
	if (zpci_use_mio(zdev))
		return;
	if (zdev->has_resources && zdev_enabled(zdev))
		zpci_do_update_iomap_fh(zdev, fh);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
	}
	zdev->has_resources = 1;

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	struct resource *res;
	int i;

	pci_lock_rescan_remove();
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = zdev->bars[i].res;
		if (!res)
			continue;

		release_resource(res);
		pci_bus_remove_resource(zdev->zbus->bus, res);
		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		zdev->bars[i].res = NULL;
		kfree(res);
	}
	zdev->has_resources = 0;
	pci_unlock_rescan_remove();
}

int pcibios_device_add(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zpci_update_fh(zdev, fh);
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_reenable_device(struct zpci_dev *zdev)
{
	u8 status;
	int rc;

	rc = zpci_enable_device(zdev);
	if (rc)
		return rc;

	rc = zpci_iommu_register_ioat(zdev, &status);
	if (rc)
		zpci_disable_device(zdev);

	return rc;
}
EXPORT_SYMBOL_GPL(zpci_reenable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zpci_update_fh(zdev, fh);
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zpci_update_fh(zdev, fh);
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

/**
 * zpci_hot_reset_device - perform a reset of the given zPCI function
 * @zdev: the slot which should be reset
 *
 * Performs a low level reset of the zPCI function. The reset is low level in
 * the sense that the zPCI function can be reset without detaching it from the
 * common PCI subsystem. The reset may be performed while under control of
 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
 * table is reinstated at the end of the reset.
 *
 * After the reset the functions internal state is reset to an initial state
 * equivalent to its state during boot when first probing a driver.
 * Consequently after reset the PCI function requires re-initialization via the
 * common PCI code including re-enabling IRQs via pci_alloc_irq_vectors()
 * and enabling the function via e.g. pci_enable_device_flags(). The caller
 * must guard against concurrent reset attempts.
 *
 * In most cases this function should not be called directly but through
 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
 * locking - asserted by lockdep.
 *
 * Return: 0 on success and an error value otherwise
 */
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
	if (zdev_enabled(zdev)) {
		/* Disables device access, DMAs and IRQs (reset state) */
		rc = zpci_disable_device(zdev);
		/*
		 * Due to a z/VM vs LPAR inconsistency in the error state the
		 * FH may indicate an enabled device but disable says the
		 * device is already disabled don't treat it as an error here.
		 */
		if (rc == -EINVAL)
			rc = 0;
		if (rc)
			return rc;
	}

	rc = zpci_reenable_device(zdev);

	return rc;
}

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Allocates a new struct zpci_dev and queries the platform for its details.
 * If successful the device can subsequently be added to the zPCI subsystem
 * using zpci_add_device().
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	mutex_init(&zdev->state_lock);
	mutex_init(&zdev->fmb_lock);
	mutex_init(&zdev->kzdev_lock);

	return zdev;

error:
	zpci_dbg(0, "crt fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

/**
 * zpci_add_device() - Add a previously created zPCI device to the zPCI subsystem
 * @zdev: The zPCI device to be added
 *
 * A struct zpci_dev is added to the zPCI subsystem and to a virtual PCI bus creating
 * a new one as necessary. A hotplug slot is created and events start to be handled.
 * If successful from this point on zpci_zdev_get() and zpci_zdev_put() must be used.
 * If adding the struct zpci_dev fails the device was not added and should be freed.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_add_device(struct zpci_dev *zdev)
{
	int rc;

	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state);
	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	kref_init(&zdev->kref);
	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);
	return 0;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc);
	return rc;
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
		state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If any failure occurs,
 * the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	zpci_update_fh(zdev, fh);
	return zpci_bus_scan_device(zdev);
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to configure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
		return 0;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev_enabled(zdev)) {
		rc = zpci_disable_device(zdev);
		if (rc)
			return rc;
	}

	rc = sclp_pci_deconfigure(zdev->fid);
	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
	if (rc)
		return rc;
	zdev->state = ZPCI_FN_STATE_STANDBY;

	return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev can not be found via
 * get_zdev_by_fid() anymore but may still be accessible via existing
 * references though it will not be functional anymore.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	/*
	 * Remove device from zpci_list as it is going away. This also
	 * makes sure we ignore subsequent zPCI events for this device.
	 */
	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	zpci_zdev_put(zdev);
}

void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);

	WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED);

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev_enabled(zdev))
		zpci_disable_device(zdev);

	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);

	if (zdev->has_resources)
		zpci_cleanup_bus_resources(zdev);

	zpci_bus_device_unregister(zdev);
	zpci_destroy_iommu(zdev);
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree_rcu(zdev, rcu);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

/**
 * zpci_clear_error_state() - Clears the zPCI error state of the device
 * @zdev: The zdev for which the zPCI error state should be reset
 *
 * Clear the zPCI error state of the device. If clearing the zPCI error state
 * fails the device is left in the error state. In this case it may make sense
 * to call zpci_io_perm_failure() on the associated pdev if it exists.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_clear_error_state(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

/**
 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
 * @zdev: The zdev for which to unblock load/store access
 *
 * Re-enables load/store access for a PCI function in the error state while
 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
 * if error recovery is possible while catching any rogue DMA access from the
 * device.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		clear_machine_feature(MFEATURE_PCI_MIO);
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int zpci_cmp_rid(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct zpci_dev *za = container_of(a, struct zpci_dev, entry);
	struct zpci_dev *zb = container_of(b, struct zpci_dev, entry);

	/*
	 * PCI functions without RID available maintain original order
	 * between themselves but sort before those with RID.
	 */
	if (za->rid == zb->rid)
		return za->rid_available > zb->rid_available;
	/*
	 * PCI functions with RID sort by RID ascending.
	 */
	return za->rid > zb->rid;
}

static void zpci_add_devices(struct list_head *scan_list)
{
	struct zpci_dev *zdev, *tmp;

	list_sort(NULL, scan_list, &zpci_cmp_rid);
	list_for_each_entry_safe(zdev, tmp, scan_list, entry) {
		list_del_init(&zdev->entry);
		if (zpci_add_device(zdev))
			kfree(zdev);
	}
}

int zpci_scan_devices(void)
{
	LIST_HEAD(scan_list);
	int rc;

	rc = clp_scan_pci_devices(&scan_list);
	if (rc)
		return rc;

	zpci_add_devices(&scan_list);
	zpci_bus_scan_busses();
	return 0;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (test_machine_feature(MFEATURE_PCI_MIO)) {
		static_branch_enable(&have_mio);
		system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_scan_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);
|