mm/filemap: use xas_try_split() in __filemap_add_folio()
Patch series "Minimize xa_node allocation during xarry split", v3. When splitting a multi-index entry in XArray from order-n to order-m, existing xas_split_alloc()+xas_split() approach requires 2^(n % XA_CHUNK_SHIFT) xa_node allocations. But its callers, __filemap_add_folio() and shmem_split_large_entry(), use at most 1 xa_node. To minimize xa_node allocation and remove the limitation of no split from order-12 (or above) to order-0 (or anything between 0 and 5)[1], xas_try_split() was added[2], which allocates (n / XA_CHUNK_SHIFT - m / XA_CHUNK_SHIFT) xa_node. It is used for non-uniform folio split, but can be used by __filemap_add_folio() and shmem_split_large_entry(). xas_split_alloc() and xas_split() split an order-9 to order-0: --------------------------------- | | | | | | | | | | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | | | | | | | | | --------------------------------- | | | | ------- --- --- ------- | | ... | | V V V V ----------- ----------- ----------- ----------- | xa_node | | xa_node | ... | xa_node | | xa_node | ----------- ----------- ----------- ----------- xas_try_split() splits an order-9 to order-0: --------------------------------- | | | | | | | | | | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | | | | | | | | | --------------------------------- | | V ----------- | xa_node | ----------- xas_try_split() is designed to be called iteratively with n = m + 1. xas_try_split_mini_order() is added to minmize the number of calls to xas_try_split() by telling the caller the next minimal order to split to instead of n - 1. Splitting order-n to order-m when m= l * XA_CHUNK_SHIFT does not require xa_node allocation and requires 1 xa_node when n=l * XA_CHUNK_SHIFT and m = n - 1, so it is OK to use xas_try_split() with n > m + 1 when no new xa_node is needed. xfstests quick group test passed on xfs and tmpfs. [1] https://lore.kernel.org/linux-mm/Z6YX3RznGLUD07Ao@casper.infradead.org/ [2] https://lore.kernel.org/linux-mm/20250226210032.2044041-1-ziy@nvidia.com/ This patch (of 2): During __filemap_add_folio(), a shadow entry is covering n slots and a folio covers m slots with m < n is to be added. Instead of splitting all n slots, only the m slots covered by the folio need to be split and the remaining n-m shadow entries can be retained with orders ranging from m to n-1. This method only requires (n/XA_CHUNK_SHIFT) - (m/XA_CHUNK_SHIFT) new xa_nodes instead of (n % XA_CHUNK_SHIFT) * ((n/XA_CHUNK_SHIFT) - (m/XA_CHUNK_SHIFT)) new xa_nodes, compared to the original xas_split_alloc() + xas_split() one. For example, to insert an order-0 folio when an order-9 shadow entry is present (assuming XA_CHUNK_SHIFT is 6), 1 xa_node is needed instead of 8. xas_try_split_min_order() is introduced to reduce the number of calls to xas_try_split() during split. Link: https://lkml.kernel.org/r/20250314222113.711703-1-ziy@nvidia.com Link: https://lkml.kernel.org/r/20250314222113.711703-2-ziy@nvidia.com Signed-off-by: Zi Yan <ziy@nvidia.com> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Hugh Dickins <hughd@google.com> Cc: Kairui Song <kasong@tencent.com> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Mattew Wilcox <willy@infradead.org> Cc: David Hildenbrand <david@redhat.com> Cc: John Hubbard <jhubbard@nvidia.com> Cc: Kefeng Wang <wangkefeng.wang@huawei.com> Cc: Kirill A. Shuemov <kirill.shutemov@linux.intel.com> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Yang Shi <yang@os.amperecomputing.com> Cc: Yu Zhao <yuzhao@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 80a5c494c8
commit 200a89c159
7 include/linux/xarray.h

@@ -1556,6 +1556,7 @@ int xas_get_order(struct xa_state *xas);
 void xas_split(struct xa_state *, void *entry, unsigned int order);
 void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
 void xas_try_split(struct xa_state *xas, void *entry, unsigned int order);
+unsigned int xas_try_split_min_order(unsigned int order);
 #else
 static inline int xa_get_order(struct xarray *xa, unsigned long index)
 {
@@ -1582,6 +1583,12 @@ static inline void xas_try_split(struct xa_state *xas, void *entry,
				 unsigned int order)
 {
 }
+
+static inline unsigned int xas_try_split_min_order(unsigned int order)
+{
+	return 0;
+}
+
 #endif
 
 /**
25 lib/xarray.c
@@ -1134,6 +1134,28 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
 }
 EXPORT_SYMBOL_GPL(xas_split);
 
+/**
+ * xas_try_split_min_order() - Minimal split order xas_try_split() can accept
+ * @order: Current entry order.
+ *
+ * xas_try_split() can split a multi-index entry to smaller than @order - 1 if
+ * no new xa_node is needed. This function provides the minimal order
+ * xas_try_split() supports.
+ *
+ * Return: the minimal order xas_try_split() supports
+ *
+ * Context: Any context.
+ *
+ */
+unsigned int xas_try_split_min_order(unsigned int order)
+{
+	if (order % XA_CHUNK_SHIFT == 0)
+		return order == 0 ? 0 : order - 1;
+
+	return order - (order % XA_CHUNK_SHIFT);
+}
+EXPORT_SYMBOL_GPL(xas_try_split_min_order);
+
 /**
  * xas_try_split() - Try to split a multi-index entry.
  * @xas: XArray operation state.
@@ -1145,6 +1167,9 @@ EXPORT_SYMBOL_GPL(xas_split);
  * needed, the function will use GFP_NOWAIT to get one if xas->xa_alloc is
  * NULL. If more new xa_nodes are needed, the function gives EINVAL error.
  *
+ * NOTE: use xas_try_split_min_order() to get the next split order instead of
+ * @order - 1 if you want to minimize xas_try_split() calls.
+ *
 * Context: Any context.  The caller should hold the xa_lock.
 */
 void xas_try_split(struct xa_state *xas, void *entry, unsigned int order)
45 mm/filemap.c
@@ -857,11 +857,10 @@ EXPORT_SYMBOL_GPL(replace_page_cache_folio);
 noinline int __filemap_add_folio(struct address_space *mapping,
		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
 {
-	XA_STATE(xas, &mapping->i_pages, index);
-	void *alloced_shadow = NULL;
-	int alloced_order = 0;
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
 	bool huge;
 	long nr;
+	unsigned int forder = folio_order(folio);
 
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
@@ -870,7 +869,6 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 	mapping_set_update(&xas, mapping);
 
 	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
-	xas_set_order(&xas, index, folio_order(folio));
 	huge = folio_test_hugetlb(folio);
 	nr = folio_nr_pages(folio);
 
@@ -880,7 +878,7 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 	folio->index = xas.xa_index;
 
 	for (;;) {
-		int order = -1, split_order = 0;
+		int order = -1;
 		void *entry, *old = NULL;
 
 		xas_lock_irq(&xas);
@@ -898,21 +896,25 @@ noinline int __filemap_add_folio(struct address_space *mapping,
				order = xas_get_order(&xas);
 		}
 
-		/* entry may have changed before we re-acquire the lock */
-		if (alloced_order && (old != alloced_shadow || order != alloced_order)) {
-			xas_destroy(&xas);
-			alloced_order = 0;
-		}
-
 		if (old) {
-			if (order > 0 && order > folio_order(folio)) {
+			if (order > 0 && order > forder) {
+				unsigned int split_order = max(forder,
						xas_try_split_min_order(order));
+
 				/* How to handle large swap entries? */
 				BUG_ON(shmem_mapping(mapping));
-				if (!alloced_order) {
-					split_order = order;
-					goto unlock;
+
+				while (order > forder) {
+					xas_set_order(&xas, index, split_order);
+					xas_try_split(&xas, old, order);
+					if (xas_error(&xas))
+						goto unlock;
+					order = split_order;
+					split_order =
+						max(xas_try_split_min_order(
							    split_order),
						    forder);
				}
-				xas_split(&xas, old, order);
 				xas_reset(&xas);
 			}
 			if (shadowp)
@@ -936,17 +938,6 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 unlock:
		xas_unlock_irq(&xas);
 
-		/* split needed, alloc here and retry. */
-		if (split_order) {
-			xas_split_alloc(&xas, old, split_order, gfp);
-			if (xas_error(&xas))
-				goto error;
-			alloced_shadow = old;
-			alloced_order = split_order;
-			xas_reset(&xas);
-			continue;
-		}
-
		if (!xas_nomem(&xas, gfp))
			break;
	}
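Editor's note: since xas_try_split() allocates its single xa_node under
the lock (GFP_NOWAIT), the deleted split_order/alloced_order
bookkeeping, which existed so xas_split_alloc() could run outside the
lock and the entry could be re-validated afterwards, is no longer
needed; the loop falls back to the standard XArray forward-progress
idiom.  A sketch of that idiom follows, with store_entry() as a
hypothetical helper for illustration, not part of the patch:

/*
 * Sketch of the usual XArray retry pattern: if a store fails for lack
 * of memory, xas_nomem() allocates outside the lock and requests a
 * retry of the loop body.
 */
#include <linux/xarray.h>

static void store_entry(struct xarray *xa, unsigned long index,
			void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, entry);
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));
}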