Merge tag 'gfs2-for-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - Fix two bugs related to locking request cancelation (locking request
   being retried instead of canceled; canceling the wrong locking
   request)

 - Prevent a race between inode creation and deferred delete analogous
   to commit ffd1cf0443a2 from 6.13. This now allows gfs2_evict_inode()
   to be simplified further without introducing mysterious problems

 - When an inode delete should be verified / retried "later" but that
   isn't possible, skip the delete instead of carrying it out
   immediately. This broke in 6.13

 - More folio conversions from Matthew Wilcox (plus a fix from Dan
   Carpenter)

 - Various minor fixes and cleanups

* tag 'gfs2-for-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (22 commits)
  gfs2: some comment clarifications
  gfs2: Fix a NULL vs IS_ERR() bug in gfs2_find_jhead()
  gfs2: Convert gfs2_meta_read_endio() to use a folio
  gfs2: Convert gfs2_end_log_write_bh() to work on a folio
  gfs2: Convert gfs2_find_jhead() to use a folio
  gfs2: Convert gfs2_jhead_pg_srch() to gfs2_jhead_folio_search()
  gfs2: Use b_folio in gfs2_check_magic()
  gfs2: Use b_folio in gfs2_submit_bhs()
  gfs2: Use b_folio in gfs2_trans_add_meta()
  gfs2: Use b_folio in gfs2_log_write_bh()
  gfs2: skip if we cannot defer delete
  gfs2: remove redundant warnings
  gfs2: minor evict fix
  gfs2: Prevent inode creation race (2)
  gfs2: Fix additional unlikely request cancelation race
  gfs2: Fix request cancelation bug
  gfs2: Check for empty queue in run_queue
  gfs2: Remove more dead code in add_to_queue
  gfs2: Replace GIF_DEFER_DELETE with GLF_DEFER_DELETE
  gfs2: glock holder GL_NOPID fix
  ...
Linus Torvalds 2025-03-27 12:09:25 -07:00
commit ef479de65a
9 changed files with 136 additions and 132 deletions
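
The "more folio conversions" noted in the pull message follow one recurring pattern throughout the diff below: page-based helpers such as bio_add_page(), kmap_local_page(), and page_buffers() are replaced with their folio equivalents. As a minimal sketch of the bio part of that pattern (the helper name example_add_block() and its parameters are hypothetical; bio_add_page() and bio_add_folio() are the block-layer APIs being swapped):

#include <linux/bio.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only: queue one block-sized chunk of a folio in a bio.
 * bio_add_folio() returns false when the chunk does not fit, mirroring the
 * old check that bio_add_page() added less than the requested length.
 */
static bool example_add_block(struct bio *bio, struct folio *folio,
			      size_t offset, size_t bsize)
{
	/*
	 * Old, page-based form:
	 *   if (bio_add_page(bio, &folio->page, bsize, offset) == bsize)
	 *           return true;
	 */
	return bio_add_folio(bio, folio, bsize, offset);
}

The gfs2_find_jhead() and gfs2_submit_bhs() hunks below apply this same substitution.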

@@ -820,7 +820,7 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
* that the inode glock may be dropped, fault in the pages manually,
* that the inode glock should be dropped, fault in the pages manually,
* and retry.
*
* Unlike generic_file_read_iter, for reads, iomap_dio_rw can trigger
@@ -885,7 +885,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
* that the inode glock may be dropped, fault in the pages manually,
* that the inode glock should be dropped, fault in the pages manually,
* and retry.
*
* For writes, iomap_dio_rw only triggers manual page faults, so we
@@ -957,7 +957,7 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
* that the inode glock may be dropped, fault in the pages manually,
* that the inode glock should be dropped, fault in the pages manually,
* and retry.
*/
@@ -1024,7 +1024,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
* that the inode glock may be dropped, fault in the pages manually,
* that the inode glock should be dropped, fault in the pages manually,
* and retry.
*/

@@ -607,14 +607,19 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
if (gh && (ret & LM_OUT_CANCELED))
gfs2_holder_wake(gh);
if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
/* move to back of queue and try next entry */
if (ret & LM_OUT_CANCELED) {
list_move_tail(&gh->gh_list, &gl->gl_holders);
list_del_init(&gh->gh_list);
trace_gfs2_glock_queue(gh, 0);
gl->gl_target = gl->gl_state;
gh = find_first_waiter(gl);
gl->gl_target = gh->gh_state;
if (do_promote(gl))
goto out;
goto retry;
if (gh) {
gl->gl_target = gh->gh_state;
if (do_promote(gl))
goto out;
do_xmote(gl, gh, gl->gl_target);
return;
}
goto out;
}
/* Some error or failed "try lock" - report it */
if ((ret & LM_OUT_ERROR) ||
@@ -627,7 +632,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
switch(state) {
/* Unlocked due to conversion deadlock, try again */
case LM_ST_UNLOCKED:
retry:
do_xmote(gl, gh, gl->gl_target);
break;
/* Conversion fails, unlock and try again */
@@ -661,7 +665,8 @@ retry:
do_promote(gl);
}
out:
clear_bit(GLF_LOCK, &gl->gl_flags);
if (!test_bit(GLF_CANCELING, &gl->gl_flags))
clear_bit(GLF_LOCK, &gl->gl_flags);
}
static bool is_system_glock(struct gfs2_glock *gl)
@@ -807,6 +812,7 @@ skip_inval:
}
if (ls->ls_ops->lm_lock) {
set_bit(GLF_PENDING_REPLY, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
spin_lock(&gl->gl_lockref.lock);
@@ -825,6 +831,7 @@ skip_inval:
/* The operation will be completed asynchronously. */
return;
}
clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
}
/* Complete the operation now. */
@@ -843,12 +850,13 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
struct gfs2_holder *gh = NULL;
struct gfs2_holder *gh;
if (test_bit(GLF_LOCK, &gl->gl_flags))
return;
set_bit(GLF_LOCK, &gl->gl_flags);
/* While a demote is in progress, the GLF_LOCK flag must be set. */
GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
@@ -860,18 +868,22 @@ __acquires(&gl->gl_lockref.lock)
set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
gl->gl_target = gl->gl_demote_state;
do_xmote(gl, NULL, gl->gl_target);
return;
} else {
if (test_bit(GLF_DEMOTE, &gl->gl_flags))
gfs2_demote_wake(gl);
if (do_promote(gl))
goto out_unlock;
gh = find_first_waiter(gl);
if (!gh)
goto out_unlock;
gl->gl_target = gh->gh_state;
if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
do_error(gl, 0); /* Fail queued try locks */
do_xmote(gl, gh, gl->gl_target);
return;
}
do_xmote(gl, gh, gl->gl_target);
return;
out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -898,12 +910,8 @@ void glock_set_object(struct gfs2_glock *gl, void *object)
prev_object = gl->gl_object;
gl->gl_object = object;
spin_unlock(&gl->gl_lockref.lock);
if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) {
pr_warn("glock=%u/%llx\n",
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number);
if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL))
gfs2_dump_glock(NULL, gl, true);
}
}
/**
@@ -919,12 +927,8 @@ void glock_clear_object(struct gfs2_glock *gl, void *object)
prev_object = gl->gl_object;
gl->gl_object = NULL;
spin_unlock(&gl->gl_lockref.lock);
if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) {
pr_warn("glock=%u/%llx\n",
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number);
if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object))
gfs2_dump_glock(NULL, gl, true);
}
}
void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
@@ -959,6 +963,25 @@ static void gfs2_glock_poke(struct gfs2_glock *gl)
gfs2_holder_uninit(&gh);
}
static struct gfs2_inode *gfs2_grab_existing_inode(struct gfs2_glock *gl)
{
struct gfs2_inode *ip;
spin_lock(&gl->gl_lockref.lock);
ip = gl->gl_object;
if (ip && !igrab(&ip->i_inode))
ip = NULL;
spin_unlock(&gl->gl_lockref.lock);
if (ip) {
wait_on_inode(&ip->i_inode);
if (is_bad_inode(&ip->i_inode)) {
iput(&ip->i_inode);
ip = NULL;
}
}
return ip;
}
static void gfs2_try_evict(struct gfs2_glock *gl)
{
struct gfs2_inode *ip;
@@ -976,32 +999,15 @@ static void gfs2_try_evict(struct gfs2_glock *gl)
* happened below. (Verification is triggered by the call to
* gfs2_queue_verify_delete() in gfs2_evict_inode().)
*/
spin_lock(&gl->gl_lockref.lock);
ip = gl->gl_object;
if (ip && !igrab(&ip->i_inode))
ip = NULL;
spin_unlock(&gl->gl_lockref.lock);
ip = gfs2_grab_existing_inode(gl);
if (ip) {
wait_on_inode(&ip->i_inode);
if (is_bad_inode(&ip->i_inode)) {
iput(&ip->i_inode);
ip = NULL;
}
}
if (ip) {
set_bit(GIF_DEFER_DELETE, &ip->i_flags);
set_bit(GLF_DEFER_DELETE, &gl->gl_flags);
d_prune_aliases(&ip->i_inode);
iput(&ip->i_inode);
clear_bit(GLF_DEFER_DELETE, &gl->gl_flags);
/* If the inode was evicted, gl->gl_object will now be NULL. */
spin_lock(&gl->gl_lockref.lock);
ip = gl->gl_object;
if (ip) {
clear_bit(GIF_DEFER_DELETE, &ip->i_flags);
if (!igrab(&ip->i_inode))
ip = NULL;
}
spin_unlock(&gl->gl_lockref.lock);
ip = gfs2_grab_existing_inode(gl);
if (ip) {
gfs2_glock_poke(ip->i_gl);
iput(&ip->i_inode);
@@ -1462,9 +1468,7 @@ static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
{
if (!(gh->gh_flags & GL_NOPID))
return true;
if (gh->gh_state == LM_ST_UNLOCKED)
return true;
return false;
return !test_bit(HIF_HOLDER, &gh->gh_iflags);
}
/**
@@ -1483,7 +1487,6 @@ __acquires(&gl->gl_lockref.lock)
{
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct list_head *insert_pt = NULL;
struct gfs2_holder *gh2;
int try_futile = 0;
@@ -1519,21 +1522,11 @@ fail:
gfs2_holder_wake(gh);
return;
}
if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
continue;
}
trace_gfs2_glock_queue(gh, 1);
gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
if (likely(insert_pt == NULL)) {
list_add_tail(&gh->gh_list, &gl->gl_holders);
return;
}
list_add_tail(&gh->gh_list, insert_pt);
spin_unlock(&gl->gl_lockref.lock);
if (sdp->sd_lockstruct.ls_ops->lm_cancel)
sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
spin_lock(&gl->gl_lockref.lock);
list_add_tail(&gh->gh_list, &gl->gl_holders);
return;
trap_recursive:
@@ -1673,11 +1666,19 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
}
if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
!test_bit(HIF_HOLDER, &gh->gh_iflags) &&
test_bit(GLF_LOCK, &gl->gl_flags) &&
!test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
!test_bit(GLF_CANCELING, &gl->gl_flags)) {
set_bit(GLF_CANCELING, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
spin_lock(&gl->gl_lockref.lock);
clear_bit(GLF_CANCELING, &gl->gl_flags);
clear_bit(GLF_LOCK, &gl->gl_flags);
if (!gfs2_holder_queued(gh))
goto out;
}
/*
@@ -1923,6 +1924,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
spin_lock(&gl->gl_lockref.lock);
clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
gl->gl_reply = ret;
if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
@@ -2323,6 +2325,8 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'f';
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
*p++ = 'i';
if (test_bit(GLF_PENDING_REPLY, gflags))
*p++ = 'R';
if (test_bit(GLF_HAVE_REPLY, gflags))
*p++ = 'r';
if (test_bit(GLF_INITIAL, gflags))
@@ -2347,6 +2351,10 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'e';
if (test_bit(GLF_VERIFY_DELETE, gflags))
*p++ = 'E';
if (test_bit(GLF_DEFER_DELETE, gflags))
*p++ = 's';
if (test_bit(GLF_CANCELING, gflags))
*p++ = 'C';
*p = 0;
return buf;
}

@@ -330,6 +330,9 @@ enum {
GLF_UNLOCKED = 16, /* Wait for glock to be unlocked */
GLF_TRY_TO_EVICT = 17, /* iopen glocks only */
GLF_VERIFY_DELETE = 18, /* iopen glocks only */
GLF_PENDING_REPLY = 19,
GLF_DEFER_DELETE = 20, /* iopen glocks only */
GLF_CANCELING = 21,
};
struct gfs2_glock {
@@ -376,7 +379,6 @@ enum {
GIF_SW_PAGED = 3,
GIF_FREE_VFS_INODE = 5,
GIF_GLOP_PENDING = 6,
GIF_DEFER_DELETE = 7,
};
struct gfs2_inode {

@@ -157,7 +157,9 @@ u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
/**
* gfs2_end_log_write_bh - end log write of pagecache data with buffers
* @sdp: The superblock
* @bvec: The bio_vec
* @folio: The folio
* @offset: The first byte within the folio that completed
* @size: The number of bytes that completed
* @error: The i/o status
*
* This finds the relevant buffers and unlocks them and sets the
@@ -166,17 +168,13 @@ u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
* that is pinned in the pagecache.
*/
static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
struct bio_vec *bvec,
blk_status_t error)
static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct folio *folio,
size_t offset, size_t size, blk_status_t error)
{
struct buffer_head *bh, *next;
struct page *page = bvec->bv_page;
unsigned size;
bh = page_buffers(page);
size = bvec->bv_len;
while (bh_offset(bh) < bvec->bv_offset)
bh = folio_buffers(folio);
while (bh_offset(bh) < offset)
bh = bh->b_this_page;
do {
if (error)
@@ -186,7 +184,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
size -= bh->b_size;
brelse(bh);
bh = next;
} while(bh && size);
} while (bh && size);
}
/**
@@ -203,7 +201,6 @@ static void gfs2_end_log_write(struct bio *bio)
{
struct gfs2_sbd *sdp = bio->bi_private;
struct bio_vec *bvec;
struct page *page;
struct bvec_iter_all iter_all;
if (bio->bi_status) {
@@ -217,9 +214,12 @@ static void gfs2_end_log_write(struct bio *bio)
}
bio_for_each_segment_all(bvec, bio, iter_all) {
page = bvec->bv_page;
if (page_has_buffers(page))
gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
struct page *page = bvec->bv_page;
struct folio *folio = page_folio(page);
if (folio && folio_buffers(folio))
gfs2_end_log_write_bh(sdp, folio, bvec->bv_offset,
bvec->bv_len, bio->bi_status);
else
mempool_free(page, gfs2_page_pool);
}
@@ -359,8 +359,8 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
gfs2_log_incr_head(sdp);
gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
bh_offset(bh), dblock);
gfs2_log_write(sdp, sdp->sd_jdesc, folio_page(bh->b_folio, 0),
bh->b_size, bh_offset(bh), dblock);
}
/**
@@ -406,17 +406,16 @@ static void gfs2_end_log_read(struct bio *bio)
}
/**
* gfs2_jhead_pg_srch - Look for the journal head in a given page.
* gfs2_jhead_folio_search - Look for the journal head in a given page.
* @jd: The journal descriptor
* @head: The journal head to start from
* @page: The page to look in
* @folio: The folio to look in
*
* Returns: 1 if found, 0 otherwise.
*/
static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head,
struct page *page)
static bool gfs2_jhead_folio_search(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head,
struct folio *folio)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct gfs2_log_header_host lh;
@@ -424,7 +423,8 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
unsigned int offset;
bool ret = false;
kaddr = kmap_local_page(page);
VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
kaddr = kmap_local_folio(folio, 0);
for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
if (lh.lh_sequence >= head->lh_sequence)
@@ -472,7 +472,7 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
*done = true;
if (!*done)
*done = gfs2_jhead_pg_srch(jd, head, &folio->page);
*done = gfs2_jhead_folio_search(jd, head, folio);
/* filemap_get_folio() and the earlier grab_cache_page() */
folio_put_refs(folio, 2);
@@ -512,9 +512,9 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
unsigned int shift = PAGE_SHIFT - bsize_shift;
unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
struct gfs2_journal_extent *je;
int sz, ret = 0;
int ret = 0;
struct bio *bio = NULL;
struct page *page = NULL;
struct folio *folio = NULL;
bool done = false;
errseq_t since;
@@ -527,10 +527,11 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
u64 dblock = je->dblock;
for (; block < je->lblock + je->blocks; block++, dblock++) {
if (!page) {
page = grab_cache_page(mapping, block >> shift);
if (!page) {
ret = -ENOMEM;
if (!folio) {
folio = filemap_grab_folio(mapping,
block >> shift);
if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
done = true;
goto out;
}
@@ -541,8 +542,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
sector_t sector = dblock << sdp->sd_fsb2bb_shift;
if (bio_end_sector(bio) == sector) {
sz = bio_add_page(bio, page, bsize, off);
if (sz == bsize)
if (bio_add_folio(bio, folio, bsize, off))
goto block_added;
}
if (off) {
@@ -562,12 +562,12 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
sz = bio_add_page(bio, page, bsize, off);
BUG_ON(sz != bsize);
if (!bio_add_folio(bio, folio, bsize, off))
BUG();
block_added:
off += bsize;
if (off == PAGE_SIZE)
page = NULL;
if (off == folio_size(folio))
folio = NULL;
if (blocks_submitted <= blocks_read + max_blocks) {
/* Keep at least one bio in flight */
continue;
@@ -615,15 +615,13 @@ static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
static void gfs2_check_magic(struct buffer_head *bh)
{
void *kaddr;
__be32 *ptr;
clear_buffer_escaped(bh);
kaddr = kmap_local_page(bh->b_page);
ptr = kaddr + bh_offset(bh);
ptr = kmap_local_folio(bh->b_folio, bh_offset(bh));
if (*ptr == cpu_to_be32(GFS2_MAGIC))
set_buffer_escaped(bh);
kunmap_local(kaddr);
kunmap_local(ptr);
}
static int blocknr_cmp(void *priv, const struct list_head *a,

@@ -198,15 +198,14 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
static void gfs2_meta_read_endio(struct bio *bio)
{
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
struct folio_iter fi;
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
struct buffer_head *bh = page_buffers(page);
unsigned int len = bvec->bv_len;
bio_for_each_folio_all(fi, bio) {
struct folio *folio = fi.folio;
struct buffer_head *bh = folio_buffers(folio);
size_t len = fi.length;
while (bh_offset(bh) < bvec->bv_offset)
while (bh_offset(bh) < fi.offset)
bh = bh->b_this_page;
do {
struct buffer_head *next = bh->b_this_page;
@@ -232,7 +231,7 @@ static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
while (num > 0) {
bh = *bhs;
if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
if (!bio_add_folio(bio, bh->b_folio, bh->b_size, bh_offset(bh))) {
BUG_ON(bio->bi_iter.bi_size == 0);
break;
}

@@ -1329,7 +1329,8 @@ static enum evict_behavior evict_should_delete(struct inode *inode,
if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
goto should_delete;
if (test_bit(GIF_DEFER_DELETE, &ip->i_flags))
if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags))
return EVICT_SHOULD_DEFER_DELETE;
/* Deletes should never happen under memory pressure anymore. */
@@ -1338,12 +1339,8 @@ static enum evict_behavior evict_should_delete(struct inode *inode,
/* Must not read inode block until block type has been verified */
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
if (unlikely(ret)) {
glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
return EVICT_SHOULD_DEFER_DELETE;
}
if (unlikely(ret))
return EVICT_SHOULD_SKIP_DELETE;
if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
return EVICT_SHOULD_SKIP_DELETE;
@@ -1363,15 +1360,8 @@ static enum evict_behavior evict_should_delete(struct inode *inode,
should_delete:
if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
enum evict_behavior behavior =
gfs2_upgrade_iopen_glock(inode);
if (behavior != EVICT_SHOULD_DELETE) {
gfs2_holder_uninit(&ip->i_iopen_gh);
return behavior;
}
}
test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
return gfs2_upgrade_iopen_glock(inode);
return EVICT_SHOULD_DELETE;
}
@@ -1509,7 +1499,7 @@ static void gfs2_evict_inode(struct inode *inode)
gfs2_glock_put(io_gl);
goto out;
}
behavior = EVICT_SHOULD_DELETE;
behavior = EVICT_SHOULD_SKIP_DELETE;
}
if (behavior == EVICT_SHOULD_DELETE)
ret = evict_unlinked_inode(inode);

@@ -53,12 +53,20 @@
{(1UL << GLF_DIRTY), "y" }, \
{(1UL << GLF_LFLUSH), "f" }, \
{(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
{(1UL << GLF_PENDING_REPLY), "R" }, \
{(1UL << GLF_HAVE_REPLY), "r" }, \
{(1UL << GLF_INITIAL), "a" }, \
{(1UL << GLF_HAVE_FROZEN_REPLY), "F" }, \
{(1UL << GLF_LRU), "L" }, \
{(1UL << GLF_OBJECT), "o" }, \
{(1UL << GLF_BLOCKING), "b" })
{(1UL << GLF_BLOCKING), "b" }, \
{(1UL << GLF_UNLOCKED), "x" }, \
{(1UL << GLF_INSTANTIATE_NEEDED), "n" }, \
{(1UL << GLF_INSTANTIATE_IN_PROG), "N" }, \
{(1UL << GLF_TRY_TO_EVICT), "e" }, \
{(1UL << GLF_VERIFY_DELETE), "E" }, \
{(1UL << GLF_DEFER_DELETE), "s" }, \
{(1UL << GLF_CANCELING), "C" })
#ifndef NUMPTY
#define NUMPTY

@@ -246,12 +246,12 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
if (bd == NULL) {
gfs2_log_unlock(sdp);
unlock_buffer(bh);
lock_page(bh->b_page);
folio_lock(bh->b_folio);
if (bh->b_private == NULL)
bd = gfs2_alloc_bufdata(gl, bh);
else
bd = bh->b_private;
unlock_page(bh->b_page);
folio_unlock(bh->b_folio);
lock_buffer(bh);
gfs2_log_lock(sdp);
}

@@ -182,7 +182,6 @@ static inline unsigned long bh_offset(const struct buffer_head *bh)
BUG_ON(!PagePrivate(page)); \
((struct buffer_head *)page_private(page)); \
})
#define page_has_buffers(page) PagePrivate(page)
#define folio_buffers(folio) folio_get_private(folio)
void buffer_check_dirty_writeback(struct folio *folio,