f2fs-for-6.14-rc1

In this series, there are several major improvements such as 1) folio conversion
 made by Matthew, 2) speed-up of block truncation, 3) caching more dentry pages.
 In addition, we implemented a linear dentry search to address a recent
 unicode regression, and figured out some false alarms that we could get rid of.
 
 Enhancements:
  - folio conversion in various IO paths
  - optimize f2fs_truncate_data_blocks_range()
  - cache more dentry pages
  - remove unnecessary blk_finish_plug
  - procfs: show mtime in segment_bits
 
 Bug fixes:
  - introduce linear search for dentries
  - don't call block truncation for aliased file
  - fix using wrong 'submitted' value in f2fs_write_cache_pages
  - fix to do sanity check correctly on i_inline_xattr_size
  - avoid trying to get invalid block address
  - fix inconsistent dirty state of atomic file
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE00UqedjCtOrGVvQiQBSofoJIUNIFAmeYV/QACgkQQBSofoJI
 UNKsPg/+NzFrK/D5nFJ6t86T2XdngzESbI+gbydA8CrT7VoAw5Es0GTswnsStnqF
 DaWWiz9TYDTJWarKMklZ8zcGwcQGAPZqyg3X+eUPb2Rfr9DK80Twov5nfzai/ZVM
 iJQuT7vAqbgJnmF1caJYghuOuJpd43U1lK/CxEomXzBCGVJipvSa7Mzh9awUS0P+
 luvTYjZXh3BISZDnqIbxVjZjcd6TKoBHVqKtz0JbrghVKJRXiVHr4IPnzUQ6hCE8
 MvN07mfQJPyIrZV1jVX/syYKUgwS/QYAmeca/uFGoYO0cSn3qAhdn0PLWpQBIB+D
 ST2SIE9penLlhCb8zN4d6Q6LwEcOWIbtcXffsix3EBCQosKqrqznV0SJ+fjGjuuw
 kX3ICsidYzB8GeHtf6dgH8dRqP4kvYnDe6P0Ho6iuxCZPHWiVauthORuMqerXFNn
 8hHtnGMqybGnT6Py51bt4qlxIgTVl3YO1643Ej8ihpCXJPoCmi6cTyK/M/KaZoaM
 6YYeTZwWbPuCclLm+iVNUPs0asxESSBqHTXm+r9NkaExtmclFyQs1edZ/pYUihq2
 CjvluyKVMuLVieU631am6X3H8sJsgepb8mjsJagtqF36DlCSW8jHgaqkl4gyi5m8
 V4c3w2rmh8IssjTCXxEGtqRQ/Qdbabo9aiFcNa37t1ov7+6GzEk=
 =PEtq
 -----END PGP SIGNATURE-----

Merge tag 'f2fs-for-6.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "In this series, there are several major improvements such as folio
  conversion by Matthew, speed-up of block truncation, and caching more
  dentry pages.

  In addition, we implemented a linear dentry search to address a recent
  unicode regression, and figured out some false alarms that we could
  get rid of.

  Enhancements:
   - folio conversion in various IO paths
   - optimize f2fs_truncate_data_blocks_range()
   - cache more dentry pages
   - remove unnecessary blk_finish_plug
   - procfs: show mtime in segment_bits

  Bug fixes:
   - introduce linear search for dentries
   - don't call block truncation for aliased file
   - fix using wrong 'submitted' value in f2fs_write_cache_pages
   - fix to do sanity check correctly on i_inline_xattr_size
   - avoid trying to get invalid block address
   - fix inconsistent dirty state of atomic file"
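
A minimal sketch of the linear dentry search mentioned above (a user-space
illustration, not the kernel implementation; struct dentry_slot, find_slot()
and the toy hash values are invented here): the lookup tries the hash-indexed
path first and, only for casefolded directories whose stored hashes may no
longer match the current casefolding rules, retries with a hash-agnostic
linear scan, mirroring the use_hash fallback threaded through
__f2fs_find_entry() in the fs/f2fs/dir.c hunks below.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <strings.h>

struct dentry_slot {
	unsigned int hash;
	const char *name;
};

/* toy directory: the stored hash of a casefolded name may be stale */
static const struct dentry_slot dir_slots[] = {
	{ 0x1111, "Makefile" },
	{ 0x2222, "README" },
};

static const struct dentry_slot *find_slot(unsigned int hash,
					   const char *name, bool use_hash)
{
	for (size_t i = 0; i < sizeof(dir_slots) / sizeof(dir_slots[0]); i++) {
		/* with use_hash == false, every entry is compared by name */
		if (use_hash && dir_slots[i].hash != hash)
			continue;
		if (!strcasecmp(dir_slots[i].name, name))
			return &dir_slots[i];
	}
	return NULL;
}

static const struct dentry_slot *lookup(unsigned int hash, const char *name,
					bool casefolded)
{
	/* fast path: hash-based lookup, as before this series */
	const struct dentry_slot *de = find_slot(hash, name, true);

	/* slow path: linear search, only for casefolded directories */
	if (!de && casefolded)
		de = find_slot(hash, name, false);
	return de;
}

int main(void)
{
	/* a stale hash (0x9999) misses, but the linear fallback matches */
	const struct dentry_slot *de = lookup(0x9999, "readme", true);

	printf("%s\n", de ? de->name : "(not found)");
	return 0;
}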

* tag 'f2fs-for-6.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (32 commits)
  f2fs: fix inconsistent dirty state of atomic file
  f2fs: fix to avoid changing 'check only' behavior of recovery
  f2fs: Clean up the loop outside of f2fs_invalidate_blocks()
  f2fs: procfs: show mtime in segment_bits
  f2fs: fix to avoid return invalid mtime from f2fs_get_section_mtime()
  f2fs: Fix format specifier in sanity_check_inode()
  f2fs: avoid trying to get invalid block address
  f2fs: fix to do sanity check correctly on i_inline_xattr_size
  f2fs: remove blk_finish_plug
  f2fs: Optimize f2fs_truncate_data_blocks_range()
  f2fs: fix using wrong 'submitted' value in f2fs_write_cache_pages
  f2fs: add parameter @len to f2fs_invalidate_blocks()
  f2fs: update_sit_entry_for_release() supports consecutive blocks.
  f2fs: introduce update_sit_entry_for_release/alloc()
  f2fs: don't call block truncation for aliased file
  f2fs: Introduce linear search for dentries
  f2fs: add parameter @len to f2fs_invalidate_internal_cache()
  f2fs: expand f2fs_invalidate_compress_page() to f2fs_invalidate_compress_pages_range()
  f2fs: ensure that node info flags are always initialized
  f2fs: The GC triggered by ioctl also needs to mark the segno as victim
  ...
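
The truncation speed-up in the fs/f2fs/file.c and fs/f2fs/segment.c hunks below
batches consecutive block addresses so that f2fs_invalidate_blocks() is called
once per contiguous run rather than once per block. A user-space sketch of that
run-length coalescing (invalidate_range() and the flat address array are
invented stand-ins; the kernel code additionally breaks runs at NULL/COMPRESS
addresses and at segment boundaries):

#include <stddef.h>
#include <stdio.h>

typedef unsigned int block_t;

/* stand-in for f2fs_invalidate_blocks(sbi, start, len) */
static void invalidate_range(block_t start, unsigned int len)
{
	printf("invalidate [%u, %u)\n", start, start + len);
}

static void truncate_blocks(const block_t *addrs, size_t count)
{
	block_t start = 0;
	unsigned int len = 0;

	for (size_t i = 0; i < count; i++) {
		if (len && addrs[i] == start + len) {
			len++;		/* extend the current run */
			continue;
		}
		if (len)		/* flush the previous run */
			invalidate_range(start, len);
		start = addrs[i];
		len = 1;
	}
	if (len)			/* flush the final run */
		invalidate_range(start, len);
}

int main(void)
{
	/* coalesces into three runs: [100,103), [200,202), [300,301) */
	const block_t addrs[] = { 100, 101, 102, 200, 201, 300 };

	truncate_blocks(addrs, sizeof(addrs) / sizeof(addrs[0]));
	return 0;
}
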
Merged by Linus Torvalds on 2025-01-27 20:58:58 -08:00, commit 6d61a53dd6
14 changed files with 336 additions and 213 deletions

fs/f2fs/compress.c

@@ -846,7 +846,7 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
int index, int nr_pages, bool uptodate)
{
unsigned long pgidx = pages[index]->index;
unsigned long pgidx = page_folio(pages[index])->index;
int i = uptodate ? 0 : 1;
/*
@@ -860,9 +860,11 @@ bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
return false;
for (; i < cc->cluster_size; i++) {
if (pages[index + i]->index != pgidx + i)
struct folio *folio = page_folio(pages[index + i]);
if (folio->index != pgidx + i)
return false;
if (uptodate && !PageUptodate(pages[index + i]))
if (uptodate && !folio_test_uptodate(folio))
return false;
}
@@ -1195,7 +1197,8 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
.cluster_size = F2FS_I(inode)->i_cluster_size,
.rpages = fsdata,
};
bool first_index = (index == cc.rpages[0]->index);
struct folio *folio = page_folio(cc.rpages[0]);
bool first_index = (index == folio->index);
if (copied)
set_cluster_dirty(&cc);
@@ -1239,13 +1242,14 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
int i;
for (i = cluster_size - 1; i >= 0; i--) {
loff_t start = rpages[i]->index << PAGE_SHIFT;
struct folio *folio = page_folio(rpages[i]);
loff_t start = folio->index << PAGE_SHIFT;
if (from <= start) {
zero_user_segment(rpages[i], 0, PAGE_SIZE);
folio_zero_segment(folio, 0, folio_size(folio));
} else {
zero_user_segment(rpages[i], from - start,
PAGE_SIZE);
folio_zero_segment(folio, from - start,
folio_size(folio));
break;
}
}
@@ -1278,6 +1282,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1 : 0,
};
struct folio *folio;
struct dnode_of_data dn;
struct node_info ni;
struct compress_io_ctx *cic;
@@ -1289,7 +1294,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
/* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
mapping_set_error(cc->rpages[0]->mapping, -EIO);
mapping_set_error(inode->i_mapping, -EIO);
goto out_free;
}
@@ -1316,7 +1321,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
goto out_put_dnode;
}
psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
folio = page_folio(cc->rpages[last_index]);
psize = folio_pos(folio) + folio_size(folio);
err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
if (err)
@@ -1339,7 +1345,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_set_compressed_page(cc->cpages[i], inode,
cc->rpages[i + 1]->index, cic);
page_folio(cc->rpages[i + 1])->index, cic);
fio.compressed_page = cc->cpages[i];
fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
@@ -1374,7 +1380,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (blkaddr == COMPRESS_ADDR)
fio.compr_blocks++;
if (__is_valid_data_blkaddr(blkaddr))
f2fs_invalidate_blocks(sbi, blkaddr);
f2fs_invalidate_blocks(sbi, blkaddr, 1);
f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
goto unlock_continue;
}
@@ -1384,7 +1390,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (i > cc->valid_nr_cpages) {
if (__is_valid_data_blkaddr(blkaddr)) {
f2fs_invalidate_blocks(sbi, blkaddr);
f2fs_invalidate_blocks(sbi, blkaddr, 1);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
}
goto unlock_continue;
@@ -1545,6 +1551,7 @@ continue_unlock:
if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;
submitted = 0;
ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
&submitted,
NULL, NULL, wbc, io_type,
@@ -1903,11 +1910,12 @@ struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
return sbi->compress_inode->i_mapping;
}
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
block_t blkaddr, unsigned int len)
{
if (!sbi->compress_inode)
return;
invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
}
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,

fs/f2fs/data.c

@@ -70,9 +70,9 @@ bool f2fs_is_cp_guaranteed(struct page *page)
return false;
}
static enum count_type __read_io_type(struct page *page)
static enum count_type __read_io_type(struct folio *folio)
{
struct address_space *mapping = page_file_mapping(page);
struct address_space *mapping = folio->mapping;
if (mapping) {
struct inode *inode = mapping->host;
@@ -136,27 +136,22 @@ struct bio_post_read_ctx {
*/
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
struct bio_vec *bv;
struct bvec_iter_all iter_all;
struct folio_iter fi;
struct bio_post_read_ctx *ctx = bio->bi_private;
bio_for_each_segment_all(bv, bio, iter_all) {
struct page *page = bv->bv_page;
bio_for_each_folio_all(fi, bio) {
struct folio *folio = fi.folio;
if (f2fs_is_compressed_page(page)) {
if (f2fs_is_compressed_page(&folio->page)) {
if (ctx && !ctx->decompression_attempted)
f2fs_end_read_compressed_page(page, true, 0,
f2fs_end_read_compressed_page(&folio->page, true, 0,
in_task);
f2fs_put_page_dic(page, in_task);
f2fs_put_page_dic(&folio->page, in_task);
continue;
}
if (bio->bi_status)
ClearPageUptodate(page);
else
SetPageUptodate(page);
dec_page_count(F2FS_P_SB(page), __read_io_type(page));
unlock_page(page);
dec_page_count(F2FS_F_SB(folio), __read_io_type(folio));
folio_end_read(folio, bio->bi_status == 0);
}
if (ctx)
@@ -516,10 +511,6 @@ static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
enum page_type type)
{
WARN_ON_ONCE(is_read_io(bio_op(bio)));
if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type))
blk_finish_plug(current->plug);
trace_f2fs_submit_write_bio(sbi->sb, type, bio);
iostat_update_submit_ctx(bio, type);
submit_bio(bio);
@@ -689,33 +680,29 @@ void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
struct folio *fio_folio = page_folio(fio->page);
struct folio *data_folio = fio->encrypted_page ?
page_folio(fio->encrypted_page) : fio_folio;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
fio->is_por ? META_POR : (__is_meta_io(fio) ?
META_GENERIC : DATA_GENERIC_ENHANCE)))
return -EFSCORRUPTED;
trace_f2fs_submit_page_bio(page, fio);
trace_f2fs_submit_folio_bio(data_folio, fio);
/* Allocate a new bio */
bio = __bio_alloc(fio, 1);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
page_folio(fio->page)->index, fio, GFP_NOIO);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host,
fio_folio->index, fio, GFP_NOIO);
bio_add_folio_nofail(bio, data_folio, folio_size(data_folio), 0);
if (fio->io_wbc && !is_read_io(fio->op))
wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
PAGE_SIZE);
wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
__read_io_type(page) : WB_DATA_TYPE(fio->page, false));
__read_io_type(data_folio) : WB_DATA_TYPE(fio->page, false));
if (is_read_io(bio_op(bio)))
f2fs_submit_read_bio(fio->sbi, bio, fio->type);
@@ -894,7 +881,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
return -EFSCORRUPTED;
trace_f2fs_submit_page_bio(page, fio);
trace_f2fs_submit_folio_bio(page_folio(page), fio);
if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
fio->new_blkaddr))
@@ -1018,7 +1005,7 @@ alloc_new:
io->last_block_in_bio = fio->new_blkaddr;
trace_f2fs_submit_page_write(fio->page, fio);
trace_f2fs_submit_folio_write(page_folio(fio->page), fio);
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
@@ -1289,7 +1276,7 @@ struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
struct page *page;
page = find_get_page(mapping, index);
page = find_get_page_flags(mapping, index, FGP_ACCESSED);
if (page && PageUptodate(page))
return page;
f2fs_put_page(page, 0);
@@ -1423,7 +1410,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
return err;
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
f2fs_invalidate_internal_cache(sbi, old_blkaddr);
f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
return 0;
@@ -2464,7 +2451,7 @@ next_page:
static int f2fs_read_data_folio(struct file *file, struct folio *folio)
{
struct inode *inode = folio_file_mapping(folio)->host;
struct inode *inode = folio->mapping->host;
int ret = -EAGAIN;
trace_f2fs_readpage(folio, DATA);
@@ -3163,6 +3150,7 @@ continue_unlock:
continue;
}
#endif
submitted = 0;
ret = f2fs_write_single_data_page(folio,
&submitted, &bio, &last_block,
wbc, io_type, 0, true);

fs/f2fs/dir.c

@@ -175,7 +175,8 @@ static unsigned long dir_block_index(unsigned int level,
static struct f2fs_dir_entry *find_in_block(struct inode *dir,
struct page *dentry_page,
const struct f2fs_filename *fname,
int *max_slots)
int *max_slots,
bool use_hash)
{
struct f2fs_dentry_block *dentry_blk;
struct f2fs_dentry_ptr d;
@@ -183,7 +184,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);
make_dentry_ptr_block(dir, &d, dentry_blk);
return f2fs_find_target_dentry(&d, fname, max_slots);
return f2fs_find_target_dentry(&d, fname, max_slots, use_hash);
}
static inline int f2fs_match_name(const struct inode *dir,
@@ -208,7 +209,8 @@ static inline int f2fs_match_name(const struct inode *dir,
}
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
const struct f2fs_filename *fname, int *max_slots)
const struct f2fs_filename *fname, int *max_slots,
bool use_hash)
{
struct f2fs_dir_entry *de;
unsigned long bit_pos = 0;
@@ -231,7 +233,7 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
continue;
}
if (de->hash_code == fname->hash) {
if (!use_hash || de->hash_code == fname->hash) {
res = f2fs_match_name(d->inode, fname,
d->filename[bit_pos],
le16_to_cpu(de->name_len));
@@ -258,11 +260,12 @@ found:
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
unsigned int level,
const struct f2fs_filename *fname,
struct page **res_page)
struct page **res_page,
bool use_hash)
{
int s = GET_DENTRY_SLOTS(fname->disk_name.len);
unsigned int nbucket, nblock;
unsigned int bidx, end_block;
unsigned int bidx, end_block, bucket_no;
struct page *dentry_page;
struct f2fs_dir_entry *de = NULL;
pgoff_t next_pgofs;
@@ -272,8 +275,11 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
bucket_no = use_hash ? le32_to_cpu(fname->hash) % nbucket : 0;
start_find_bucket:
bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
le32_to_cpu(fname->hash) % nbucket);
bucket_no);
end_block = bidx + nblock;
while (bidx < end_block) {
@@ -290,7 +296,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
}
}
de = find_in_block(dir, dentry_page, fname, &max_slots);
de = find_in_block(dir, dentry_page, fname, &max_slots, use_hash);
if (IS_ERR(de)) {
*res_page = ERR_CAST(de);
de = NULL;
@@ -307,12 +313,18 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
bidx++;
}
if (!de && room && F2FS_I(dir)->chash != fname->hash) {
F2FS_I(dir)->chash = fname->hash;
F2FS_I(dir)->clevel = level;
}
if (de)
return de;
return de;
if (likely(use_hash)) {
if (room && F2FS_I(dir)->chash != fname->hash) {
F2FS_I(dir)->chash = fname->hash;
F2FS_I(dir)->clevel = level;
}
} else if (++bucket_no < nbucket) {
goto start_find_bucket;
}
return NULL;
}
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
@@ -323,11 +335,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
struct f2fs_dir_entry *de = NULL;
unsigned int max_depth;
unsigned int level;
bool use_hash = true;
*res_page = NULL;
#if IS_ENABLED(CONFIG_UNICODE)
start_find_entry:
#endif
if (f2fs_has_inline_dentry(dir)) {
de = f2fs_find_in_inline_dir(dir, fname, res_page);
de = f2fs_find_in_inline_dir(dir, fname, res_page, use_hash);
goto out;
}
@@ -343,11 +359,18 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
}
for (level = 0; level < max_depth; level++) {
de = find_in_level(dir, level, fname, res_page);
de = find_in_level(dir, level, fname, res_page, use_hash);
if (de || IS_ERR(*res_page))
break;
}
out:
#if IS_ENABLED(CONFIG_UNICODE)
if (IS_CASEFOLDED(dir) && !de && use_hash) {
use_hash = false;
goto start_find_entry;
}
#endif
/* This is to increase the speed of f2fs_create */
if (!de)
F2FS_I(dir)->task = current;

fs/f2fs/f2fs.h

@@ -1985,9 +1985,14 @@ static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
return F2FS_I_SB(mapping->host);
}
static inline struct f2fs_sb_info *F2FS_F_SB(struct folio *folio)
{
return F2FS_M_SB(folio->mapping);
}
static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
return F2FS_M_SB(page_file_mapping(page));
return F2FS_F_SB(page_folio(page));
}
static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
@@ -3565,7 +3570,8 @@ int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
const struct f2fs_filename *fname, int *max_slots);
const struct f2fs_filename *fname, int *max_slots,
bool use_hash);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
@@ -3700,7 +3706,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
unsigned int len);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
@@ -4201,7 +4208,8 @@ int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
const struct f2fs_filename *fname,
struct page **res_page);
struct page **res_page,
bool use_hash);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
@@ -4368,7 +4376,8 @@ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
block_t blkaddr, unsigned int len);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
@@ -4423,8 +4432,8 @@ static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
block_t blkaddr) { }
static inline void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
block_t blkaddr, unsigned int len) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
struct page *page, nid_t ino, block_t blkaddr) { }
static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
@@ -4740,10 +4749,10 @@ static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
}
static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
block_t blkaddr)
block_t blkaddr, unsigned int len)
{
f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1);
f2fs_invalidate_compress_page(sbi, blkaddr);
f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
f2fs_invalidate_compress_pages_range(sbi, blkaddr, len);
}
#define EFSBADCRC EBADMSG /* Bad CRC detected */

fs/f2fs/file.c

@@ -621,8 +621,11 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
block_t blkstart;
int blklen = 0;
addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
blkstart = le32_to_cpu(*addr);
/* Assumption: truncation starts with cluster */
for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
@@ -638,26 +641,44 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
}
if (blkaddr == NULL_ADDR)
continue;
goto next;
f2fs_set_data_blkaddr(dn, NULL_ADDR);
if (__is_valid_data_blkaddr(blkaddr)) {
if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
continue;
goto next;
if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
DATA_GENERIC_ENHANCE))
continue;
goto next;
if (compressed_cluster)
valid_blocks++;
}
f2fs_invalidate_blocks(sbi, blkaddr);
if (blkstart + blklen == blkaddr) {
blklen++;
} else {
f2fs_invalidate_blocks(sbi, blkstart, blklen);
blkstart = blkaddr;
blklen = 1;
}
if (!released || blkaddr != COMPRESS_ADDR)
nr_free++;
continue;
next:
if (blklen)
f2fs_invalidate_blocks(sbi, blkstart, blklen);
blkstart = le32_to_cpu(*(addr + 1));
blklen = 0;
}
if (blklen)
f2fs_invalidate_blocks(sbi, blkstart, blklen);
if (compressed_cluster)
f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
@@ -747,10 +768,8 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
if (IS_DEVICE_ALIASING(inode)) {
struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
struct extent_info ei = et->largest;
unsigned int i;
for (i = 0; i < ei.len; i++)
f2fs_invalidate_blocks(sbi, ei.blk + i);
f2fs_invalidate_blocks(sbi, ei.blk, ei.len);
dec_valid_block_count(sbi, inode, ei.len);
f2fs_update_time(sbi, REQ_TIME);
@@ -1323,7 +1342,7 @@ static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
if (ret) {
dec_valid_block_count(sbi, inode, 1);
f2fs_invalidate_blocks(sbi, *blkaddr);
f2fs_invalidate_blocks(sbi, *blkaddr, 1);
} else {
f2fs_update_data_blkaddr(&dn, *blkaddr);
}
@@ -1575,7 +1594,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
break;
}
f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
f2fs_invalidate_blocks(sbi, dn->data_blkaddr, 1);
f2fs_set_data_blkaddr(dn, NEW_ADDR);
}

fs/f2fs/gc.c

@@ -806,11 +806,14 @@ retry:
goto out;
}
if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) {
ret = -EBUSY;
else
p.min_segno = *result;
goto out;
goto out;
}
if (gc_type == FG_GC)
clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap);
p.min_segno = *result;
goto got_result;
}
ret = -ENODATA;
@@ -1412,7 +1415,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
page_address(mpage), PAGE_SIZE);
f2fs_put_page(mpage, 1);
f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);
f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1);
set_page_dirty(fio.encrypted_page);
if (clear_page_dirty_for_io(fio.encrypted_page))

fs/f2fs/inline.c

@@ -81,7 +81,7 @@ bool f2fs_may_inline_dentry(struct inode *inode)
void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage)
{
struct inode *inode = folio_file_mapping(folio)->host;
struct inode *inode = folio->mapping->host;
if (folio_test_uptodate(folio))
return;
@@ -352,7 +352,8 @@ process_inline:
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
const struct f2fs_filename *fname,
struct page **res_page)
struct page **res_page,
bool use_hash)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
struct f2fs_dir_entry *de;
@@ -369,7 +370,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
inline_dentry = inline_data_addr(dir, ipage);
make_dentry_ptr_inline(dir, &d, inline_dentry);
de = f2fs_find_target_dentry(&d, fname, NULL);
de = f2fs_find_target_dentry(&d, fname, NULL, use_hash);
unlock_page(ipage);
if (IS_ERR(de)) {
*res_page = ERR_CAST(de);

View File

@@ -302,15 +302,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
F2FS_TOTAL_EXTRA_ATTR_SIZE);
return false;
}
if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
f2fs_has_inline_xattr(inode) &&
(!fi->i_inline_xattr_size ||
fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %lu",
__func__, inode->i_ino, fi->i_inline_xattr_size,
MAX_INLINE_XATTR_SIZE);
return false;
}
if (f2fs_sb_has_compression(sbi) &&
fi->i_flags & F2FS_COMPR_FL &&
F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
@@ -320,6 +311,16 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
}
}
if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
f2fs_has_inline_xattr(inode) &&
(fi->i_inline_xattr_size < MIN_INLINE_XATTR_SIZE ||
fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, min: %zu, max: %lu",
__func__, inode->i_ino, fi->i_inline_xattr_size,
MIN_INLINE_XATTR_SIZE, MAX_INLINE_XATTR_SIZE);
return false;
}
if (!f2fs_sb_has_extra_attr(sbi)) {
if (f2fs_sb_has_project_quota(sbi)) {
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",

fs/f2fs/namei.c

@@ -341,6 +341,7 @@ fail_drop:
trace_f2fs_new_inode(inode, err);
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
make_bad_inode(inode);
if (nid_free)
set_inode_flag(inode, FI_FREE_NID);
clear_nlink(inode);

fs/f2fs/node.c

@@ -558,6 +558,7 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
block_t blkaddr;
int i;
ni->flag = 0;
ni->nid = nid;
retry:
/* Check nat cache */
@@ -916,7 +917,7 @@ static int truncate_node(struct dnode_of_data *dn)
}
/* Deallocate node address */
f2fs_invalidate_blocks(sbi, ni.blk_addr);
f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -1274,8 +1275,9 @@ int f2fs_remove_inode_page(struct inode *inode)
}
/* remove potential inline_data blocks */
if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode))
if (!IS_DEVICE_ALIASING(inode) &&
(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
f2fs_truncate_data_blocks_range(&dn, 1);
/* 0 is possible, after f2fs_new_inode() has failed */
@@ -2763,7 +2765,7 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
if (err)
return err;
f2fs_invalidate_blocks(sbi, ni.blk_addr);
f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);

fs/f2fs/recovery.c

@@ -899,10 +899,8 @@ skip:
* and the f2fs is not read only, check and fix zoned block devices'
* write pointer consistency.
*/
if (!err) {
if (!err)
err = f2fs_check_and_fix_write_pointer(sbi);
ret = err;
}
if (!err)
clear_sbi_flag(sbi, SBI_POR_DOING);

fs/f2fs/segment.c

@@ -201,6 +201,12 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
clear_inode_flag(inode, FI_ATOMIC_FILE);
if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
/*
* The vfs inode keeps clean during commit, but the f2fs inode
* doesn't. So clear the dirty state after commit and let
* f2fs_mark_inode_dirty_sync ensure a consistent dirty state.
*/
f2fs_inode_synced(inode);
f2fs_mark_inode_dirty_sync(inode, true);
}
stat_dec_atomic_inode(inode);
@@ -245,7 +251,7 @@ retry:
if (!__is_valid_data_blkaddr(new_addr)) {
if (new_addr == NULL_ADDR)
dec_valid_block_count(sbi, inode, 1);
f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
f2fs_invalidate_blocks(sbi, dn.data_blkaddr, 1);
f2fs_update_data_blkaddr(&dn, new_addr);
} else {
f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
@@ -2426,15 +2432,117 @@ static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
SIT_I(sbi)->max_mtime = ctime;
}
/*
* NOTE: when updating multiple blocks at the same time, please ensure
* that the consecutive input blocks belong to the same segment.
*/
static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se,
block_t blkaddr, unsigned int offset, int del)
{
bool exist;
#ifdef CONFIG_F2FS_CHECK_FS
bool mir_exist;
#endif
int i;
int del_count = -del;
f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1));
for (i = 0; i < del_count; i++) {
exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
mir_exist = f2fs_test_and_clear_bit(offset + i,
se->cur_valid_map_mir);
if (unlikely(exist != mir_exist)) {
f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
blkaddr + i, exist);
f2fs_bug_on(sbi, 1);
}
#endif
if (unlikely(!exist)) {
f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr + i);
f2fs_bug_on(sbi, 1);
se->valid_blocks++;
del += 1;
} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
/*
* If checkpoints are off, we must not reuse data that
* was used in the previous checkpoint. If it was used
* before, we must track that to know how much space we
* really have.
*/
if (f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
spin_lock(&sbi->stat_lock);
sbi->unusable_block_count++;
spin_unlock(&sbi->stat_lock);
}
}
if (f2fs_block_unit_discard(sbi) &&
f2fs_test_and_clear_bit(offset + i, se->discard_map))
sbi->discard_blks++;
if (!f2fs_test_bit(offset + i, se->ckpt_valid_map))
se->ckpt_valid_blocks -= 1;
}
return del;
}
static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se,
block_t blkaddr, unsigned int offset, int del)
{
bool exist;
#ifdef CONFIG_F2FS_CHECK_FS
bool mir_exist;
#endif
exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
mir_exist = f2fs_test_and_set_bit(offset,
se->cur_valid_map_mir);
if (unlikely(exist != mir_exist)) {
f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
blkaddr, exist);
f2fs_bug_on(sbi, 1);
}
#endif
if (unlikely(exist)) {
f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", blkaddr);
f2fs_bug_on(sbi, 1);
se->valid_blocks--;
del = 0;
}
if (f2fs_block_unit_discard(sbi) &&
!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
/*
* SSR should never reuse block which is checkpointed
* or newly invalidated.
*/
if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks++;
}
if (!f2fs_test_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks += del;
return del;
}
/*
* If releasing blocks, this function supports updating multiple consecutive blocks
* at one time, but please note that these consecutive blocks need to belong to the
* same segment.
*/
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
struct seg_entry *se;
unsigned int segno, offset;
long int new_vblocks;
bool exist;
#ifdef CONFIG_F2FS_CHECK_FS
bool mir_exist;
#endif
segno = GET_SEGNO(sbi, blkaddr);
if (segno == NULL_SEGNO)
@@ -2451,73 +2559,10 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
/* Update valid block bitmap */
if (del > 0) {
exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
mir_exist = f2fs_test_and_set_bit(offset,
se->cur_valid_map_mir);
if (unlikely(exist != mir_exist)) {
f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
blkaddr, exist);
f2fs_bug_on(sbi, 1);
}
#endif
if (unlikely(exist)) {
f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
blkaddr);
f2fs_bug_on(sbi, 1);
se->valid_blocks--;
del = 0;
}
if (f2fs_block_unit_discard(sbi) &&
!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
/*
* SSR should never reuse block which is checkpointed
* or newly invalidated.
*/
if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks++;
}
del = update_sit_entry_for_alloc(sbi, se, blkaddr, offset, del);
} else {
exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
mir_exist = f2fs_test_and_clear_bit(offset,
se->cur_valid_map_mir);
if (unlikely(exist != mir_exist)) {
f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
blkaddr, exist);
f2fs_bug_on(sbi, 1);
}
#endif
if (unlikely(!exist)) {
f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
blkaddr);
f2fs_bug_on(sbi, 1);
se->valid_blocks++;
del = 0;
} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
/*
* If checkpoints are off, we must not reuse data that
* was used in the previous checkpoint. If it was used
* before, we must track that to know how much space we
* really have.
*/
if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
spin_lock(&sbi->stat_lock);
sbi->unusable_block_count++;
spin_unlock(&sbi->stat_lock);
}
}
if (f2fs_block_unit_discard(sbi) &&
f2fs_test_and_clear_bit(offset, se->discard_map))
sbi->discard_blks++;
del = update_sit_entry_for_release(sbi, se, blkaddr, offset, del);
}
if (!f2fs_test_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks += del;
__mark_sit_entry_dirty(sbi, segno);
@@ -2528,25 +2573,43 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
get_sec_entry(sbi, segno)->valid_blocks += del;
}
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
unsigned int len)
{
unsigned int segno = GET_SEGNO(sbi, addr);
struct sit_info *sit_i = SIT_I(sbi);
block_t addr_start = addr, addr_end = addr + len - 1;
unsigned int seg_num = GET_SEGNO(sbi, addr_end) - segno + 1;
unsigned int i = 1, max_blocks = sbi->blocks_per_seg, cnt;
f2fs_bug_on(sbi, addr == NULL_ADDR);
if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
return;
f2fs_invalidate_internal_cache(sbi, addr);
f2fs_invalidate_internal_cache(sbi, addr, len);
/* add it into sit main buffer */
down_write(&sit_i->sentry_lock);
update_segment_mtime(sbi, addr, 0);
update_sit_entry(sbi, addr, -1);
if (seg_num == 1)
cnt = len;
else
cnt = max_blocks - GET_BLKOFF_FROM_SEG0(sbi, addr);
/* add it into dirty seglist */
locate_dirty_segment(sbi, segno);
do {
update_segment_mtime(sbi, addr_start, 0);
update_sit_entry(sbi, addr_start, -cnt);
/* add it into dirty seglist */
locate_dirty_segment(sbi, segno);
/* update @addr_start and @cnt and @segno */
addr_start = START_BLOCK(sbi, ++segno);
if (++i == seg_num)
cnt = GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1;
else
cnt = max_blocks;
} while (i <= seg_num);
up_write(&sit_i->sentry_lock);
}
@@ -3857,7 +3920,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
goto out;
}
if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr, 1);
/* writeout dirty page into bdev */
f2fs_submit_page_write(fio);
@@ -4049,7 +4112,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
update_sit_entry(sbi, new_blkaddr, 1);
}
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
f2fs_invalidate_internal_cache(sbi, old_blkaddr);
f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
if (!from_gc)
update_segment_mtime(sbi, old_blkaddr, 0);
update_sit_entry(sbi, old_blkaddr, -1);
@@ -5405,7 +5468,8 @@ int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
{
int ret;
if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb))
if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb) ||
f2fs_hw_is_readonly(sbi))
return 0;
f2fs_notice(sbi, "Checking entire write pointers");
@@ -5492,8 +5556,10 @@ unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
secno = GET_SEC_FROM_SEG(sbi, segno);
start = GET_SEG_FROM_SEC(sbi, secno);
if (!__is_large_section(sbi))
return get_seg_entry(sbi, start + i)->mtime;
if (!__is_large_section(sbi)) {
mtime = get_seg_entry(sbi, start + i)->mtime;
goto out;
}
for (i = 0; i < usable_segs_per_sec; i++) {
/* for large section, only check the mtime of valid segments */
@@ -5506,7 +5572,11 @@ unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
if (total_valid_blocks == 0)
return INVALID_MTIME;
return div_u64(mtime, total_valid_blocks);
mtime = div_u64(mtime, total_valid_blocks);
out:
if (unlikely(mtime == INVALID_MTIME))
mtime -= 1;
return mtime;
}
/*

View File

@@ -1472,7 +1472,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
le32_to_cpu(sbi->raw_super->segment_count_main);
int i, j;
seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
seq_puts(seq, "format: segment_type|valid_blocks|bitmaps|mtime\n"
"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
for (i = 0; i < total_segs; i++) {
@@ -1482,6 +1482,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
seq_printf(seq, "%d|%-3u|", se->type, se->valid_blocks);
for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
seq_printf(seq, " %.2x", se->cur_valid_map[j]);
seq_printf(seq, "| %llx", se->mtime);
seq_putc(seq, '\n');
}
return 0;

include/trace/events/f2fs.h

@@ -1119,11 +1119,11 @@ TRACE_EVENT(f2fs_reserve_new_blocks,
(unsigned long long)__entry->count)
);
DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
DECLARE_EVENT_CLASS(f2fs__submit_folio_bio,
TP_PROTO(struct page *page, struct f2fs_io_info *fio),
TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
TP_ARGS(page, fio),
TP_ARGS(folio, fio),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1138,9 +1138,9 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
),
TP_fast_assign(
__entry->dev = page_file_mapping(page)->host->i_sb->s_dev;
__entry->ino = page_file_mapping(page)->host->i_ino;
__entry->index = page->index;
__entry->dev = folio->mapping->host->i_sb->s_dev;
__entry->ino = folio->mapping->host->i_ino;
__entry->index = folio->index;
__entry->old_blkaddr = fio->old_blkaddr;
__entry->new_blkaddr = fio->new_blkaddr;
__entry->op = fio->op;
@@ -1149,7 +1149,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__entry->type = fio->type;
),
TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
TP_printk("dev = (%d,%d), ino = %lu, folio_index = 0x%lx, "
"oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s(%s), type = %s_%s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
@@ -1160,22 +1160,22 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
show_block_type(__entry->type))
);
DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio,
DEFINE_EVENT_CONDITION(f2fs__submit_folio_bio, f2fs_submit_folio_bio,
TP_PROTO(struct page *page, struct f2fs_io_info *fio),
TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
TP_ARGS(page, fio),
TP_ARGS(folio, fio),
TP_CONDITION(page->mapping)
TP_CONDITION(folio->mapping)
);
DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_write,
DEFINE_EVENT_CONDITION(f2fs__submit_folio_bio, f2fs_submit_folio_write,
TP_PROTO(struct page *page, struct f2fs_io_info *fio),
TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
TP_ARGS(page, fio),
TP_ARGS(folio, fio),
TP_CONDITION(page->mapping)
TP_CONDITION(folio->mapping)
);
DECLARE_EVENT_CLASS(f2fs__bio,
@@ -1322,12 +1322,11 @@ DECLARE_EVENT_CLASS(f2fs__folio,
),
TP_fast_assign(
__entry->dev = folio_file_mapping(folio)->host->i_sb->s_dev;
__entry->ino = folio_file_mapping(folio)->host->i_ino;
__entry->dev = folio->mapping->host->i_sb->s_dev;
__entry->ino = folio->mapping->host->i_ino;
__entry->type = type;
__entry->dir =
S_ISDIR(folio_file_mapping(folio)->host->i_mode);
__entry->index = folio_index(folio);
__entry->dir = S_ISDIR(folio->mapping->host->i_mode);
__entry->index = folio->index;
__entry->dirty = folio_test_dirty(folio);
__entry->uptodate = folio_test_uptodate(folio);
),