vfs-6.15-rc1.ceph

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZ90r/AAKCRCRxhvAZXjc
onzyAP9QnVuYdNZhgpl40B+TnqA8F9/QAwKjaudAiC6kYWXPrgEA3SLTcmenjfzP
8+9OqC3WVcfTWWKXB4IDK18Yk7veVQg=
=Eu3R
-----END PGP SIGNATURE-----

Merge tag 'vfs-6.15-rc1.ceph' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs ceph updates from Christian Brauner:

 "This contains the work to remove access to page->index from ceph and
  fixes the test failure observed for ceph with generic/421 by
  refactoring ceph_writepages_start()"

* tag 'vfs-6.15-rc1.ceph' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  fscrypt: Change fscrypt_encrypt_pagecache_blocks() to take a folio
  ceph: Fix error handling in fill_readdir_cache()
  fs: Remove page_mkwrite_check_truncate()
  ceph: Pass a folio to ceph_allocate_page_array()
  ceph: Convert ceph_move_dirty_page_in_page_array() to move_dirty_folio_in_page_array()
  ceph: Remove uses of page from ceph_process_folio_batch()
  ceph: Convert ceph_check_page_before_write() to use a folio
  ceph: Convert writepage_nounlock() to write_folio_nounlock()
  ceph: Convert ceph_readdir_cache_control to store a folio
  ceph: Convert ceph_find_incompatible() to take a folio
  ceph: Use a folio in ceph_page_mkwrite()
  ceph: Remove ceph_writepage()
  ceph: fix generic/421 test failure
  ceph: introduce ceph_submit_write() method
  ceph: introduce ceph_process_folio_batch() method
  ceph: extend ceph_writeback_ctl for ceph_writepages_start() refactoring
commit e63046adef

fs/ceph/addr.c | 1275 lines changed (file diff suppressed because it is too large)
@@ -141,17 +141,18 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
 	if (ptr_pos >= i_size_read(dir))
 		return NULL;
 
-	if (!cache_ctl->page || ptr_pgoff != cache_ctl->page->index) {
+	if (!cache_ctl->folio || ptr_pgoff != cache_ctl->folio->index) {
 		ceph_readdir_cache_release(cache_ctl);
-		cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
-		if (!cache_ctl->page) {
-			doutc(cl, " page %lu not found\n", ptr_pgoff);
+		cache_ctl->folio = filemap_lock_folio(&dir->i_data, ptr_pgoff);
+		if (IS_ERR(cache_ctl->folio)) {
+			cache_ctl->folio = NULL;
+			doutc(cl, " folio %lu not found\n", ptr_pgoff);
 			return ERR_PTR(-EAGAIN);
 		}
 		/* reading/filling the cache are serialized by
-		   i_rwsem, no need to use page lock */
-		unlock_page(cache_ctl->page);
-		cache_ctl->dentries = kmap(cache_ctl->page);
+		   i_rwsem, no need to use folio lock */
+		folio_unlock(cache_ctl->folio);
+		cache_ctl->dentries = kmap_local_folio(cache_ctl->folio, 0);
 	}
 
 	cache_ctl->index = idx & idx_mask;
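A detail worth noting in the hunk above: find_lock_page() signalled a cache miss with NULL, while filemap_lock_folio() returns ERR_PTR(-ENOENT) and never NULL, which is why the new code tests IS_ERR() and clears the pointer before reuse. A minimal sketch of the convention; the helper name and its NULL-normalizing behavior are illustrative, not ceph code:

#include <linux/pagemap.h>

/* Look up a locked folio; normalize the folio API's ERR_PTR miss
 * back to NULL for callers written against the old page API. */
static struct folio *lookup_locked(struct address_space *mapping,
				   pgoff_t index)
{
	struct folio *folio = filemap_lock_folio(mapping, index);

	if (IS_ERR(folio))	/* miss: ERR_PTR(-ENOENT), not NULL */
		return NULL;
	return folio;		/* locked, reference held */
}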
@@ -1845,10 +1845,9 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
 
 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
 {
-	if (ctl->page) {
-		kunmap(ctl->page);
-		put_page(ctl->page);
-		ctl->page = NULL;
+	if (ctl->folio) {
+		folio_release_kmap(ctl->folio, ctl->dentries);
+		ctl->folio = NULL;
 	}
 }
 
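The one-line teardown above works because folio_release_kmap() combines kunmap_local() with folio_put(): it unmaps an address obtained from kmap_local_folio() and drops the folio reference in a single call. A sketch of the pairing; the function and variable names are illustrative:

#include <linux/highmem.h>

/* Map a folio's first page, use it, then unmap and release it. */
static void touch_cached_dentries(struct folio *folio)
{
	struct dentry **dentries = kmap_local_folio(folio, 0);

	/* ... read or update the cached dentry pointers ... */

	folio_release_kmap(folio, dentries); /* kunmap_local() + folio_put() */
}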
@@ -1862,20 +1861,26 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
 	unsigned idx = ctl->index % nsize;
 	pgoff_t pgoff = ctl->index / nsize;
 
-	if (!ctl->page || pgoff != ctl->page->index) {
+	if (!ctl->folio || pgoff != ctl->folio->index) {
 		ceph_readdir_cache_release(ctl);
+		fgf_t fgf = FGP_LOCK;
+
 		if (idx == 0)
-			ctl->page = grab_cache_page(&dir->i_data, pgoff);
-		else
-			ctl->page = find_lock_page(&dir->i_data, pgoff);
-		if (!ctl->page) {
+			fgf |= FGP_ACCESSED | FGP_CREAT;
+
+		ctl->folio = __filemap_get_folio(&dir->i_data, pgoff,
+				fgf, mapping_gfp_mask(&dir->i_data));
+		if (IS_ERR(ctl->folio)) {
+			int err = PTR_ERR(ctl->folio);
+
+			ctl->folio = NULL;
 			ctl->index = -1;
-			return idx == 0 ? -ENOMEM : 0;
+			return idx == 0 ? err : 0;
 		}
 		/* reading/filling the cache are serialized by
-		 * i_rwsem, no need to use page lock */
-		unlock_page(ctl->page);
-		ctl->dentries = kmap(ctl->page);
+		 * i_rwsem, no need to use folio lock */
+		folio_unlock(ctl->folio);
+		ctl->dentries = kmap_local_folio(ctl->folio, 0);
 		if (idx == 0)
 			memset(ctl->dentries, 0, PAGE_SIZE);
 	}
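The fix above folds the grab_cache_page() (find-or-create) and find_lock_page() (find-only) pair into one __filemap_get_folio() call whose behavior is selected through FGP flags, and it propagates the lookup's real error instead of hard-coding -ENOMEM. A sketch of the flag selection; everything except the pagemap API calls and flags is illustrative:

#include <linux/pagemap.h>

/* Choose find-only vs. find-or-create behavior at one call site. */
static struct folio *get_cache_folio(struct address_space *mapping,
				     pgoff_t index, bool may_create)
{
	fgf_t fgf = FGP_LOCK;			/* always return it locked */

	if (may_create)
		fgf |= FGP_ACCESSED | FGP_CREAT;	/* allocate on miss */

	/* ERR_PTR(-ENOENT) on a plain miss; ERR_PTR(-ENOMEM) if an
	 * allocation fails with FGP_CREAT set. */
	return __filemap_get_folio(mapping, index, fgf,
				   mapping_gfp_mask(mapping));
}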
@@ -5489,6 +5489,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
 	spin_lock_init(&mdsc->stopping_lock);
 	atomic_set(&mdsc->stopping_blockers, 0);
 	init_completion(&mdsc->stopping_waiter);
+	atomic64_set(&mdsc->dirty_folios, 0);
+	init_waitqueue_head(&mdsc->flush_end_wq);
 	init_waitqueue_head(&mdsc->session_close_wq);
 	INIT_LIST_HEAD(&mdsc->waiting_for_map);
 	mdsc->quotarealms_inodes = RB_ROOT;
@@ -458,6 +458,9 @@ struct ceph_mds_client {
 	atomic_t		stopping_blockers;
 	struct completion	stopping_waiter;
 
+	atomic64_t		dirty_folios;
+	wait_queue_head_t	flush_end_wq;
+
 	atomic64_t		quotarealms_count; /* # realms with quota */
 	/*
 	 * We keep a list of inodes we don't see in the mountpoint but that we
@@ -1563,6 +1563,17 @@ static void ceph_kill_sb(struct super_block *s)
 	 */
 	sync_filesystem(s);
 
+	if (atomic64_read(&mdsc->dirty_folios) > 0) {
+		wait_queue_head_t *wq = &mdsc->flush_end_wq;
+		long timeleft = wait_event_killable_timeout(*wq,
+					atomic64_read(&mdsc->dirty_folios) <= 0,
+					fsc->client->options->mount_timeout);
+		if (!timeleft) /* timed out */
+			pr_warn_client(cl, "umount timed out, %ld\n", timeleft);
+		else if (timeleft < 0) /* killed */
+			pr_warn_client(cl, "umount was killed, %ld\n", timeleft);
+	}
+
 	spin_lock(&mdsc->stopping_lock);
 	mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHING;
 	wait = !!atomic_read(&mdsc->stopping_blockers);
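The new unmount wait relies on the three-way return of wait_event_killable_timeout(): a positive value is the jiffies remaining when the condition turned true, 0 means the timeout elapsed with the condition still false, and a negative value (-ERESTARTSYS) means a fatal signal arrived first. A compressed sketch of that decoding, with the helper name and messages being illustrative:

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/wait.h>

/* Wait for a dirty counter to drain, decoding all three outcomes. */
static void wait_for_flush(wait_queue_head_t *wq, atomic64_t *dirty,
			   unsigned long timeout_jiffies)
{
	long timeleft = wait_event_killable_timeout(*wq,
					atomic64_read(dirty) <= 0,
					timeout_jiffies);

	if (timeleft > 0)
		return;				/* condition became true */
	if (timeleft == 0)
		pr_warn("flush timed out\n");	/* timeout, still dirty */
	else
		pr_warn("flush killed, %ld\n", timeleft); /* fatal signal */
}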
@@ -903,7 +903,7 @@ ceph_find_rw_context(struct ceph_file_info *cf)
 }
 
 struct ceph_readdir_cache_control {
-	struct page *page;
+	struct folio *folio;
 	struct dentry **dentries;
 	int index;
 };
@@ -153,8 +153,8 @@ int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
 }
 
 /**
- * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache page
- * @page: the locked pagecache page containing the data to encrypt
+ * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache folio
+ * @folio: the locked pagecache folio containing the data to encrypt
  * @len: size of the data to encrypt, in bytes
  * @offs: offset within @page of the data to encrypt, in bytes
  * @gfp_flags: memory allocation flags; see details below
@@ -177,23 +177,21 @@ int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
  *
  * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
  */
-struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
-					      unsigned int len,
-					      unsigned int offs,
-					      gfp_t gfp_flags)
-
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+		size_t len, size_t offs, gfp_t gfp_flags)
 {
-	const struct inode *inode = page->mapping->host;
+	const struct inode *inode = folio->mapping->host;
 	const struct fscrypt_inode_info *ci = inode->i_crypt_info;
 	const unsigned int du_bits = ci->ci_data_unit_bits;
 	const unsigned int du_size = 1U << du_bits;
 	struct page *ciphertext_page;
-	u64 index = ((u64)page->index << (PAGE_SHIFT - du_bits)) +
+	u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
 		    (offs >> du_bits);
 	unsigned int i;
 	int err;
 
-	if (WARN_ON_ONCE(!PageLocked(page)))
+	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+	if (WARN_ON_ONCE(!folio_test_locked(folio)))
 		return ERR_PTR(-EINVAL);
 
 	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
@@ -205,7 +203,7 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
 
 	for (i = offs; i < offs + len; i += du_size, index++) {
 		err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, index,
-					      page, ciphertext_page,
+					      &folio->page, ciphertext_page,
 					      du_size, i, gfp_flags);
 		if (err) {
 			fscrypt_free_bounce_page(ciphertext_page);
@@ -213,7 +211,7 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
 		}
 	}
 	SetPagePrivate(ciphertext_page);
-	set_page_private(ciphertext_page, (unsigned long)page);
+	set_page_private(ciphertext_page, (unsigned long)folio);
 	return ciphertext_page;
 }
 EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
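The last change here stores a back-pointer to the plaintext folio (previously the page) in the bounce page's private field; completion paths use it to get from the ciphertext back to the pagecache data. A hedged sketch of the read side, assuming the fscrypt_pagecache_folio() accessor as it exists in current kernels; the helper name is illustrative:

#include <linux/fscrypt.h>
#include <linux/pagemap.h>

/* Recover the plaintext pagecache folio that a bounce page encrypts. */
static struct folio *plaintext_of(struct page *bounce_page)
{
	/* reads back what set_page_private() stored above */
	return fscrypt_pagecache_folio(page_folio(bounce_page));
}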
@@ -522,7 +522,7 @@ int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
 	if (io->io_bio)
 		gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
 retry_encrypt:
-	bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
+	bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
 			enc_bytes, 0, gfp_flags);
 	if (IS_ERR(bounce_page)) {
 		ret = PTR_ERR(bounce_page);
@@ -2500,7 +2500,7 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
 		return 0;
 
 retry_encrypt:
-	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
+	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page_folio(page),
 			PAGE_SIZE, 0, gfp_flags);
 	if (IS_ERR(fio->encrypted_page)) {
 		/* flush pending IOs and wait for a while in the ENOMEM case */
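Where ext4 already had a folio in hand, this f2fs call site still carries a struct page, so it crosses the API boundary with page_folio(), which returns the folio containing a given page. The same bridge works for any caller mid-conversion; a sketch, with names other than the two kernel calls being illustrative:

#include <linux/fscrypt.h>
#include <linux/pagemap.h>

/* Legacy call site that still holds a page, using the folio API. */
static struct page *encrypt_legacy_page(struct page *page, gfp_t gfp)
{
	struct folio *folio = page_folio(page);	/* containing folio */

	return fscrypt_encrypt_pagecache_blocks(folio, PAGE_SIZE, 0, gfp);
}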
@@ -310,10 +310,8 @@ static inline void fscrypt_prepare_dentry(struct dentry *dentry,
 /* crypto.c */
 void fscrypt_enqueue_decrypt_work(struct work_struct *);
 
-struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
-					      unsigned int len,
-					      unsigned int offs,
-					      gfp_t gfp_flags);
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+		size_t len, size_t offs, gfp_t gfp_flags);
 int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
 				  unsigned int len, unsigned int offs,
 				  u64 lblk_num, gfp_t gfp_flags);
@@ -480,10 +478,8 @@ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
 {
 }
 
-static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
-							     unsigned int len,
-							     unsigned int offs,
-							     gfp_t gfp_flags)
+static inline struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+		size_t len, size_t offs, gfp_t gfp_flags)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
@@ -1604,34 +1604,6 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
 	return offset;
 }
 
-/**
- * page_mkwrite_check_truncate - check if page was truncated
- * @page: the page to check
- * @inode: the inode to check the page against
- *
- * Returns the number of bytes in the page up to EOF,
- * or -EFAULT if the page was truncated.
- */
-static inline int page_mkwrite_check_truncate(struct page *page,
-					      struct inode *inode)
-{
-	loff_t size = i_size_read(inode);
-	pgoff_t index = size >> PAGE_SHIFT;
-	int offset = offset_in_page(size);
-
-	if (page->mapping != inode->i_mapping)
-		return -EFAULT;
-
-	/* page is wholly inside EOF */
-	if (page->index < index)
-		return PAGE_SIZE;
-	/* page is wholly past EOF */
-	if (page->index > index || !offset)
-		return -EFAULT;
-	/* page is partially inside EOF */
-	return offset;
-}
-
 /**
  * i_blocks_per_folio - How many blocks fit in this folio.
  * @inode: The inode which contains the blocks.
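With ceph converted, page_mkwrite_check_truncate() had no remaining callers; the folio variant kept just above it performs the same EOF/truncation check for ->page_mkwrite handlers. A hedged sketch of a handler using it; the handler name and the surrounding locking policy are illustrative, not taken from this series:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Illustrative ->page_mkwrite handler using the surviving folio API. */
static vm_fault_t demo_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	ssize_t len;

	folio_lock(folio);
	len = folio_mkwrite_check_truncate(folio, inode);
	if (len < 0) {			/* folio truncated or remapped */
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}
	/* ... mark up to 'len' bytes dirty, leave the folio locked ... */
	return VM_FAULT_LOCKED;
}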