netfs: Drop the error arg from netfs_read_subreq_terminated()
Drop the error argument from netfs_read_subreq_terminated() in favour of
passing the value in subreq->error.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-9-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent 751e213f9f
commit 360157829e
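The filesystem-facing change is mechanical: instead of handing the completion status to netfs_read_subreq_terminated() as an argument, a caller now stores it in subreq->error first. A minimal before/after sketch of an ->issue_read() completion path; the fs_issue_read_*() wrappers and the fs_do_read() helper are hypothetical stand-ins for a real backend, not code from this patch:

/* Before: the outcome is passed to the terminator as an argument. */
static void fs_issue_read_old(struct netfs_io_subrequest *subreq)
{
        int err = fs_do_read(subreq);   /* hypothetical backend read */

        netfs_read_subreq_terminated(subreq, err, false);
}

/* After: the outcome is stashed in subreq->error before terminating. */
static void fs_issue_read_new(struct netfs_io_subrequest *subreq)
{
        int err = fs_do_read(subreq);   /* hypothetical backend read */

        subreq->error = err;
        netfs_read_subreq_terminated(subreq, false);
}

The same substitution is applied to every caller in the hunks below.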
@@ -87,7 +87,8 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
                 __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
         }

-        netfs_read_subreq_terminated(subreq, err, false);
+        subreq->error = err;
+        netfs_read_subreq_terminated(subreq, false);
 }

 /**
@@ -246,7 +246,8 @@ static void afs_fetch_data_notify(struct afs_operation *op)
                 subreq->rreq->i_size = req->file_size;
                 if (req->pos + req->actual_len >= req->file_size)
                         __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
-                netfs_read_subreq_terminated(subreq, error, false);
+                subreq->error = error;
+                netfs_read_subreq_terminated(subreq, false);
                 req->subreq = NULL;
         } else if (req->done) {
                 req->done(req);
@@ -301,8 +302,10 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)

         op = afs_alloc_operation(req->key, vnode->volume);
         if (IS_ERR(op)) {
-                if (req->subreq)
-                        netfs_read_subreq_terminated(req->subreq, PTR_ERR(op), false);
+                if (req->subreq) {
+                        req->subreq->error = PTR_ERR(op);
+                        netfs_read_subreq_terminated(req->subreq, false);
+                }
                 return PTR_ERR(op);
         }

@@ -320,8 +323,10 @@ static void afs_read_worker(struct work_struct *work)
         struct afs_read *fsreq;

         fsreq = afs_alloc_read(GFP_NOFS);
-        if (!fsreq)
-                return netfs_read_subreq_terminated(subreq, -ENOMEM, false);
+        if (!fsreq) {
+                subreq->error = -ENOMEM;
+                return netfs_read_subreq_terminated(subreq, false);
+        }

         fsreq->subreq = subreq;
         fsreq->pos = subreq->start + subreq->transferred;
@@ -253,8 +253,9 @@ static void finish_netfs_read(struct ceph_osd_request *req)
                 subreq->transferred = err;
                 err = 0;
         }
+        subreq->error = err;
         trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
-        netfs_read_subreq_terminated(subreq, err, false);
+        netfs_read_subreq_terminated(subreq, false);
         iput(req->r_inode);
         ceph_dec_osd_stopping_blocker(fsc->mdsc);
 }
@@ -314,7 +315,9 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)

         ceph_mdsc_put_request(req);
 out:
-        netfs_read_subreq_terminated(subreq, err, false);
+        subreq->error = err;
+        trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
+        netfs_read_subreq_terminated(subreq, false);
         return true;
 }

@@ -426,8 +429,10 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
         ceph_osdc_start_request(req->r_osdc, req);
 out:
         ceph_osdc_put_request(req);
-        if (err)
-                netfs_read_subreq_terminated(subreq, err, false);
+        if (err) {
+                subreq->error = err;
+                netfs_read_subreq_terminated(subreq, false);
+        }
         doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
 }

@@ -148,14 +148,13 @@ static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error
 {
         struct netfs_io_subrequest *subreq = priv;

-        if (transferred_or_error < 0) {
-                netfs_read_subreq_terminated(subreq, transferred_or_error, was_async);
-                return;
-        }
-
-        if (transferred_or_error > 0)
+        if (transferred_or_error > 0) {
                 subreq->transferred += transferred_or_error;
-        netfs_read_subreq_terminated(subreq, 0, was_async);
+                subreq->error = 0;
+        } else {
+                subreq->error = transferred_or_error;
+        }
+        netfs_read_subreq_terminated(subreq, was_async);
 }

 /*
@@ -255,7 +254,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
                         if (slice < 0)
                                 goto prep_iter_failed;
                         __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
-                        netfs_read_subreq_terminated(subreq, 0, false);
+                        subreq->error = 0;
+                        netfs_read_subreq_terminated(subreq, false);
                         goto done;
                 }

@@ -191,7 +191,20 @@ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq
         }

         memset(subreq, 0, kmem_cache_size(cache));
-        INIT_WORK(&subreq->work, NULL);
+
+        switch (rreq->origin) {
+        case NETFS_READAHEAD:
+        case NETFS_READPAGE:
+        case NETFS_READ_GAPS:
+        case NETFS_READ_FOR_WRITE:
+        case NETFS_DIO_READ:
+                INIT_WORK(&subreq->work, netfs_read_subreq_termination_worker);
+                break;
+        default:
+                INIT_WORK(&subreq->work, NULL);
+                break;
+        }
+
         INIT_LIST_HEAD(&subreq->rreq_link);
         refcount_set(&subreq->ref, 2);
         subreq->rreq = rreq;
@@ -452,28 +452,26 @@ EXPORT_SYMBOL(netfs_read_subreq_progress);
 /**
  * netfs_read_subreq_terminated - Note the termination of an I/O operation.
  * @subreq: The I/O request that has terminated.
- * @error: Error code indicating type of completion.
- * @was_async: The termination was asynchronous
+ * @was_async: True if we're in an asynchronous context.
  *
  * This tells the read helper that a contributory I/O operation has terminated,
  * one way or another, and that it should integrate the results.
  *
- * The caller indicates the outcome of the operation through @error, supplying
- * 0 to indicate a successful or retryable transfer (if NETFS_SREQ_NEED_RETRY
- * is set) or a negative error code. The helper will look after reissuing I/O
- * operations as appropriate and writing downloaded data to the cache.
+ * The caller indicates the outcome of the operation through @subreq->error,
+ * supplying 0 to indicate a successful or retryable transfer (if
+ * NETFS_SREQ_NEED_RETRY is set) or a negative error code. The helper will
+ * look after reissuing I/O operations as appropriate and writing downloaded
+ * data to the cache.
 *
  * Before calling, the filesystem should update subreq->transferred to track
  * the amount of data copied into the output buffer.
  *
  * If @was_async is true, the caller might be running in softirq or interrupt
  * context and we can't sleep.
  */
-void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
-                                  int error, bool was_async)
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, bool was_async)
 {
         struct netfs_io_request *rreq = subreq->rreq;

         might_sleep();

         switch (subreq->source) {
         case NETFS_READ_FROM_CACHE:
                 netfs_stat(&netfs_n_rh_read_done);
@@ -491,7 +489,7 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
          * If the read completed validly short, then we can clear the
          * tail before going on to unlock the folios.
          */
-        if (error == 0 && subreq->transferred < subreq->len &&
+        if (subreq->error == 0 && subreq->transferred < subreq->len &&
             (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags) ||
              test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags))) {
                 netfs_clear_unread(subreq);
@@ -511,7 +509,7 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
         /* Deal with retry requests, short reads and errors. If we retry
          * but don't make progress, we abandon the attempt.
          */
-        if (!error && subreq->transferred < subreq->len) {
+        if (!subreq->error && subreq->transferred < subreq->len) {
                 if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
                         trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
                 } else {
@@ -528,16 +526,15 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
                                 set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
                         } else {
                                 __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
-                                error = -ENODATA;
+                                subreq->error = -ENODATA;
                         }
                 }
         }

-        subreq->error = error;
         trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

-        if (unlikely(error < 0)) {
-                trace_netfs_failure(rreq, subreq, error, netfs_fail_read);
+        if (unlikely(subreq->error < 0)) {
+                trace_netfs_failure(rreq, subreq, subreq->error, netfs_fail_read);
                 if (subreq->source == NETFS_READ_FROM_CACHE) {
                         netfs_stat(&netfs_n_rh_read_failed);
                 } else {
@@ -553,3 +550,19 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
         netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
 }
 EXPORT_SYMBOL(netfs_read_subreq_terminated);
+
+/**
+ * netfs_read_subreq_termination_worker - Workqueue helper for read termination
+ * @work: The subreq->work in the I/O request that has been terminated.
+ *
+ * Helper function to jump to netfs_read_subreq_terminated() from the
+ * subrequest work item.
+ */
+void netfs_read_subreq_termination_worker(struct work_struct *work)
+{
+        struct netfs_io_subrequest *subreq =
+                container_of(work, struct netfs_io_subrequest, work);
+
+        netfs_read_subreq_terminated(subreq, false);
+}
+EXPORT_SYMBOL(netfs_read_subreq_termination_worker);
@@ -314,8 +314,10 @@ static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
                              &nfs_async_read_completion_ops);

         netfs = nfs_netfs_alloc(sreq);
-        if (!netfs)
-                return netfs_read_subreq_terminated(sreq, -ENOMEM, false);
+        if (!netfs) {
+                sreq->error = -ENOMEM;
+                return netfs_read_subreq_terminated(sreq, false);
+        }

         pgio.pg_netfs = netfs; /* used in completion */

@@ -74,7 +74,8 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
          */
         netfs->sreq->transferred = min_t(s64, netfs->sreq->len,
                                          atomic64_read(&netfs->transferred));
-        netfs_read_subreq_terminated(netfs->sreq, netfs->error, false);
+        netfs->sreq->error = netfs->error;
+        netfs_read_subreq_terminated(netfs->sreq, false);
         kfree(netfs);
 }
 static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
@@ -1258,14 +1258,6 @@ openRetry:
         return rc;
 }

-static void cifs_readv_worker(struct work_struct *work)
-{
-        struct cifs_io_subrequest *rdata =
-                container_of(work, struct cifs_io_subrequest, subreq.work);
-
-        netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
-}
-
 static void
 cifs_readv_callback(struct mid_q_entry *mid)
 {
@@ -1333,8 +1325,8 @@ cifs_readv_callback(struct mid_q_entry *mid)
         }

         rdata->credits.value = 0;
+        rdata->subreq.error = rdata->result;
         rdata->subreq.transferred += rdata->got_bytes;
-        INIT_WORK(&rdata->subreq.work, cifs_readv_worker);
         queue_work(cifsiod_wq, &rdata->subreq.work);
         release_mid(mid);
         add_credits(server, &credits, 0);
@@ -227,7 +227,8 @@ static void cifs_issue_read(struct netfs_io_subrequest *subreq)
         return;

 failed:
-        netfs_read_subreq_terminated(subreq, rc, false);
+        subreq->error = rc;
+        netfs_read_subreq_terminated(subreq, false);
 }

 /*
@@ -4500,14 +4500,6 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
         return rc;
 }

-static void smb2_readv_worker(struct work_struct *work)
-{
-        struct cifs_io_subrequest *rdata =
-                container_of(work, struct cifs_io_subrequest, subreq.work);
-
-        netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
-}
-
 static void
 smb2_readv_callback(struct mid_q_entry *mid)
 {
@@ -4621,9 +4613,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
                               server->credits, server->in_flight,
                               0, cifs_trace_rw_credits_read_response_clear);
         rdata->credits.value = 0;
+        rdata->subreq.error = rdata->result;
         rdata->subreq.transferred += rdata->got_bytes;
         trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
-        INIT_WORK(&rdata->subreq.work, smb2_readv_worker);
         queue_work(cifsiod_wq, &rdata->subreq.work);
         release_mid(mid);
         trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
@@ -427,10 +427,9 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp);
 vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);

 /* (Sub)request management API. */
-void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
-                                bool was_async);
-void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
-                                  int error, bool was_async);
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq, bool was_async);
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, bool was_async);
+void netfs_read_subreq_termination_worker(struct work_struct *work);
 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
                           enum netfs_sreq_ref_trace what);
 void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
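With cifs_readv_worker() and smb2_readv_worker() gone, termination from an atomic SMB callback relies on the work item that netfs_alloc_subrequest() now initialises to netfs_read_subreq_termination_worker() for read-type requests. A condensed sketch of that pattern, distilled from the cifs_readv_callback()/smb2_readv_callback() hunks above; the fs_readv_callback() wrapper name is illustrative, not part of the patch:

static void fs_readv_callback(struct cifs_io_subrequest *rdata)
{
        /* Record the outcome on the subrequest itself... */
        rdata->subreq.error = rdata->result;
        rdata->subreq.transferred += rdata->got_bytes;

        /* ...then defer to process context: subreq.work already points at
         * netfs_read_subreq_termination_worker(), which simply calls
         * netfs_read_subreq_terminated(subreq, false).
         */
        queue_work(cifsiod_wq, &rdata->subreq.work);
}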