mirror of https://github.com/torvalds/linux.git

init_once is called when an object doesn't come from the cache, and hence needs initial clearing of certain members. While the whole struct could get cleared by memset() in that case, a few of the cached members are large enough that this may cause unnecessary overhead if the caches used aren't large enough to satisfy the workload. For those cases, some churn of kmalloc+kfree is to be expected.

Ensure that the 3 users that need clearing put the members they need cleared at the start of the struct, and wrap the rest of the struct in a struct_group() so the offset is known.

While at it, improve the interaction with KASAN such that when/if KASAN writes to members inside the struct that should be retained over caching, it won't trip over itself. For rw and net, the retaining of the iovec over caching is disabled if KASAN is enabled. A helper will free and clear those members in that case.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
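The layout trick the message describes is easiest to see in miniature. Below is a hedged sketch of the pattern, not kernel code: struct example_cached and example_alloc_new() are invented names, while struct_group(), offsetof() and kmalloc() are the real kernel facilities involved. Members that must be zeroed on a fresh (non-cache) allocation sit at the front of the struct; struct_group() wraps everything else, so offsetof() on the group name yields exactly the number of leading bytes to memset():

#include <linux/slab.h>
#include <linux/stddef.h>	/* struct_group(), offsetof() */
#include <linux/string.h>
#include <linux/uio.h>

struct example_cached {
	/* must be zero on a freshly allocated object */
	size_t		bytes_done;
	struct iovec	*free_iovec;
	/* the rest is always set up before use, so never bulk-cleared */
	struct_group(rest,
		struct iov_iter	iter;
		int		free_iov_nr;
	);
};

static struct example_cached *example_alloc_new(void)
{
	struct example_cached *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	/* clear just the head instead of all sizeof(*obj) bytes */
	if (obj)
		memset(obj, 0, offsetof(struct example_cached, rest));
	return obj;
}

An object recycled from the cache would skip this memset entirely, which is what allows a member like free_iovec to be retained across a cache round trip.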
46 lines · 1.5 KiB · C
// SPDX-License-Identifier: GPL-2.0

#include <linux/pagemap.h>

struct io_meta_state {
	u32			seed;
	struct iov_iter_state	iter_meta;
};

struct io_async_rw {
	size_t				bytes_done;
	struct iovec			*free_iovec;
	struct_group(clear,
		struct iov_iter			iter;
		struct iov_iter_state		iter_state;
		struct iovec			fast_iov;
		int				free_iov_nr;
		/*
		 * wpq is for buffered io, while meta fields are used with
		 * direct io
		 */
		union {
			struct wait_page_queue		wpq;
			struct {
				struct uio_meta			meta;
				struct io_meta_state		meta_state;
			};
		};
	);
};
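/*
 * Illustrative sketch only, not part of the upstream header: the commit
 * message says that under KASAN the iovec is not retained over caching,
 * and that a helper frees and clears those members instead. The helper
 * name below is made up; IS_ENABLED(), CONFIG_KASAN and kfree() are real
 * kernel APIs. Note that free_iov_nr is addressable directly even though
 * it sits inside the struct_group(): the macro keeps members accessible
 * both by name and via the group.
 */
static inline void io_rw_iovec_drop_if_kasan(struct io_async_rw *rw)
{
	if (IS_ENABLED(CONFIG_KASAN)) {
		kfree(rw->free_iovec);	/* kfree(NULL) is a no-op */
		rw->free_iovec = NULL;
		rw->free_iov_nr = 0;
	}
}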
int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_read(struct io_kiocb *req, unsigned int issue_flags);
int io_write(struct io_kiocb *req, unsigned int issue_flags);
void io_readv_writev_cleanup(struct io_kiocb *req);
void io_rw_fail(struct io_kiocb *req);
void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts);
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags);
void io_rw_cache_free(const void *entry);
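io_rw_cache_free() is the teardown callback for cache entries. Its body lives in io_uring/rw.c and is not shown on this page; based on the prototype above and the retained-iovec design from the commit message, a plausible reconstruction (an inference, not the verified upstream code) would release any retained iovec before freeing the entry itself:

void io_rw_cache_free(const void *entry)
{
	struct io_async_rw *rw = (struct io_async_rw *)entry;

	/* drop any iovec retained across cache round trips */
	kfree(rw->free_iovec);
	kfree(rw);
}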