mirror of https://github.com/torvalds/linux.git
crypto: scatterwalk - simplify map and unmap calling convention
Now that the address returned by scatterwalk_map() is always being stored into the same struct scatter_walk that is passed in, make scatterwalk_map() do so itself and return void.

Similarly, now that scatterwalk_unmap() is always being passed the address field within a struct scatter_walk, make scatterwalk_unmap() take a pointer to struct scatter_walk instead of the address directly.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent ea3d35467b
commit 7450ebd29c
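In caller terms the change looks roughly like the following sketch (illustrative only, with a hypothetical walk, buf and n; not code taken from the patch):

/* Before: the caller receives the mapped address and later passes it
 * back to scatterwalk_unmap(). */
void *addr = scatterwalk_map(&walk);
memcpy(buf, addr, n);
scatterwalk_unmap(addr);

/* After: scatterwalk_map() records the address in the walk itself
 * (written to walk.__addr, readable as walk.addr), and
 * scatterwalk_unmap() takes the walk rather than the address. */
scatterwalk_map(&walk);
memcpy(buf, walk.addr, n);
scatterwalk_unmap(&walk);

The mapped address now lives in the walk state, so callers no longer have to carry a separate virtual-address variable between map and unmap.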
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -871,8 +871,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
                 goto out;
         }
 
         /* XXX */
-        scatterwalk_unmap(gw->walk.addr);
-
+        scatterwalk_unmap(&gw->walk);
         gw->ptr = gw->buf;
         gw->nbytes = sizeof(gw->buf);
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -39,18 +39,6 @@ static const struct crypto_type crypto_skcipher_type;
 
 static int skcipher_walk_next(struct skcipher_walk *walk);
 
-static inline void skcipher_map_src(struct skcipher_walk *walk)
-{
-        /* XXX */
-        walk->in.__addr = scatterwalk_map(&walk->in);
-}
-
-static inline void skcipher_map_dst(struct skcipher_walk *walk)
-{
-        /* XXX */
-        walk->out.__addr = scatterwalk_map(&walk->out);
-}
-
 static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
 {
         return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
@@ -101,8 +89,8 @@ int skcipher_walk_done(struct skcipher_walk *walk, int res)
                 scatterwalk_done_src(&walk->in, n);
         } else if (walk->flags & SKCIPHER_WALK_COPY) {
                 scatterwalk_advance(&walk->in, n);
-                skcipher_map_dst(walk);
-                memcpy(walk->dst.virt.addr, walk->page, n);
+                scatterwalk_map(&walk->out);
+                memcpy(walk->out.addr, walk->page, n);
         } else { /* SKCIPHER_WALK_SLOW */
                 if (res > 0) {
                         /*
@@ -186,9 +174,9 @@ static int skcipher_next_copy(struct skcipher_walk *walk)
 {
         void *tmp = walk->page;
 
-        skcipher_map_src(walk);
-        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
-        scatterwalk_unmap(walk->src.virt.addr);
+        scatterwalk_map(&walk->in);
+        memcpy(tmp, walk->in.addr, walk->nbytes);
+        scatterwalk_unmap(&walk->in);
         /*
          * walk->in is advanced later when the number of bytes actually
          * processed (which might be less than walk->nbytes) is known.
@@ -208,12 +196,12 @@ static int skcipher_next_fast(struct skcipher_walk *walk)
         diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
                 (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
 
-        skcipher_map_dst(walk);
-        walk->in.__addr = walk->dst.virt.addr;
+        scatterwalk_map(&walk->out);
+        walk->in.__addr = walk->out.__addr;
 
         if (diff) {
                 walk->flags |= SKCIPHER_WALK_DIFF;
-                skcipher_map_src(walk);
+                scatterwalk_map(&walk->in);
         }
 
         return 0;
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -97,23 +97,28 @@ static inline void scatterwalk_get_sglist(struct scatter_walk *walk,
         scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2);
 }
 
-static inline void *scatterwalk_map(struct scatter_walk *walk)
+static inline void scatterwalk_map(struct scatter_walk *walk)
 {
         struct page *base_page = sg_page(walk->sg);
 
-        if (IS_ENABLED(CONFIG_HIGHMEM))
-                return kmap_local_page(base_page + (walk->offset >> PAGE_SHIFT)) +
-                       offset_in_page(walk->offset);
-        /*
-         * When !HIGHMEM we allow the walker to return segments that span a page
-         * boundary; see scatterwalk_clamp().  To make it clear that in this
-         * case we're working in the linear buffer of the whole sg entry in the
-         * kernel's direct map rather than within the mapped buffer of a single
-         * page, compute the address as an offset from the page_address() of the
-         * first page of the sg entry.  Either way the result is the address in
-         * the direct map, but this makes it clearer what is really going on.
-         */
-        return page_address(base_page) + walk->offset;
+        if (IS_ENABLED(CONFIG_HIGHMEM)) {
+                walk->__addr = kmap_local_page(base_page +
+                                               (walk->offset >> PAGE_SHIFT)) +
+                               offset_in_page(walk->offset);
+        } else {
+                /*
+                 * When !HIGHMEM we allow the walker to return segments that
+                 * span a page boundary; see scatterwalk_clamp().  To make it
+                 * clear that in this case we're working in the linear buffer of
+                 * the whole sg entry in the kernel's direct map rather than
+                 * within the mapped buffer of a single page, compute the
+                 * address as an offset from the page_address() of the first
+                 * page of the sg entry.  Either way the result is the address
+                 * in the direct map, but this makes it clearer what is really
+                 * going on.
+                 */
+                walk->__addr = page_address(base_page) + walk->offset;
+        }
 }
 
 /**
@@ -132,14 +137,14 @@ static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
 {
         unsigned int nbytes = scatterwalk_clamp(walk, total);
 
-        walk->__addr = scatterwalk_map(walk);
+        scatterwalk_map(walk);
         return nbytes;
 }
 
-static inline void scatterwalk_unmap(const void *vaddr)
+static inline void scatterwalk_unmap(struct scatter_walk *walk)
 {
         if (IS_ENABLED(CONFIG_HIGHMEM))
-                kunmap_local(vaddr);
+                kunmap_local(walk->__addr);
 }
 
 static inline void scatterwalk_advance(struct scatter_walk *walk,
@@ -159,7 +164,7 @@ static inline void scatterwalk_advance(struct scatter_walk *walk,
 static inline void scatterwalk_done_src(struct scatter_walk *walk,
                                         unsigned int nbytes)
 {
-        scatterwalk_unmap(walk->addr);
+        scatterwalk_unmap(walk);
         scatterwalk_advance(walk, nbytes);
 }
 
@@ -175,7 +180,7 @@ static inline void scatterwalk_done_src(struct scatter_walk *walk,
 static inline void scatterwalk_done_dst(struct scatter_walk *walk,
                                         unsigned int nbytes)
 {
-        scatterwalk_unmap(walk->addr);
+        scatterwalk_unmap(walk);
         /*
          * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
          * relying on flush_dcache_page() being a no-op when not implemented,
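The diff above is truncated by the page. As a usage illustration only, a hypothetical helper (not part of this commit, and assuming the scatterwalk interfaces shown above plus the usual kernel headers) could walk a scatterlist under the new convention like this:

#include <linux/string.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>

/* Hypothetical example: copy 'len' bytes out of a scatterlist. */
static void example_copy_from_sglist(u8 *dst, struct scatterlist *sg,
                                     unsigned int len)
{
        struct scatter_walk walk;

        scatterwalk_start(&walk, sg);
        while (len) {
                unsigned int n = scatterwalk_next(&walk, len);

                /* walk.addr was filled in by scatterwalk_map() inside
                 * scatterwalk_next(); no returned pointer to track. */
                memcpy(dst, walk.addr, n);
                /* Unmap via the walk itself, then advance it. */
                scatterwalk_done_src(&walk, n);
                dst += n;
                len -= n;
        }
}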