mirror of https://github.com/torvalds/linux.git

Until this point, the kernel can use hardware-wrapped keys to do encryption if userspace provides one -- specifically a key in ephemerally-wrapped form. However, no generic way has been provided for userspace to get such a key in the first place.

Getting such a key is a two-step process. First, the key needs to be imported from a raw key or generated by the hardware, producing a key in long-term wrapped form. This happens once in the whole lifetime of the key. Second, the long-term wrapped key needs to be converted into ephemerally-wrapped form. This happens each time the key is "unlocked".

In Android, these operations are supported in a generic way through KeyMint, a userspace abstraction layer. However, that method is Android-specific and can't be used on other Linux systems, may rely on proprietary libraries, and also misleads people into supporting KeyMint features like rollback resistance that make sense for other KeyMint keys but don't make sense for hardware-wrapped inline encryption keys.

Therefore, this patch provides a generic kernel interface for these operations by introducing new block device ioctls:

- BLKCRYPTOIMPORTKEY: convert a raw key to long-term wrapped form.
- BLKCRYPTOGENERATEKEY: have the hardware generate a new key, then return it in long-term wrapped form.
- BLKCRYPTOPREPAREKEY: convert a key from long-term wrapped form to ephemerally-wrapped form.

These ioctls are implemented using new operations in blk_crypto_ll_ops.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Tested-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org> # sm8650
Link: https://lore.kernel.org/r/20250204060041.409950-4-ebiggers@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
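To make the two-step flow above concrete, here is a minimal userspace sketch of importing a raw key once and then "unlocking" it. Only the ioctl names and the import/prepare flow come from the commit message; the argument structure names, their field layouts, the <linux/blk-crypto.h> include, and the device path are assumptions made for illustration, so check the kernel's uapi headers and documentation for the real definitions.

/*
 * Hypothetical userspace sketch of the two-step hardware-wrapped key flow.
 * ASSUMPTIONS: struct blk_crypto_import_key_arg / blk_crypto_prepare_key_arg
 * and their field names, plus the uapi header location, are guesses based on
 * the commit message, not definitions taken from this file.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blk-crypto.h>   /* assumed location of the ioctl definitions */

int main(int argc, char **argv)
{
        unsigned char raw[64] = { 0 };          /* raw key, e.g. from a KDF */
        unsigned char lt[128], eph[128];        /* wrapped-key buffers */
        int fd;

        if (argc < 2)
                return 2;
        fd = open(argv[1], O_RDONLY);           /* e.g. the data block device */
        if (fd < 0)
                return 1;

        /* Step 1 (once in the key's lifetime): raw -> long-term wrapped. */
        struct blk_crypto_import_key_arg imp = {
                .raw_key_ptr  = (uintptr_t)raw, .raw_key_size = sizeof(raw),
                .lt_key_ptr   = (uintptr_t)lt,  .lt_key_size  = sizeof(lt),
        };
        if (ioctl(fd, BLKCRYPTOIMPORTKEY, &imp) != 0)
                goto err;

        /* Step 2 (each unlock): long-term wrapped -> ephemerally wrapped. */
        struct blk_crypto_prepare_key_arg prep = {
                .lt_key_ptr   = (uintptr_t)lt,  .lt_key_size  = imp.lt_key_size,
                .eph_key_ptr  = (uintptr_t)eph, .eph_key_size = sizeof(eph),
        };
        if (ioctl(fd, BLKCRYPTOPREPAREKEY, &prep) != 0)
                goto err;

        printf("got %llu-byte ephemerally-wrapped key\n",
               (unsigned long long)prep.eph_key_size);
        close(fd);
        return 0;
err:
        perror("ioctl");
        close(fd);
        return 1;
}

Per the opening paragraph of the commit message, the ephemerally-wrapped key produced in step 2 is the form the kernel already accepts for inline encryption; the long-term wrapped blob from step 1 is what userspace stores on disk.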
251 lines
6.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blk-mq.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
        const char *name; /* name of this mode, shown in sysfs */
        const char *cipher_str; /* crypto API name (for fallback case) */
        unsigned int keysize; /* key size in bytes */
        unsigned int security_strength; /* security strength in bytes */
        unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

int blk_crypto_sysfs_register(struct gendisk *disk);

void blk_crypto_sysfs_unregister(struct gendisk *disk);

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                             unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
                             struct bio_crypt_ctx *bc2);

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
                                                struct bio *bio)
{
        return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
                                       bio->bi_crypt_context);
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
                                                 struct bio *bio)
{
        return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
                                       bio->bi_iter.bi_size, req->crypt_ctx);
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
                                          struct request *next)
{
        return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
                                       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
        rq->crypt_ctx = NULL;
        rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
        return rq->crypt_ctx;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
        return rq->crypt_keyslot;
}

blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
                                    const struct blk_crypto_key *key,
                                    struct blk_crypto_keyslot **slot_ptr);

void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);

int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
                           const struct blk_crypto_key *key);

bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
                                const struct blk_crypto_config *cfg);

int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
                     void __user *argp);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline int blk_crypto_sysfs_register(struct gendisk *disk)
{
        return 0;
}

static inline void blk_crypto_sysfs_unregister(struct gendisk *disk)
{
}

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
                                               struct bio *bio)
{
        return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
                                                 struct bio *bio)
{
        return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
                                                struct bio *bio)
{
        return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
                                          struct request *next)
{
        return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
        return false;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
        return false;
}

static inline int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
                                   void __user *argp)
{
        return -ENOTTY;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
        if (bio_has_crypt_ctx(bio))
                __bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
        if (bio_has_crypt_ctx(bio))
                __bio_crypt_free_ctx(bio);
}

static inline void bio_crypt_do_front_merge(struct request *rq,
                                            struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        if (bio_has_crypt_ctx(bio))
                memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
                       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
        if (bio_has_crypt_ctx(*bio_ptr))
                return __blk_crypto_bio_prep(bio_ptr);
        return true;
}

blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
{
        if (blk_crypto_rq_is_encrypted(rq))
                return __blk_crypto_rq_get_keyslot(rq);
        return BLK_STS_OK;
}

void __blk_crypto_rq_put_keyslot(struct request *rq);
static inline void blk_crypto_rq_put_keyslot(struct request *rq)
{
        if (blk_crypto_rq_has_keyslot(rq))
                __blk_crypto_rq_put_keyslot(rq);
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
        if (blk_crypto_rq_is_encrypted(rq))
                __blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
                             gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *                          is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *         @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
                                         gfp_t gfp_mask)
{
        if (bio_has_crypt_ctx(bio))
                return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
        return 0;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
        pr_warn_once("crypto API fallback is disabled\n");
        return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
        pr_warn_once("crypto API fallback disabled; failing request.\n");
        (*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
        return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
        return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */