Diffstat (limited to 'fs/crypto')
-rw-r--r--  fs/crypto/bio.c               29
-rw-r--r--  fs/crypto/crypto.c           124
-rw-r--r--  fs/crypto/fscrypt_private.h   25
-rw-r--r--  fs/crypto/keyring.c            6
-rw-r--r--  fs/crypto/keysetup.c         158
-rw-r--r--  fs/crypto/keysetup_v1.c        4
-rw-r--r--  fs/crypto/policy.c            41
7 files changed, 122 insertions(+), 265 deletions(-)
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 82da2510721f..1f4b8a277060 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -26,7 +26,7 @@
#include <linux/namei.h>
#include "fscrypt_private.h"
-static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
+void fscrypt_decrypt_bio(struct bio *bio)
{
struct bio_vec *bv;
struct bvec_iter_all iter_all;
@@ -37,37 +37,10 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
bv->bv_offset);
if (ret)
SetPageError(page);
- else if (done)
- SetPageUptodate(page);
- if (done)
- unlock_page(page);
}
}
-
-void fscrypt_decrypt_bio(struct bio *bio)
-{
- __fscrypt_decrypt_bio(bio, false);
-}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
-static void completion_pages(struct work_struct *work)
-{
- struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
- struct bio *bio = ctx->bio;
-
- __fscrypt_decrypt_bio(bio, true);
- fscrypt_release_ctx(ctx);
- bio_put(bio);
-}
-
-void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
-{
- INIT_WORK(&ctx->work, completion_pages);
- ctx->bio = bio;
- fscrypt_enqueue_decrypt_work(&ctx->work);
-}
-EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
-
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 32a7ad0098cc..3719efa546c6 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -27,29 +27,20 @@
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
-#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
-static unsigned int num_prealloc_crypto_ctxs = 128;
module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
"Number of crypto pages to preallocate");
-module_param(num_prealloc_crypto_ctxs, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
- "Number of crypto contexts to preallocate");
static mempool_t *fscrypt_bounce_page_pool = NULL;
-static LIST_HEAD(fscrypt_free_ctxs);
-static DEFINE_SPINLOCK(fscrypt_ctx_lock);
-
static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
-static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
@@ -58,62 +49,6 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work)
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
-/**
- * fscrypt_release_ctx() - Release a decryption context
- * @ctx: The decryption context to release.
- *
- * If the decryption context was allocated from the pre-allocated pool, return
- * it to that pool. Else, free it.
- */
-void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
- unsigned long flags;
-
- if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
- kmem_cache_free(fscrypt_ctx_cachep, ctx);
- } else {
- spin_lock_irqsave(&fscrypt_ctx_lock, flags);
- list_add(&ctx->free_list, &fscrypt_free_ctxs);
- spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
- }
-}
-EXPORT_SYMBOL(fscrypt_release_ctx);
-
-/**
- * fscrypt_get_ctx() - Get a decryption context
- * @gfp_flags: The gfp flag for memory allocation
- *
- * Allocate and initialize a decryption context.
- *
- * Return: A new decryption context on success; an ERR_PTR() otherwise.
- */
-struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
-{
- struct fscrypt_ctx *ctx;
- unsigned long flags;
-
- /*
- * First try getting a ctx from the free list so that we don't have to
- * call into the slab allocator.
- */
- spin_lock_irqsave(&fscrypt_ctx_lock, flags);
- ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
- struct fscrypt_ctx, free_list);
- if (ctx)
- list_del(&ctx->free_list);
- spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
- if (!ctx) {
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- }
- return ctx;
-}
-EXPORT_SYMBOL(fscrypt_get_ctx);
-
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
@@ -138,14 +73,17 @@ EXPORT_SYMBOL(fscrypt_free_bounce_page);
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
const struct fscrypt_info *ci)
{
+ u8 flags = fscrypt_policy_flags(&ci->ci_policy);
+
memset(iv, 0, ci->ci_mode->ivsize);
- iv->lblk_num = cpu_to_le64(lblk_num);
- if (fscrypt_is_direct_key_policy(&ci->ci_policy))
+ if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
+ WARN_ON_ONCE((u32)lblk_num != lblk_num);
+ lblk_num |= (u64)ci->ci_inode->i_ino << 32;
+ } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);
-
- if (ci->ci_essiv_tfm != NULL)
- crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
+ }
+ iv->lblk_num = cpu_to_le64(lblk_num);
}
/* Encrypt or decrypt a single filesystem block of file contents */
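A worked example of the IV layout produced by the new fscrypt_generate_iv() under IV_INO_LBLK_64: for inode number 0x1234 and logical block 5, lblk_num becomes (0x1234 << 32) | 5 = 0x0000123400000005, stored little-endian in the first 8 bytes of the IV, with the remaining bytes zero. A hedged userspace sketch of the same computation, e.g. for a test harness (nothing here is kernel API; it only mirrors the logic above):

#include <stdint.h>
#include <string.h>

/* Expected 16-byte IV for an IV_INO_LBLK_64 file, mirroring the diff above. */
static void expected_iv_ino_lblk_64(uint8_t iv[16], uint32_t ino, uint32_t lblk)
{
	uint64_t v = ((uint64_t)ino << 32) | lblk;	/* inode in the high 32 bits */
	int i;

	memset(iv, 0, 16);
	for (i = 0; i < 8; i++)				/* cpu_to_le64() equivalent */
		iv[i] = (uint8_t)(v >> (8 * i));
}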
@@ -396,17 +334,6 @@ const struct dentry_operations fscrypt_d_ops = {
.d_revalidate = fscrypt_d_revalidate,
};
-static void fscrypt_destroy(void)
-{
- struct fscrypt_ctx *pos, *n;
-
- list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
- kmem_cache_free(fscrypt_ctx_cachep, pos);
- INIT_LIST_HEAD(&fscrypt_free_ctxs);
- mempool_destroy(fscrypt_bounce_page_pool);
- fscrypt_bounce_page_pool = NULL;
-}
-
/**
* fscrypt_initialize() - allocate major buffers for fs encryption.
* @cop_flags: fscrypt operations flags
@@ -414,11 +341,11 @@ static void fscrypt_destroy(void)
* We only call this when we start accessing encrypted files, since it
* results in memory getting allocated that wouldn't otherwise be used.
*
- * Return: Zero on success, non-zero otherwise.
+ * Return: 0 on success; -errno on failure
*/
int fscrypt_initialize(unsigned int cop_flags)
{
- int i, res = -ENOMEM;
+ int err = 0;
/* No need to allocate a bounce page pool if this FS won't use it. */
if (cop_flags & FS_CFLG_OWN_PAGES)
@@ -426,29 +353,18 @@ int fscrypt_initialize(unsigned int cop_flags)
mutex_lock(&fscrypt_init_mutex);
if (fscrypt_bounce_page_pool)
- goto already_initialized;
-
- for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
- struct fscrypt_ctx *ctx;
-
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
- if (!ctx)
- goto fail;
- list_add(&ctx->free_list, &fscrypt_free_ctxs);
- }
+ goto out_unlock;
+ err = -ENOMEM;
fscrypt_bounce_page_pool =
mempool_create_page_pool(num_prealloc_crypto_pages, 0);
if (!fscrypt_bounce_page_pool)
- goto fail;
+ goto out_unlock;
-already_initialized:
- mutex_unlock(&fscrypt_init_mutex);
- return 0;
-fail:
- fscrypt_destroy();
+ err = 0;
+out_unlock:
mutex_unlock(&fscrypt_init_mutex);
- return res;
+ return err;
}
void fscrypt_msg(const struct inode *inode, const char *level,
@@ -494,13 +410,9 @@ static int __init fscrypt_init(void)
if (!fscrypt_read_workqueue)
goto fail;
- fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
- if (!fscrypt_ctx_cachep)
- goto fail_free_queue;
-
fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
if (!fscrypt_info_cachep)
- goto fail_free_ctx;
+ goto fail_free_queue;
err = fscrypt_init_keyring();
if (err)
@@ -510,8 +422,6 @@ static int __init fscrypt_init(void)
fail_free_info:
kmem_cache_destroy(fscrypt_info_cachep);
-fail_free_ctx:
- kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
destroy_workqueue(fscrypt_read_workqueue);
fail:
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index e84efc01512e..130b50e5a011 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -163,11 +163,8 @@ struct fscrypt_info {
/* The actual crypto transform used for encryption and decryption */
struct crypto_skcipher *ci_ctfm;
- /*
- * Cipher for ESSIV IV generation. Only set for CBC contents
- * encryption, otherwise is NULL.
- */
- struct crypto_cipher *ci_essiv_tfm;
+ /* True if the key should be freed when this fscrypt_info is freed */
+ bool ci_owns_key;
/*
* Encryption mode used for this inode. It corresponds to either the
@@ -209,8 +206,6 @@ typedef enum {
FS_ENCRYPT,
} fscrypt_direction_t;
-#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
-
static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
u32 filenames_mode)
{
@@ -289,7 +284,8 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
*/
#define HKDF_CONTEXT_KEY_IDENTIFIER 1
#define HKDF_CONTEXT_PER_FILE_KEY 2
-#define HKDF_CONTEXT_PER_MODE_KEY 3
+#define HKDF_CONTEXT_DIRECT_KEY 3
+#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4
extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context,
const u8 *info, unsigned int infolen,
@@ -386,8 +382,14 @@ struct fscrypt_master_key {
struct list_head mk_decrypted_inodes;
spinlock_t mk_decrypted_inodes_lock;
- /* Per-mode tfms for DIRECT_KEY policies, allocated on-demand */
- struct crypto_skcipher *mk_mode_keys[__FSCRYPT_MODE_MAX + 1];
+ /* Crypto API transforms for DIRECT_KEY policies, allocated on-demand */
+ struct crypto_skcipher *mk_direct_tfms[__FSCRYPT_MODE_MAX + 1];
+
+ /*
+ * Crypto API transforms for filesystem-layer implementation of
+ * IV_INO_LBLK_64 policies, allocated on-demand.
+ */
+ struct crypto_skcipher *mk_iv_ino_lblk_64_tfms[__FSCRYPT_MODE_MAX + 1];
} __randomize_layout;
@@ -443,8 +445,7 @@ struct fscrypt_mode {
const char *cipher_str;
int keysize;
int ivsize;
- bool logged_impl_name;
- bool needs_essiv;
+ int logged_impl_name;
};
static inline bool
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index c34fa7c61b43..040df1f5e1c8 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -43,8 +43,10 @@ static void free_master_key(struct fscrypt_master_key *mk)
wipe_master_key_secret(&mk->mk_secret);
- for (i = 0; i < ARRAY_SIZE(mk->mk_mode_keys); i++)
- crypto_free_skcipher(mk->mk_mode_keys[i]);
+ for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) {
+ crypto_free_skcipher(mk->mk_direct_tfms[i]);
+ crypto_free_skcipher(mk->mk_iv_ino_lblk_64_tfms[i]);
+ }
key_put(mk->mk_users);
kzfree(mk);
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index d71c2d6dd162..f577bb6613f9 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -8,15 +8,11 @@
* Heavily modified since then.
*/
-#include <crypto/aes.h>
-#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <linux/key.h>
#include "fscrypt_private.h"
-static struct crypto_shash *essiv_hash_tfm;
-
static struct fscrypt_mode available_modes[] = {
[FSCRYPT_MODE_AES_256_XTS] = {
.friendly_name = "AES-256-XTS",
@@ -31,11 +27,10 @@ static struct fscrypt_mode available_modes[] = {
.ivsize = 16,
},
[FSCRYPT_MODE_AES_128_CBC] = {
- .friendly_name = "AES-128-CBC",
- .cipher_str = "cbc(aes)",
+ .friendly_name = "AES-128-CBC-ESSIV",
+ .cipher_str = "essiv(cbc(aes),sha256)",
.keysize = 16,
.ivsize = 16,
- .needs_essiv = true,
},
[FSCRYPT_MODE_AES_128_CTS] = {
.friendly_name = "AES-128-CTS-CBC",
@@ -86,15 +81,13 @@ struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode,
mode->cipher_str, PTR_ERR(tfm));
return tfm;
}
- if (unlikely(!mode->logged_impl_name)) {
+ if (!xchg(&mode->logged_impl_name, 1)) {
/*
* fscrypt performance can vary greatly depending on which
* crypto algorithm implementation is used. Help people debug
* performance problems by logging the ->cra_driver_name the
- * first time a mode is used. Note that multiple threads can
- * race here, but it doesn't really matter.
+ * first time a mode is used.
*/
- mode->logged_impl_name = true;
pr_info("fscrypt: %s using implementation \"%s\"\n",
mode->friendly_name,
crypto_skcipher_alg(tfm)->base.cra_driver_name);
@@ -111,131 +104,64 @@ err_free_tfm:
return ERR_PTR(err);
}
-static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
-{
- struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);
-
- /* init hash transform on demand */
- if (unlikely(!tfm)) {
- struct crypto_shash *prev_tfm;
-
- tfm = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(tfm)) {
- if (PTR_ERR(tfm) == -ENOENT) {
- fscrypt_warn(NULL,
- "Missing crypto API support for SHA-256");
- return -ENOPKG;
- }
- fscrypt_err(NULL,
- "Error allocating SHA-256 transform: %ld",
- PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
- prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
- if (prev_tfm) {
- crypto_free_shash(tfm);
- tfm = prev_tfm;
- }
- }
-
- {
- SHASH_DESC_ON_STACK(desc, tfm);
- desc->tfm = tfm;
-
- return crypto_shash_digest(desc, key, keysize, salt);
- }
-}
-
-static int init_essiv_generator(struct fscrypt_info *ci, const u8 *raw_key,
- int keysize)
-{
- int err;
- struct crypto_cipher *essiv_tfm;
- u8 salt[SHA256_DIGEST_SIZE];
-
- if (WARN_ON(ci->ci_mode->ivsize != AES_BLOCK_SIZE))
- return -EINVAL;
-
- essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(essiv_tfm))
- return PTR_ERR(essiv_tfm);
-
- ci->ci_essiv_tfm = essiv_tfm;
-
- err = derive_essiv_salt(raw_key, keysize, salt);
- if (err)
- goto out;
-
- /*
- * Using SHA256 to derive the salt/key will result in AES-256 being
- * used for IV generation. File contents encryption will still use the
- * configured keysize (AES-128) nevertheless.
- */
- err = crypto_cipher_setkey(essiv_tfm, salt, sizeof(salt));
- if (err)
- goto out;
-
-out:
- memzero_explicit(salt, sizeof(salt));
- return err;
-}
-
-/* Given the per-file key, set up the file's crypto transform object(s) */
+/* Given the per-file key, set up the file's crypto transform object */
int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
{
- struct fscrypt_mode *mode = ci->ci_mode;
- struct crypto_skcipher *ctfm;
- int err;
-
- ctfm = fscrypt_allocate_skcipher(mode, derived_key, ci->ci_inode);
- if (IS_ERR(ctfm))
- return PTR_ERR(ctfm);
+ struct crypto_skcipher *tfm;
- ci->ci_ctfm = ctfm;
+ tfm = fscrypt_allocate_skcipher(ci->ci_mode, derived_key, ci->ci_inode);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
- if (mode->needs_essiv) {
- err = init_essiv_generator(ci, derived_key, mode->keysize);
- if (err) {
- fscrypt_warn(ci->ci_inode,
- "Error initializing ESSIV generator: %d",
- err);
- return err;
- }
- }
+ ci->ci_ctfm = tfm;
+ ci->ci_owns_key = true;
return 0;
}
static int setup_per_mode_key(struct fscrypt_info *ci,
- struct fscrypt_master_key *mk)
+ struct fscrypt_master_key *mk,
+ struct crypto_skcipher **tfms,
+ u8 hkdf_context, bool include_fs_uuid)
{
+ const struct inode *inode = ci->ci_inode;
+ const struct super_block *sb = inode->i_sb;
struct fscrypt_mode *mode = ci->ci_mode;
u8 mode_num = mode - available_modes;
struct crypto_skcipher *tfm, *prev_tfm;
u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
+ u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
+ unsigned int hkdf_infolen = 0;
int err;
- if (WARN_ON(mode_num >= ARRAY_SIZE(mk->mk_mode_keys)))
+ if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX))
return -EINVAL;
/* pairs with cmpxchg() below */
- tfm = READ_ONCE(mk->mk_mode_keys[mode_num]);
+ tfm = READ_ONCE(tfms[mode_num]);
if (likely(tfm != NULL))
goto done;
BUILD_BUG_ON(sizeof(mode_num) != 1);
+ BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
+ BUILD_BUG_ON(sizeof(hkdf_info) != 17);
+ hkdf_info[hkdf_infolen++] = mode_num;
+ if (include_fs_uuid) {
+ memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid,
+ sizeof(sb->s_uuid));
+ hkdf_infolen += sizeof(sb->s_uuid);
+ }
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
- HKDF_CONTEXT_PER_MODE_KEY,
- &mode_num, sizeof(mode_num),
+ hkdf_context, hkdf_info, hkdf_infolen,
mode_key, mode->keysize);
if (err)
return err;
- tfm = fscrypt_allocate_skcipher(mode, mode_key, ci->ci_inode);
+ tfm = fscrypt_allocate_skcipher(mode, mode_key, inode);
memzero_explicit(mode_key, mode->keysize);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
/* pairs with READ_ONCE() above */
- prev_tfm = cmpxchg(&mk->mk_mode_keys[mode_num], NULL, tfm);
+ prev_tfm = cmpxchg(&tfms[mode_num], NULL, tfm);
if (prev_tfm != NULL) {
crypto_free_skcipher(tfm);
tfm = prev_tfm;
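setup_per_mode_key() now derives one key per (mode, policy type) pair, with the HKDF application-specific info being the one-byte mode number, optionally followed by the 16-byte filesystem UUID. A hedged sketch of the complete info string that an IV_INO_LBLK_64 AES-256-XTS key ends up using, assuming fscrypt_hkdf_expand() frames it as "fscrypt\0" || context || info as in fs/crypto/hkdf.c (the helper and the literal values are illustrative, not kernel API):

static unsigned int build_hkdf_info(u8 info[26], const u8 fs_uuid[16])
{
	unsigned int n = 0;

	memcpy(&info[n], "fscrypt", 8);		/* 8 bytes incl. trailing NUL */
	n += 8;
	info[n++] = 4;				/* HKDF_CONTEXT_IV_INO_LBLK_64_KEY */
	info[n++] = 1;				/* mode_num (FSCRYPT_MODE_AES_256_XTS) */
	memcpy(&info[n], fs_uuid, 16);		/* sb->s_uuid */
	n += 16;

	return n;				/* 26 bytes total */
}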
@@ -266,7 +192,19 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
ci->ci_mode->friendly_name);
return -EINVAL;
}
- return setup_per_mode_key(ci, mk);
+ return setup_per_mode_key(ci, mk, mk->mk_direct_tfms,
+ HKDF_CONTEXT_DIRECT_KEY, false);
+ } else if (ci->ci_policy.v2.flags &
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
+ /*
+ * IV_INO_LBLK_64: encryption keys are derived from (master_key,
+ * mode_num, filesystem_uuid), and inode number is included in
+ * the IVs. This format is optimized for use with inline
+ * encryption hardware compliant with the UFS or eMMC standards.
+ */
+ return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms,
+ HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
+ true);
}
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
@@ -388,13 +326,10 @@ static void put_crypt_info(struct fscrypt_info *ci)
if (!ci)
return;
- if (ci->ci_direct_key) {
+ if (ci->ci_direct_key)
fscrypt_put_direct_key(ci->ci_direct_key);
- } else if ((ci->ci_ctfm != NULL || ci->ci_essiv_tfm != NULL) &&
- !fscrypt_is_direct_key_policy(&ci->ci_policy)) {
+ else if (ci->ci_owns_key)
crypto_free_skcipher(ci->ci_ctfm);
- crypto_free_cipher(ci->ci_essiv_tfm);
- }
key = ci->ci_master_key;
if (key) {
@@ -415,6 +350,7 @@ static void put_crypt_info(struct fscrypt_info *ci)
key_invalidate(key);
key_put(key);
}
+ memzero_explicit(ci, sizeof(*ci));
kmem_cache_free(fscrypt_info_cachep, ci);
}
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index ad1a36c370c3..5298ef22aa85 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -270,10 +270,6 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci,
return -EINVAL;
}
- /* ESSIV implies 16-byte IVs which implies !DIRECT_KEY */
- if (WARN_ON(mode->needs_essiv))
- return -EINVAL;
-
dk = fscrypt_get_direct_key(ci, raw_master_key);
if (IS_ERR(dk))
return PTR_ERR(dk);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 4072ba644595..96f528071bed 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -29,6 +29,40 @@ bool fscrypt_policies_equal(const union fscrypt_policy *policy1,
return !memcmp(policy1, policy2, fscrypt_policy_size(policy1));
}
+static bool supported_iv_ino_lblk_64_policy(
+ const struct fscrypt_policy_v2 *policy,
+ const struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ int ino_bits = 64, lblk_bits = 64;
+
+ if (policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
+ fscrypt_warn(inode,
+ "The DIRECT_KEY and IV_INO_LBLK_64 flags are mutually exclusive");
+ return false;
+ }
+ /*
+ * It's unsafe to include inode numbers in the IVs if the filesystem can
+ * potentially renumber inodes, e.g. via filesystem shrinking.
+ */
+ if (!sb->s_cop->has_stable_inodes ||
+ !sb->s_cop->has_stable_inodes(sb)) {
+ fscrypt_warn(inode,
+ "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't have stable inode numbers",
+ sb->s_id);
+ return false;
+ }
+ if (sb->s_cop->get_ino_and_lblk_bits)
+ sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
+ if (ino_bits > 32 || lblk_bits > 32) {
+ fscrypt_warn(inode,
+ "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't use 32-bit inode and block numbers",
+ sb->s_id);
+ return false;
+ }
+ return true;
+}
+
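The two fscrypt_operations hooks consulted above, has_stable_inodes() and get_ino_and_lblk_bits(), are provided by the filesystem. A hedged sketch of how a filesystem whose inode numbers never change and fit in 32 bits might wire them up ("myfs" is hypothetical; only the hook signatures are taken from this series):

static bool myfs_has_stable_inodes(struct super_block *sb)
{
	/* myfs never renumbers inodes (e.g. no shrinking that moves them) */
	return true;
}

static void myfs_get_ino_and_lblk_bits(struct super_block *sb,
					int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 32;	/* on-disk inode numbers are 32-bit */
	*lblk_bits_ret = 32;	/* logical block numbers are 32-bit */
}

static const struct fscrypt_operations myfs_cryptops = {
	/* ... get_context, set_context, and other hooks omitted ... */
	.has_stable_inodes	= myfs_has_stable_inodes,
	.get_ino_and_lblk_bits	= myfs_get_ino_and_lblk_bits,
};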
/**
* fscrypt_supported_policy - check whether an encryption policy is supported
*
@@ -55,7 +89,8 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
return false;
}
- if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
+ if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
+ FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
fscrypt_warn(inode,
"Unsupported encryption flags (0x%02x)",
policy->flags);
@@ -83,6 +118,10 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
return false;
}
+ if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
+ !supported_iv_ino_lblk_64_policy(policy, inode))
+ return false;
+
if (memchr_inv(policy->__reserved, 0,
sizeof(policy->__reserved))) {
fscrypt_warn(inode,