author		Ard Biesheuvel <ardb@kernel.org>	2021-01-05 17:48:04 +0100
committer	Herbert Xu <herbert@gondor.apana.org.au>	2021-01-14 17:10:29 +1100
commit		9ad58b46f814edd5b8b288b66f94cf57c97eaea3 (patch)
tree		ede6fe246047056e64ad1fdbe1aa743a84e680d3 /arch/x86/crypto
parent		407d409a8102a5ba042215aed7b2ef2d6e6c67a8 (diff)
crypto: x86/serpent - drop dependency on glue helper
Replace the glue helper dependency with implementations of ECB and CBC
based on the new CPP macros, which avoid the need for indirect calls.

Acked-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/x86/crypto')
-rw-r--r--	arch/x86/crypto/serpent_avx2_glue.c	73
-rw-r--r--	arch/x86/crypto/serpent_avx_glue.c	61
-rw-r--r--	arch/x86/crypto/serpent_sse2_glue.c	81
3 files changed, 61 insertions, 154 deletions
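
For readers unfamiliar with the ecb_cbc_helpers.h pattern used below: the
ECB_WALK_START/ECB_BLOCK/ECB_WALK_END (and matching CBC_*) macros open-code
the skcipher walk that the old glue helper drove through tables of function
pointers. What follows is only a simplified sketch of what such macros could
look like, inferred from how they are invoked in this patch; the local names
(__bsize, do_fpu) and the exact bodies are illustrative, not the actual
kernel header.

/*
 * Simplified sketch only -- not the real arch/x86/crypto/ecb_cbc_helpers.h.
 * One skcipher walk, FPU sections entered only when enough data is queued,
 * and the widest cipher routine tried first, all via direct calls.
 */
#define ECB_WALK_START(req, bsize, fpu_blocks)				\
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));	\
	const int __bsize = (bsize);					\
	struct skcipher_walk walk;					\
	int err = skcipher_walk_virt(&walk, (req), false);		\
	while (walk.nbytes > 0) {					\
		unsigned int nbytes = walk.nbytes;			\
		const u8 *src = walk.src.virt.addr;			\
		u8 *dst = walk.dst.virt.addr;				\
		/* -1 means "never use the FPU" (scalar C fallback) */	\
		bool do_fpu = (fpu_blocks) != -1 &&			\
			      nbytes >= (fpu_blocks) * __bsize;		\
		if (do_fpu)						\
			kernel_fpu_begin()

#define ECB_BLOCK(blocks, func)						\
		while (nbytes >= (blocks) * __bsize) {			\
			(func)(ctx, dst, src);	/* direct call */	\
			src += (blocks) * __bsize;			\
			dst += (blocks) * __bsize;			\
			nbytes -= (blocks) * __bsize;			\
		}

#define ECB_WALK_END()							\
		if (do_fpu)						\
			kernel_fpu_end();				\
		err = skcipher_walk_done(&walk, nbytes);		\
	}								\
	return err

The CBC_* macros presumably follow the same walk structure, with
CBC_ENC_BLOCK chaining one block at a time (XOR with the previous ciphertext
before encrypting) and CBC_DEC_BLOCK XORing each decrypted block with the
preceding ciphertext block afterwards.
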
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 28e542c6512a..261c9ac2d762 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -12,9 +12,10 @@
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
-#include <asm/crypto/glue_helper.h>
#include <asm/crypto/serpent-avx.h>
+#include "ecb_cbc_helpers.h"
+
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
/* 16-way AVX2 parallel cipher functions */
@@ -28,72 +29,38 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
-static const struct common_glue_ctx serpent_enc = {
- .num_funcs = 3,
- .fpu_blocks_limit = 8,
-
- .funcs = { {
- .num_blocks = 16,
- .fn_u = { .ecb = serpent_ecb_enc_16way }
- }, {
- .num_blocks = 8,
- .fn_u = { .ecb = serpent_ecb_enc_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_encrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec = {
- .num_funcs = 3,
- .fpu_blocks_limit = 8,
-
- .funcs = { {
- .num_blocks = 16,
- .fn_u = { .ecb = serpent_ecb_dec_16way }
- }, {
- .num_blocks = 8,
- .fn_u = { .ecb = serpent_ecb_dec_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_decrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec_cbc = {
- .num_funcs = 3,
- .fpu_blocks_limit = 8,
-
- .funcs = { {
- .num_blocks = 16,
- .fn_u = { .cbc = serpent_cbc_dec_16way }
- }, {
- .num_blocks = 8,
- .fn_u = { .cbc = serpent_cbc_dec_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = __serpent_decrypt }
- } }
-};
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_enc, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_ecb_enc_16way);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx);
+ ECB_BLOCK(1, __serpent_encrypt);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_dec, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_ecb_dec_16way);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx);
+ ECB_BLOCK(1, __serpent_decrypt);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(__serpent_encrypt);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_cbc_dec_16way);
+ CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx);
+ CBC_DEC_BLOCK(1, __serpent_decrypt);
+ CBC_WALK_END();
}
static struct skcipher_alg serpent_algs[] = {
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index aa4605baf9d4..5fe01d2a5b1d 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -15,9 +15,10 @@
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
-#include <asm/crypto/glue_helper.h>
#include <asm/crypto/serpent-avx.h>
+#include "ecb_cbc_helpers.h"
+
/* 8-way parallel cipher functions */
asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
@@ -37,63 +38,35 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
-static const struct common_glue_ctx serpent_enc = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = serpent_ecb_enc_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_encrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = serpent_ecb_dec_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_decrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec_cbc = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .cbc = serpent_cbc_dec_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = __serpent_decrypt }
- } }
-};
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_enc, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx);
+ ECB_BLOCK(1, __serpent_encrypt);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_dec, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx);
+ ECB_BLOCK(1, __serpent_decrypt);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(__serpent_encrypt);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx);
+ CBC_DEC_BLOCK(1, __serpent_decrypt);
+ CBC_WALK_END();
}
static struct skcipher_alg serpent_algs[] = {
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 9acb3bf28feb..e28d60949c16 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -21,7 +21,8 @@
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include <asm/crypto/serpent-sse2.h>
-#include <asm/crypto/glue_helper.h>
+
+#include "ecb_cbc_helpers.h"
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -29,80 +30,46 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
-static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s)
+static void serpent_decrypt_cbc_xway(const void *ctx, u8 *dst, const u8 *src)
{
- u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
- unsigned int j;
-
- for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
- ivs[j] = src[j];
+ u8 buf[SERPENT_PARALLEL_BLOCKS - 1][SERPENT_BLOCK_SIZE];
+ const u8 *s = src;
- serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
-
- for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
- u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
+ if (dst == src)
+ s = memcpy(buf, src, sizeof(buf));
+ serpent_dec_blk_xway(ctx, dst, src);
+ crypto_xor(dst + SERPENT_BLOCK_SIZE, s, sizeof(buf));
}
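
A note on the serpent_decrypt_cbc_xway() change ending above: CBC decryption
recovers P[i] = D(C[i]) XOR C[i-1], so when an n-way primitive decrypts in
place it would overwrite the very ciphertext blocks still needed for the XOR
step; copying them to a stack buffer first avoids that. The same pattern in
isolation, with hypothetical names (NWAY, BLK, decrypt_nway()) rather than
kernel symbols:

/* Illustrative only: NWAY, BLK and decrypt_nway() are stand-ins. */
static void cbc_dec_nway(const void *ctx, u8 *dst, const u8 *src)
{
	u8 saved[(NWAY - 1) * BLK];	/* previous-ciphertext blocks */
	const u8 *prev = src;

	if (dst == src)			/* in place: src is about to be clobbered */
		prev = memcpy(saved, src, sizeof(saved));

	decrypt_nway(ctx, dst, src);	/* dst[i] = D(C[i]) for i = 0..NWAY-1 */

	/* dst[i] ^= C[i-1] for i = 1..NWAY-1; block 0 is chained by the walk code */
	crypto_xor(dst + BLK, prev, sizeof(saved));
}
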
-static const struct common_glue_ctx serpent_enc = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = serpent_enc_blk_xway }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_encrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = serpent_dec_blk_xway }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_decrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec_cbc = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .cbc = serpent_decrypt_cbc_xway }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = __serpent_decrypt }
- } }
-};
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_enc, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_enc_blk_xway);
+ ECB_BLOCK(1, __serpent_encrypt);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_dec, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_dec_blk_xway);
+ ECB_BLOCK(1, __serpent_decrypt);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(__serpent_encrypt,
- req);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(__serpent_encrypt);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_decrypt_cbc_xway);
+ CBC_DEC_BLOCK(1, __serpent_decrypt);
+ CBC_WALK_END();
}
static struct skcipher_alg serpent_algs[] = {