Diffstat (limited to 'drivers')
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c      | 13
-rw-r--r--  drivers/crypto/mv_cesa.c                 | 41
-rw-r--r--  drivers/crypto/n2_core.c                 | 11
-rw-r--r--  drivers/crypto/omap-sham.c               | 28
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 31
-rw-r--r--  drivers/md/dm-crypt.c                    | 34
6 files changed, 67 insertions(+), 91 deletions(-)
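
Every file below makes the same mechanical change: an open-coded on-stack descriptor, a struct wrapping a struct shash_desc followed by a variable-length ctx[] array (a VLA inside a struct, a GCC extension that is not valid ISO C), is replaced with the SHASH_DESC_ON_STACK() helper. For reference, the macro in include/crypto/hash.h around the time of this series looks roughly like the sketch below (reproduced from memory, so verify against the tree you build; the local names are the macro's own):

	#define SHASH_DESC_ON_STACK(shash, ctx)				  \
		char __##shash##_desc[sizeof(struct shash_desc) +	  \
			crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
		struct shash_desc *shash = (struct shash_desc *)__##shash##_desc

It declares a suitably sized and aligned char buffer plus a struct shash_desc pointer into it, which is why every caller switches from "desc.shash" member access to "shash->" pointer access in the hunks that follow.
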
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 873f23425245..96531571f7cf 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -198,10 +198,9 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
- struct {
- struct shash_desc sdesc;
- char ctx[crypto_shash_descsize(shash)];
- } desc;
+
+ SHASH_DESC_ON_STACK(sdesc, shash);
+
unsigned int block_size = crypto_shash_blocksize(shash);
unsigned int digest_size = crypto_shash_digestsize(shash);
int i, ret;
@@ -216,11 +215,11 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
- desc.sdesc.tfm = shash;
- desc.sdesc.flags = crypto_ahash_get_flags(tfm) &
+ sdesc->tfm = shash;
+ sdesc->flags = crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = crypto_shash_digest(&desc.sdesc, key, key_len,
+ ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
if (ret) {
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 29d0ee504907..032c72c1f953 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -402,26 +402,23 @@ static int mv_hash_final_fallback(struct ahash_request *req)
{
const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
- } desc;
+ SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback);
int rc;
- desc.shash.tfm = tfm_ctx->fallback;
- desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ shash->tfm = tfm_ctx->fallback;
+ shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
if (unlikely(req_ctx->first_hash)) {
- crypto_shash_init(&desc.shash);
- crypto_shash_update(&desc.shash, req_ctx->buffer,
+ crypto_shash_init(shash);
+ crypto_shash_update(shash, req_ctx->buffer,
req_ctx->extra_bytes);
} else {
/* only SHA1 for now....
*/
- rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
+ rc = mv_hash_import_sha1_ctx(req_ctx, shash);
if (rc)
goto out;
}
- rc = crypto_shash_final(&desc.shash, req->result);
+ rc = crypto_shash_final(shash, req->result);
out:
return rc;
}
@@ -794,23 +791,21 @@ static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
ss = crypto_shash_statesize(ctx->base_hash);
{
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(ctx->base_hash)];
- } desc;
+ SHASH_DESC_ON_STACK(shash, ctx->base_hash);
+
unsigned int i;
char ipad[ss];
char opad[ss];
- desc.shash.tfm = ctx->base_hash;
- desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
+ shash->tfm = ctx->base_hash;
+ shash->flags = crypto_shash_get_flags(ctx->base_hash) &
CRYPTO_TFM_REQ_MAY_SLEEP;
if (keylen > bs) {
int err;
err =
- crypto_shash_digest(&desc.shash, key, keylen, ipad);
+ crypto_shash_digest(shash, key, keylen, ipad);
if (err)
return err;
@@ -826,12 +821,12 @@ static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
opad[i] ^= 0x5c;
}
- rc = crypto_shash_init(&desc.shash) ? :
- crypto_shash_update(&desc.shash, ipad, bs) ? :
- crypto_shash_export(&desc.shash, ipad) ? :
- crypto_shash_init(&desc.shash) ? :
- crypto_shash_update(&desc.shash, opad, bs) ? :
- crypto_shash_export(&desc.shash, opad);
+ rc = crypto_shash_init(shash) ? :
+ crypto_shash_update(shash, ipad, bs) ? :
+ crypto_shash_export(shash, ipad) ? :
+ crypto_shash_init(shash) ? :
+ crypto_shash_update(shash, opad, bs) ? :
+ crypto_shash_export(shash, opad);
if (rc == 0)
mv_hash_init_ivs(ctx, ipad, opad);
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 7263c10a56ee..f8e3207fecb1 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -445,10 +445,7 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child_shash = ctx->child_shash;
struct crypto_ahash *fallback_tfm;
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(child_shash)];
- } desc;
+ SHASH_DESC_ON_STACK(shash, child_shash);
int err, bs, ds;
fallback_tfm = ctx->base.fallback_tfm;
@@ -456,15 +453,15 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
if (err)
return err;
- desc.shash.tfm = child_shash;
- desc.shash.flags = crypto_ahash_get_flags(tfm) &
+ shash->tfm = child_shash;
+ shash->flags = crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MAY_SLEEP;
bs = crypto_shash_blocksize(child_shash);
ds = crypto_shash_digestsize(child_shash);
BUG_ON(ds > N2_HASH_KEY_MAX);
if (keylen > bs) {
- err = crypto_shash_digest(&desc.shash, key, keylen,
+ err = crypto_shash_digest(shash, key, keylen,
ctx->hash_key);
if (err)
return err;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 710d86386965..24ef48965e45 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -949,17 +949,14 @@ static int omap_sham_finish_hmac(struct ahash_request *req)
struct omap_sham_hmac_ctx *bctx = tctx->base;
int bs = crypto_shash_blocksize(bctx->shash);
int ds = crypto_shash_digestsize(bctx->shash);
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(bctx->shash)];
- } desc;
+ SHASH_DESC_ON_STACK(shash, bctx->shash);
- desc.shash.tfm = bctx->shash;
- desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
+ shash->tfm = bctx->shash;
+ shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
- return crypto_shash_init(&desc.shash) ?:
- crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
- crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+ return crypto_shash_init(shash) ?:
+ crypto_shash_update(shash, bctx->opad, bs) ?:
+ crypto_shash_finup(shash, req->result, ds, req->result);
}
static int omap_sham_finish(struct ahash_request *req)
@@ -1118,18 +1115,15 @@ static int omap_sham_update(struct ahash_request *req)
return omap_sham_enqueue(req, OP_UPDATE);
}
-static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
+static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
const u8 *data, unsigned int len, u8 *out)
{
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(shash)];
- } desc;
+ SHASH_DESC_ON_STACK(shash, tfm);
- desc.shash.tfm = shash;
- desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ shash->tfm = tfm;
+ shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- return crypto_shash_digest(&desc.shash, data, len, out);
+ return crypto_shash_digest(shash, data, len, out);
}
static int omap_sham_final_shash(struct ahash_request *req)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 3e26fa2b293f..f2e2f158cfbe 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -149,10 +149,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
unsigned int auth_keylen)
{
struct qat_auth_state auth_state;
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(ctx->hash_tfm)];
- } desc;
+ SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
struct sha1_state sha1;
struct sha256_state sha256;
struct sha512_state sha512;
@@ -165,12 +162,12 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
int i, offset;
memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
- desc.shash.tfm = ctx->hash_tfm;
- desc.shash.flags = 0x0;
+ shash->tfm = ctx->hash_tfm;
+ shash->flags = 0x0;
if (auth_keylen > block_size) {
char buff[SHA512_BLOCK_SIZE];
- int ret = crypto_shash_digest(&desc.shash, auth_key,
+ int ret = crypto_shash_digest(shash, auth_key,
auth_keylen, buff);
if (ret)
return ret;
@@ -193,10 +190,10 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
*opad_ptr ^= 0x5C;
}
- if (crypto_shash_init(&desc.shash))
+ if (crypto_shash_init(shash))
return -EFAULT;
- if (crypto_shash_update(&desc.shash, ipad, block_size))
+ if (crypto_shash_update(shash, ipad, block_size))
return -EFAULT;
hash_state_out = (__be32 *)hash->sha.state1;
@@ -204,19 +201,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(&desc.shash, &sha1))
+ if (crypto_shash_export(shash, &sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(*(sha1.state + i));
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(&desc.shash, &sha256))
+ if (crypto_shash_export(shash, &sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(*(sha256.state + i));
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(&desc.shash, &sha512))
+ if (crypto_shash_export(shash, &sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
*hash512_state_out = cpu_to_be64(*(sha512.state + i));
@@ -225,10 +222,10 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
return -EFAULT;
}
- if (crypto_shash_init(&desc.shash))
+ if (crypto_shash_init(shash))
return -EFAULT;
- if (crypto_shash_update(&desc.shash, opad, block_size))
+ if (crypto_shash_update(shash, opad, block_size))
return -EFAULT;
offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
@@ -237,19 +234,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(&desc.shash, &sha1))
+ if (crypto_shash_export(shash, &sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(*(sha1.state + i));
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(&desc.shash, &sha256))
+ if (crypto_shash_export(shash, &sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(*(sha256.state + i));
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(&desc.shash, &sha512))
+ if (crypto_shash_export(shash, &sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
*hash512_state_out = cpu_to_be64(*(sha512.state + i));
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index cd15e0801228..fc93b9330af4 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -526,29 +526,26 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
u8 *data)
{
struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
- struct {
- struct shash_desc desc;
- char ctx[crypto_shash_descsize(lmk->hash_tfm)];
- } sdesc;
+ SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
struct md5_state md5state;
__le32 buf[4];
int i, r;
- sdesc.desc.tfm = lmk->hash_tfm;
- sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = lmk->hash_tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- r = crypto_shash_init(&sdesc.desc);
+ r = crypto_shash_init(desc);
if (r)
return r;
if (lmk->seed) {
- r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
+ r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
if (r)
return r;
}
/* Sector is always 512B, block size 16, add data of blocks 1-31 */
- r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
+ r = crypto_shash_update(desc, data + 16, 16 * 31);
if (r)
return r;
@@ -557,12 +554,12 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
buf[2] = cpu_to_le32(4024);
buf[3] = 0;
- r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
+ r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
if (r)
return r;
/* No MD5 padding here */
- r = crypto_shash_export(&sdesc.desc, &md5state);
+ r = crypto_shash_export(desc, &md5state);
if (r)
return r;
@@ -679,10 +676,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
u8 buf[TCW_WHITENING_SIZE];
- struct {
- struct shash_desc desc;
- char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
- } sdesc;
+ SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
int i, r;
/* xor whitening with sector number */
@@ -691,16 +685,16 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
crypto_xor(&buf[8], (u8 *)&sector, 8);
/* calculate crc32 for every 32bit part and xor it */
- sdesc.desc.tfm = tcw->crc32_tfm;
- sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = tcw->crc32_tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
for (i = 0; i < 4; i++) {
- r = crypto_shash_init(&sdesc.desc);
+ r = crypto_shash_init(desc);
if (r)
goto out;
- r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
+ r = crypto_shash_update(desc, &buf[i * 4], 4);
if (r)
goto out;
- r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
+ r = crypto_shash_final(desc, &buf[i * 4]);
if (r)
goto out;
}
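
As a usage note, here is a minimal caller sketch (not part of the patch; the helper name is illustrative) showing the pattern every driver above converges on, essentially what omap_sham_shash_digest() becomes after conversion:

	/* Hypothetical helper: one-shot digest with an on-stack descriptor. */
	static int example_shash_digest(struct crypto_shash *tfm, u32 flags,
					const u8 *data, unsigned int len,
					u8 *out)
	{
		SHASH_DESC_ON_STACK(desc, tfm);	/* desc is a struct shash_desc * */

		desc->tfm = tfm;
		/* Only let the transform sleep if the caller permits it. */
		desc->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

		return crypto_shash_digest(desc, data, len, out);
	}

Note that crypto_shash_descsize() is still evaluated at run time here, so the backing buffer remains an ordinary C99 VLA on the stack; what the patch removes is only the VLA-in-struct (VLAIS) form, which clang rejects.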