crypto: x86/aegis - Add missing error checks

The skcipher_walk functions can allocate memory and can fail, so
checking for errors is necessary.

Fixes: 1d373d4e8e ("crypto: x86 - Add optimized AEGIS implementations")
Cc: stable@vger.kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Eric Biggers 2025-07-08 12:38:29 -07:00 committed by Herbert Xu
parent c7f49dadfc
commit 3d9eb180fb
1 changed file with 25 additions and 11 deletions

View File

@@ -104,10 +104,12 @@ static void crypto_aegis128_aesni_process_ad(
}
}
static __always_inline void
static __always_inline int
crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
struct skcipher_walk *walk, bool enc)
{
int err = 0;
while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
if (enc)
aegis128_aesni_enc(state, walk->src.virt.addr,
@@ -120,7 +122,8 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
round_down(walk->nbytes,
AEGIS128_BLOCK_SIZE));
kernel_fpu_end();
skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
err = skcipher_walk_done(walk,
walk->nbytes % AEGIS128_BLOCK_SIZE);
kernel_fpu_begin();
}
@@ -134,9 +137,10 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
walk->dst.virt.addr,
walk->nbytes);
kernel_fpu_end();
skcipher_walk_done(walk, 0);
err = skcipher_walk_done(walk, 0);
kernel_fpu_begin();
}
return err;
}
static struct aegis_ctx *crypto_aegis128_aesni_ctx(struct crypto_aead *aead)
@@ -169,7 +173,7 @@ static int crypto_aegis128_aesni_setauthsize(struct crypto_aead *tfm,
return 0;
}
static __always_inline void
static __always_inline int
crypto_aegis128_aesni_crypt(struct aead_request *req,
struct aegis_block *tag_xor,
unsigned int cryptlen, bool enc)
@@ -178,20 +182,24 @@ crypto_aegis128_aesni_crypt(struct aead_request *req,
struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
struct skcipher_walk walk;
struct aegis_state state;
int err;
if (enc)
skcipher_walk_aead_encrypt(&walk, req, false);
err = skcipher_walk_aead_encrypt(&walk, req, false);
else
skcipher_walk_aead_decrypt(&walk, req, false);
err = skcipher_walk_aead_decrypt(&walk, req, false);
if (err)
return err;
kernel_fpu_begin();
aegis128_aesni_init(&state, &ctx->key, req->iv);
crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
crypto_aegis128_aesni_process_crypt(&state, &walk, enc);
aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
err = crypto_aegis128_aesni_process_crypt(&state, &walk, enc);
if (err == 0)
aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
return err;
}
static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
@@ -200,8 +208,11 @@ static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
struct aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen;
int err;
crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true);
err = crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true);
if (err)
return err;
scatterwalk_map_and_copy(tag.bytes, req->dst,
req->assoclen + cryptlen, authsize, 1);
@@ -216,11 +227,14 @@ static int crypto_aegis128_aesni_decrypt(struct aead_request *req)
struct aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
int err;
scatterwalk_map_and_copy(tag.bytes, req->src,
req->assoclen + cryptlen, authsize, 0);
crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false);
err = crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false);
if (err)
return err;
return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0;
}