Subject: [PATCH v4 19/24] crypto: x86/aesni - avoid type conversions
Change the type of the GCM auth_tag_len argument and derivative
variables from unsigned long to unsigned int, so they preserve the
type returned by crypto_aead_authsize().

Continue to pass it to the asm functions as an unsigned long,
but let those function calls be the place where the conversion
to the possibly larger type occurs.

This avoids possible truncation in calculations like:

  scatterwalk_map_and_copy(auth_tag_msg, req->src,
                           req->assoclen + req->cryptlen - auth_tag_len,
                           auth_tag_len, 0);

whose third argument is an unsigned int. If unsigned long is wider
than unsigned int, that expression is promoted to unsigned long and
could be silently truncated when it is converted back to unsigned int
at the call.
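
As an aside (not part of the patch; the names and values below are
made up purely for illustration), a minimal userspace sketch of that
narrowing on an LP64 target, where unsigned long is 64 bits and
unsigned int is 32 bits:

  #include <stdio.h>

  /* stand-in for a callee such as scatterwalk_map_and_copy(),
   * whose offset parameter is unsigned int */
  static void take_offset(unsigned int start)
  {
          printf("callee sees start = %u\n", start);
  }

  int main(void)
  {
          unsigned int assoclen = 8, cryptlen = 8;
          unsigned long auth_tag_len = 16;  /* the old, wider type */

          /* mixing unsigned int and unsigned long promotes the whole
           * expression to unsigned long; the implicit conversion back
           * to unsigned int at the call drops any high bits */
          take_offset(assoclen + cryptlen - auth_tag_len);  /* prints 0 */
          take_offset(0x100000010UL);  /* prints 16, not 4294967312 */
          return 0;
  }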

Use unsigned int rather than int for intermediate variables
containing byte counts and block counts, since all the functions
using them accept unsigned int arguments.
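
Similarly (again illustrative only, not part of the patch), matching
the callee's unsigned int parameter type avoids an implicit sign
conversion at the call, assuming a typical 32-bit unsigned int:

  #include <stdio.h>

  /* stand-in for a callee that takes a byte count as unsigned int */
  static void walk_done(unsigned int nbytes)
  {
          printf("callee sees nbytes = %u\n", nbytes);
  }

  int main(void)
  {
          int signed_count = -1;            /* int intermediate */
          unsigned int unsigned_count = 1;  /* unsigned int intermediate */

          /* the int argument is implicitly converted at the call; a
           * negative value is reinterpreted as a huge unsigned number */
          walk_done(signed_count);    /* prints 4294967295 */
          walk_done(unsigned_count);  /* prints 1 */
          return 0;
  }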

Signed-off-by: Robert Elliott <elliott@hpe.com>
---
arch/x86/crypto/aesni-intel_glue.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index a5b0cb3efeba..921680373855 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -381,7 +381,7 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
 {
         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
-        int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+        unsigned int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
         struct scatterlist *src = req->src, *dst = req->dst;
         struct scatterlist sg_src[2], sg_dst[2];
         struct skcipher_request subreq;
@@ -437,7 +437,7 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
 {
         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
-        int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+        unsigned int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
         struct scatterlist *src = req->src, *dst = req->dst;
         struct scatterlist sg_src[2], sg_dst[2];
         struct skcipher_request subreq;
@@ -671,11 +671,11 @@ static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
 static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
                               unsigned int assoclen, u8 *hash_subkey,
                               u8 *iv, void *aes_ctx, u8 *auth_tag,
-                              unsigned long auth_tag_len)
+                              unsigned int auth_tag_len)
 {
         u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
         struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
-        unsigned long left = req->cryptlen;
+        unsigned int left = req->cryptlen;
         struct scatter_walk assoc_sg_walk;
         struct skcipher_walk walk;
         bool do_avx, do_avx2;
@@ -782,7 +782,7 @@ static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
                           u8 *hash_subkey, u8 *iv, void *aes_ctx)
 {
         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+        unsigned int auth_tag_len = crypto_aead_authsize(tfm);
         u8 auth_tag[16];
         int err;
 
@@ -801,7 +801,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
                           u8 *hash_subkey, u8 *iv, void *aes_ctx)
 {
         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+        unsigned int auth_tag_len = crypto_aead_authsize(tfm);
         u8 auth_tag_msg[16];
         u8 auth_tag[16];
         int err;
@@ -907,7 +907,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 {
         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-        int tail = req->cryptlen % AES_BLOCK_SIZE;
+        unsigned int tail = req->cryptlen % AES_BLOCK_SIZE;
         struct skcipher_request subreq;
         struct skcipher_walk walk;
         int err;
@@ -920,7 +920,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
                 return err;
 
         if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
-                int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+                unsigned int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 
                 skcipher_walk_abort(&walk);
 
@@ -945,7 +945,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
         aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
 
         while (walk.nbytes > 0) {
-                int nbytes = walk.nbytes;
+                unsigned int nbytes = walk.nbytes;
 
                 if (nbytes < walk.total)
                         nbytes &= ~(AES_BLOCK_SIZE - 1);
--
2.38.1