crypto: aesni-intel - fixed problem with packets that are not a multiple of 64 bytes
author: Tadeusz Struk <tadeusz.struk@intel.com>
Sun, 13 Mar 2011 08:56:17 +0000 (16:56 +0800)
committer: Brad Figg <brad.figg@canonical.com>
Wed, 27 Apr 2011 18:40:33 +0000 (11:40 -0700)
BugLink: http://bugs.launchpad.net/bugs/761134

commit 60af520cf264ea26b2af3a6871bbd71850522aea upstream.

This patch fixes a problem with packets that are not a multiple of 64 bytes.

Signed-off-by: Adrian Hoban <adrian.hoban@intel.com>
Signed-off-by: Aidan O'Mahony <aidan.o.mahony@intel.com>
Signed-off-by: Gabriele Paoloni <gabriele.paoloni@intel.com>
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>

arch/x86/crypto/aesni-intel_asm.S
arch/x86/crypto/aesni-intel_glue.c

index 8fe2a49..4292df7 100644 (file)
@@ -1612,6 +1612,7 @@ _zero_cipher_left_encrypt:
         movdqa SHUF_MASK(%rip), %xmm10
        PSHUFB_XMM %xmm10, %xmm0
 
+
        ENCRYPT_SINGLE_BLOCK    %xmm0, %xmm1        # Encrypt(K, Yn)
        sub $16, %r11
        add %r13, %r11
@@ -1634,7 +1635,9 @@ _zero_cipher_left_encrypt:
        # GHASH computation for the last <16 byte block
        sub     %r13, %r11
        add     $16, %r11
-       PSHUFB_XMM %xmm10, %xmm1
+
+       movdqa SHUF_MASK(%rip), %xmm10
+       PSHUFB_XMM %xmm10, %xmm0
 
        # shuffle xmm0 back to output as ciphertext
 
index e1e60c7..b375b2a 100644 (file)
@@ -828,9 +828,15 @@ static int rfc4106_init(struct crypto_tfm *tfm)
        struct cryptd_aead *cryptd_tfm;
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+       struct crypto_aead *cryptd_child;
+       struct aesni_rfc4106_gcm_ctx *child_ctx;
        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
+
+       cryptd_child = cryptd_aead_child(cryptd_tfm);
+       child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
+       memcpy(child_ctx, ctx, sizeof(*ctx));
        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_aead.reqsize = sizeof(struct aead_request)
                + crypto_aead_reqsize(&cryptd_tfm->base);
@@ -925,6 +931,9 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
        int ret = 0;
        struct crypto_tfm *tfm = crypto_aead_tfm(parent);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+       struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+       struct aesni_rfc4106_gcm_ctx *child_ctx =
+                                 aesni_rfc4106_gcm_ctx_get(cryptd_child);
        u8 *new_key_mem = NULL;
 
        if (key_len < 4) {
@@ -968,6 +977,7 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                goto exit;
        }
        ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+       memcpy(child_ctx, ctx, sizeof(*ctx));
 exit:
        kfree(new_key_mem);
        return ret;
@@ -999,7 +1009,6 @@ static int rfc4106_encrypt(struct aead_request *req)
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
-       struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 
        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
@@ -1008,6 +1017,7 @@ static int rfc4106_encrypt(struct aead_request *req)
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_encrypt(cryptd_req);
        } else {
+               struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.encrypt(req);
                kernel_fpu_end();
@@ -1020,7 +1030,6 @@ static int rfc4106_decrypt(struct aead_request *req)
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
-       struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 
        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
@@ -1029,6 +1038,7 @@ static int rfc4106_decrypt(struct aead_request *req)
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_decrypt(cryptd_req);
        } else {
+               struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.decrypt(req);
                kernel_fpu_end();