linux/crypto/sha256.c
Herbert Xu 6c2bb98bc3 [CRYPTO] all: Pass tfm instead of ctx to algorithms
Up until now algorithms have been happy to get a context pointer since
they know everything that's in the tfm already (e.g., alignment, block
size).

However, once we have parameterised algorithms, such information will
be specific to each tfm.  So the algorithm API needs to be changed to
pass the tfm structure instead of the context pointer.

This patch is basically a text substitution.  The only tricky bit is
the assembly routines that need to get the context pointer offset
through asm-offsets.h.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2006-06-26 17:34:39 +10:00
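
For this file the change is mechanical: each digest hook now takes the tfm as its first argument and fetches its private state with crypto_tfm_ctx() instead of receiving the context pointer directly. A minimal before/after sketch of the pattern (the old-style prototype is reconstructed from the description above, not copied from the tree):

	/* before: the API handed the algorithm its private context directly */
	static void sha256_init(void *ctx)
	{
		struct sha256_ctx *sctx = ctx;
		/* ... */
	}

	/* after: the API hands over the tfm; the context is derived from it */
	static void sha256_init(struct crypto_tfm *tfm)
	{
		struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
		/* ... */
	}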

/*
* Cryptographic API.
*
* SHA-256, as specified in
* http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf
*
* SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
*
* Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <asm/scatterlist.h>
#include <asm/byteorder.h>
#define SHA256_DIGEST_SIZE 32
#define SHA256_HMAC_BLOCK_SIZE 64
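/*
 * count[] is the running message length in bits, kept as two 32-bit
 * words (low word first); state[] holds the eight 32-bit hash values;
 * buf[] buffers a partial input block between update calls.  The
 * SHA-256 block size is 64 bytes, so only the first 64 bytes of buf
 * are actually used.
 */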
struct sha256_ctx {
u32 count[2];
u32 state[8];
u8 buf[128];
};
static inline u32 Ch(u32 x, u32 y, u32 z)
{
return z ^ (x & (y ^ z));
}
static inline u32 Maj(u32 x, u32 y, u32 z)
{
return (x & y) | (z & (x | y));
}
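/*
 * Ch() ("choose") and Maj() ("majority") above, and the rotate/shift
 * macros below, are the FIPS 180-2 round functions: e0/e1 are the
 * big-sigma functions used in every compression round, s0/s1 the
 * small-sigma functions used to expand the message schedule.
 */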
#define e0(x) (ror32(x, 2) ^ ror32(x,13) ^ ror32(x,22))
#define e1(x) (ror32(x, 6) ^ ror32(x,11) ^ ror32(x,25))
#define s0(x) (ror32(x, 7) ^ ror32(x,18) ^ (x >> 3))
#define s1(x) (ror32(x,17) ^ ror32(x,19) ^ (x >> 10))
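/*
 * H0..H7 are the FIPS 180-2 initial hash values: the first 32 bits of
 * the fractional parts of the square roots of the first eight primes.
 */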
#define H0 0x6a09e667
#define H1 0xbb67ae85
#define H2 0x3c6ef372
#define H3 0xa54ff53a
#define H4 0x510e527f
#define H5 0x9b05688c
#define H6 0x1f83d9ab
#define H7 0x5be0cd19
static inline void LOAD_OP(int I, u32 *W, const u8 *input)
{
W[I] = __be32_to_cpu( ((__be32*)(input))[I] );
}
static inline void BLEND_OP(int I, u32 *W)
{
W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
}
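/*
 * sha256_transform() compresses one 64-byte block into the hash state:
 * LOAD_OP reads the 16 big-endian input words into W[0..15], BLEND_OP
 * extends them to W[16..63] via W[t] = s1(W[t-2]) + W[t-7] +
 * s0(W[t-15]) + W[t-16], and the 64 rounds update a..h before the
 * results are added back into state[].
 */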
static void sha256_transform(u32 *state, const u8 *input)
{
u32 a, b, c, d, e, f, g, h, t1, t2;
u32 W[64];
int i;
/* load the input */
for (i = 0; i < 16; i++)
LOAD_OP(i, W, input);
/* now blend */
for (i = 16; i < 64; i++)
BLEND_OP(i, W);
/* load the state into our registers */
a=state[0]; b=state[1]; c=state[2]; d=state[3];
e=state[4]; f=state[5]; g=state[6]; h=state[7];
/* now iterate */
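/*
 * The 64 rounds are fully unrolled.  Each one computes
 *   t1 = h + e1(e) + Ch(e,f,g) + K[t] + W[t]
 *   t2 = e0(a) + Maj(a,b,c)
 * with the round constant K[t] written inline; instead of shifting
 * the eight working variables, successive rounds rotate their roles.
 */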
t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0];
t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
t1 = g + e1(d) + Ch(d,e,f) + 0x71374491 + W[ 1];
t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
t1 = f + e1(c) + Ch(c,d,e) + 0xb5c0fbcf + W[ 2];
t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
t1 = e + e1(b) + Ch(b,c,d) + 0xe9b5dba5 + W[ 3];
t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
t1 = d + e1(a) + Ch(a,b,c) + 0x3956c25b + W[ 4];
t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
t1 = c + e1(h) + Ch(h,a,b) + 0x59f111f1 + W[ 5];
t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
t1 = b + e1(g) + Ch(g,h,a) + 0x923f82a4 + W[ 6];
t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
t1 = a + e1(f) + Ch(f,g,h) + 0xab1c5ed5 + W[ 7];
t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
t1 = h + e1(e) + Ch(e,f,g) + 0xd807aa98 + W[ 8];
t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
t1 = g + e1(d) + Ch(d,e,f) + 0x12835b01 + W[ 9];
t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
t1 = f + e1(c) + Ch(c,d,e) + 0x243185be + W[10];
t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
t1 = e + e1(b) + Ch(b,c,d) + 0x550c7dc3 + W[11];
t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
t1 = d + e1(a) + Ch(a,b,c) + 0x72be5d74 + W[12];
t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
t1 = c + e1(h) + Ch(h,a,b) + 0x80deb1fe + W[13];
t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
t1 = b + e1(g) + Ch(g,h,a) + 0x9bdc06a7 + W[14];
t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
t1 = a + e1(f) + Ch(f,g,h) + 0xc19bf174 + W[15];
t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
t1 = h + e1(e) + Ch(e,f,g) + 0xe49b69c1 + W[16];
t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
t1 = g + e1(d) + Ch(d,e,f) + 0xefbe4786 + W[17];
t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
t1 = f + e1(c) + Ch(c,d,e) + 0x0fc19dc6 + W[18];
t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
t1 = e + e1(b) + Ch(b,c,d) + 0x240ca1cc + W[19];
t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
t1 = d + e1(a) + Ch(a,b,c) + 0x2de92c6f + W[20];
t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
t1 = c + e1(h) + Ch(h,a,b) + 0x4a7484aa + W[21];
t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
t1 = b + e1(g) + Ch(g,h,a) + 0x5cb0a9dc + W[22];
t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
t1 = a + e1(f) + Ch(f,g,h) + 0x76f988da + W[23];
t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
t1 = h + e1(e) + Ch(e,f,g) + 0x983e5152 + W[24];
t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
t1 = g + e1(d) + Ch(d,e,f) + 0xa831c66d + W[25];
t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
t1 = f + e1(c) + Ch(c,d,e) + 0xb00327c8 + W[26];
t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
t1 = e + e1(b) + Ch(b,c,d) + 0xbf597fc7 + W[27];
t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
t1 = d + e1(a) + Ch(a,b,c) + 0xc6e00bf3 + W[28];
t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
t1 = c + e1(h) + Ch(h,a,b) + 0xd5a79147 + W[29];
t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
t1 = b + e1(g) + Ch(g,h,a) + 0x06ca6351 + W[30];
t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
t1 = a + e1(f) + Ch(f,g,h) + 0x14292967 + W[31];
t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
t1 = h + e1(e) + Ch(e,f,g) + 0x27b70a85 + W[32];
t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
t1 = g + e1(d) + Ch(d,e,f) + 0x2e1b2138 + W[33];
t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
t1 = f + e1(c) + Ch(c,d,e) + 0x4d2c6dfc + W[34];
t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
t1 = e + e1(b) + Ch(b,c,d) + 0x53380d13 + W[35];
t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
t1 = d + e1(a) + Ch(a,b,c) + 0x650a7354 + W[36];
t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
t1 = c + e1(h) + Ch(h,a,b) + 0x766a0abb + W[37];
t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
t1 = b + e1(g) + Ch(g,h,a) + 0x81c2c92e + W[38];
t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
t1 = a + e1(f) + Ch(f,g,h) + 0x92722c85 + W[39];
t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
t1 = h + e1(e) + Ch(e,f,g) + 0xa2bfe8a1 + W[40];
t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
t1 = g + e1(d) + Ch(d,e,f) + 0xa81a664b + W[41];
t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
t1 = f + e1(c) + Ch(c,d,e) + 0xc24b8b70 + W[42];
t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
t1 = e + e1(b) + Ch(b,c,d) + 0xc76c51a3 + W[43];
t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
t1 = d + e1(a) + Ch(a,b,c) + 0xd192e819 + W[44];
t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
t1 = c + e1(h) + Ch(h,a,b) + 0xd6990624 + W[45];
t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
t1 = b + e1(g) + Ch(g,h,a) + 0xf40e3585 + W[46];
t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
t1 = a + e1(f) + Ch(f,g,h) + 0x106aa070 + W[47];
t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
t1 = h + e1(e) + Ch(e,f,g) + 0x19a4c116 + W[48];
t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
t1 = g + e1(d) + Ch(d,e,f) + 0x1e376c08 + W[49];
t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
t1 = f + e1(c) + Ch(c,d,e) + 0x2748774c + W[50];
t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
t1 = e + e1(b) + Ch(b,c,d) + 0x34b0bcb5 + W[51];
t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
t1 = d + e1(a) + Ch(a,b,c) + 0x391c0cb3 + W[52];
t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
t1 = c + e1(h) + Ch(h,a,b) + 0x4ed8aa4a + W[53];
t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
t1 = b + e1(g) + Ch(g,h,a) + 0x5b9cca4f + W[54];
t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
t1 = a + e1(f) + Ch(f,g,h) + 0x682e6ff3 + W[55];
t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
t1 = h + e1(e) + Ch(e,f,g) + 0x748f82ee + W[56];
t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
t1 = g + e1(d) + Ch(d,e,f) + 0x78a5636f + W[57];
t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
t1 = f + e1(c) + Ch(c,d,e) + 0x84c87814 + W[58];
t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
t1 = e + e1(b) + Ch(b,c,d) + 0x8cc70208 + W[59];
t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
t1 = d + e1(a) + Ch(a,b,c) + 0x90befffa + W[60];
t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
t1 = c + e1(h) + Ch(h,a,b) + 0xa4506ceb + W[61];
t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
t1 = b + e1(g) + Ch(g,h,a) + 0xbef9a3f7 + W[62];
t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
t1 = a + e1(f) + Ch(f,g,h) + 0xc67178f2 + W[63];
t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
state[0] += a; state[1] += b; state[2] += c; state[3] += d;
state[4] += e; state[5] += f; state[6] += g; state[7] += h;
/* clear any sensitive info... */
a = b = c = d = e = f = g = h = t1 = t2 = 0;
memset(W, 0, 64 * sizeof(u32));
}
static void sha256_init(struct crypto_tfm *tfm)
{
struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->state[0] = H0;
sctx->state[1] = H1;
sctx->state[2] = H2;
sctx->state[3] = H3;
sctx->state[4] = H4;
sctx->state[5] = H5;
sctx->state[6] = H6;
sctx->state[7] = H7;
sctx->count[0] = sctx->count[1] = 0;
}
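/*
 * sha256_update() maintains the bit count, tops up any partially
 * filled block in sctx->buf, runs sha256_transform() over every
 * complete 64-byte block, and stashes the remaining tail for the
 * next call (or for sha256_final()).
 */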
static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned int i, index, part_len;
/* Compute number of bytes mod 64 */
index = (unsigned int)((sctx->count[0] >> 3) & 0x3f);
/*
 * Update number of bits; count[] is a 64-bit bit counter split into
 * two 32-bit words, so carry into the high word on overflow and add
 * the high bits of the new length unconditionally.
 */
if ((sctx->count[0] += (len << 3)) < (len << 3)) {
sctx->count[1]++;
}
sctx->count[1] += (len >> 29);
part_len = 64 - index;
/* Transform as many times as possible. */
if (len >= part_len) {
memcpy(&sctx->buf[index], data, part_len);
sha256_transform(sctx->state, sctx->buf);
for (i = part_len; i + 63 < len; i += 64)
sha256_transform(sctx->state, &data[i]);
index = 0;
} else {
i = 0;
}
/* Buffer remaining input */
memcpy(&sctx->buf[index], &data[i], len-i);
}
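/*
 * sha256_final() applies the standard length padding: a 0x80 byte,
 * zeroes up to 56 mod 64, then the 64-bit big-endian message length
 * in bits.  Both are fed through sha256_update() so the last one or
 * two blocks are compressed normally; the state is then written out
 * big-endian and the context wiped.
 */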
static void sha256_final(struct crypto_tfm *tfm, u8 *out)
{
struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
__be32 *dst = (__be32 *)out;
__be32 bits[2];
unsigned int index, pad_len;
int i;
static const u8 padding[64] = { 0x80, };
/* Save number of bits */
bits[1] = cpu_to_be32(sctx->count[0]);
bits[0] = cpu_to_be32(sctx->count[1]);
/* Pad out to 56 mod 64. */
index = (sctx->count[0] >> 3) & 0x3f;
pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
sha256_update(tfm, padding, pad_len);
/* Append length (before padding) */
sha256_update(tfm, (const u8 *)bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 8; i++)
dst[i] = cpu_to_be32(sctx->state[i]);
/* Zeroize sensitive information. */
memset(sctx, 0, sizeof(*sctx));
}
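/*
 * Algorithm descriptor: a digest-type algorithm named "sha256" with a
 * 64-byte block size (also the HMAC block size), a 32-byte digest, a
 * per-tfm context of sizeof(struct sha256_ctx), and an alignmask of 3
 * because LOAD_OP accesses the input as 32-bit words.
 */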
static struct crypto_alg alg = {
.cra_name = "sha256",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA256_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sha256_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = SHA256_DIGEST_SIZE,
.dia_init = sha256_init,
.dia_update = sha256_update,
.dia_final = sha256_final } }
};
static int __init init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
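
For context, a kernel-side user of this era would not call these routines directly but would go through the generic digest interface, which dispatches to the dia_init/dia_update/dia_final hooks above. A rough usage sketch against the contemporaneous API (crypto_alloc_tfm, the crypto_digest_* helpers and sg_init_one are assumed to have their 2.6.17-era signatures; this is illustrative, not taken from the tree):

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	static int sha256_digest_example(u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_tfm *tfm;
		struct scatterlist sg;

		/* Look up the registered "sha256" algorithm. */
		tfm = crypto_alloc_tfm("sha256", 0);
		if (tfm == NULL)
			return -ENOMEM;

		sg_init_one(&sg, data, len);

		crypto_digest_init(tfm);		/* dia_init */
		crypto_digest_update(tfm, &sg, 1);	/* dia_update, block by block */
		crypto_digest_final(tfm, out);		/* dia_final, 32-byte digest */

		crypto_free_tfm(tfm);
		return 0;
	}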