author | Nicolas Pitre <nico@cam.org> | 2005-10-28 15:26:40 +0100
---|---|---
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2005-10-28 15:26:40 +0100
commit | c09f98271f685af349d3f0199360f1c0e85550e0 (patch) |
tree | 2e8a393d76e386ff64af2a786cf5baf370f3823b |
parent | 0b7cd62ecdc1f09b7df4608a3fee644b1c27985b (diff) |
[ARM] 2930/1: optimized sha1 implementation for ARM
Patch from Nicolas Pitre
Here's an ARM assembly SHA1 implementation to replace the default C
version. It is approximately 50% faster than the generic C version. On
an XScale processor running at 400MHz:
generic C version: 9.8 MB/s
my version: 14.5 MB/s
This code is useful to quite a few callers in the tree:
crypto/sha1.c: sha_transform(sctx->state, sctx->buffer, temp);
crypto/sha1.c: sha_transform(sctx->state, &data[i], temp);
drivers/char/random.c: sha_transform(buf, (__u8 *)r->pool+i, buf + 5);
drivers/char/random.c: sha_transform(buf, (__u8 *)data, buf + 5);
net/ipv4/syncookies.c: sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);
Signed-off-by: Nicolas Pitre <nico@cam.org>
Seems to work fine on big-endian as well.
Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
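For context, the interface these callers use is the one declared in the new sha1.S below (and in the generic lib/sha1.c it optimizes): a 5-word digest, a 64-byte input block, and a caller-supplied scratch array that sha_transform() fills with 16 loaded plus 64 expanded 32-bit words. A minimal caller sketch, assuming the <linux/cryptohash.h> prototypes of this era; the function name and single-block handling here are illustrative, not code from the tree:

#include <linux/types.h>
#include <linux/cryptohash.h>	/* sha_init(), sha_transform() prototypes */

/* Hypothetical example: hash one already-padded 64-byte block. */
static void sha1_single_block_example(__u32 digest[5], const char block[64])
{
	__u32 workspace[80];	/* 16 input words + 64 expanded words */

	sha_init(digest);	/* 0x67452301, 0xefcdab89, ... */
	sha_transform(digest, block, workspace);
	/* Real callers (crypto/sha1.c, drivers/char/random.c, ...) iterate
	 * over the message and apply SHA-1 padding themselves. */
}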
-rw-r--r-- | arch/arm/lib/Makefile | 2
-rw-r--r-- | arch/arm/lib/sha1.S | 206
2 files changed, 207 insertions, 1 deletion
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 8725d63e4219..71e5b99e519e 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -11,7 +11,7 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
 	   strnlen_user.o strchr.o strrchr.o testchangebit.o \
 	   testclearbit.o testsetbit.o uaccess.o getuser.o \
 	   putuser.o ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
-	   ucmpdi2.o lib1funcs.o div64.o \
+	   ucmpdi2.o lib1funcs.o div64.o sha1.o \
 	   io-readsb.o io-writesb.o io-readsl.o io-writesl.o
 
 ifeq ($(CONFIG_CPU_32v3),y)
diff --git a/arch/arm/lib/sha1.S b/arch/arm/lib/sha1.S
new file mode 100644
index 000000000000..ff6ece487ffc
--- /dev/null
+++ b/arch/arm/lib/sha1.S
@@ -0,0 +1,206 @@
+/*
+ * linux/arch/arm/lib/sha1.S
+ *
+ * SHA transform optimized for ARM
+ *
+ * Copyright: (C) 2005 by Nicolas Pitre <nico@cam.org>
+ * Created: September 17, 2005
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The reference implementation for this code is linux/lib/sha1.c
+ */
+
+#include <linux/linkage.h>
+
+	.text
+
+
+/*
+ * void sha_transform(__u32 *digest, const char *in, __u32 *W)
+ *
+ * Note: the "in" ptr may be unaligned.
+ */
+
+ENTRY(sha_transform)
+
+	stmfd	sp!, {r4 - r8, lr}
+
+	@ for (i = 0; i < 16; i++)
+	@         W[i] = be32_to_cpu(in[i]); */
+
+#ifdef __ARMEB__
+	mov	r4, r0
+	mov	r0, r2
+	mov	r2, #64
+	bl	memcpy
+	mov	r2, r0
+	mov	r0, r4
+#else
+	mov	r3, r2
+	mov	lr, #16
+1:	ldrb	r4, [r1], #1
+	ldrb	r5, [r1], #1
+	ldrb	r6, [r1], #1
+	ldrb	r7, [r1], #1
+	subs	lr, lr, #1
+	orr	r5, r5, r4, lsl #8
+	orr	r6, r6, r5, lsl #8
+	orr	r7, r7, r6, lsl #8
+	str	r7, [r3], #4
+	bne	1b
+#endif
+
+	@ for (i = 0; i < 64; i++)
+	@         W[i+16] = ror(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 31);
+
+	sub	r3, r2, #4
+	mov	lr, #64
+2:	ldr	r4, [r3, #4]!
+	subs	lr, lr, #1
+	ldr	r5, [r3, #8]
+	ldr	r6, [r3, #32]
+	ldr	r7, [r3, #52]
+	eor	r4, r4, r5
+	eor	r4, r4, r6
+	eor	r4, r4, r7
+	mov	r4, r4, ror #31
+	str	r4, [r3, #64]
+	bne	2b
+
+	/*
+	 * The SHA functions are:
+	 *
+	 * f1(B,C,D) = (D ^ (B & (C ^ D)))
+	 * f2(B,C,D) = (B ^ C ^ D)
+	 * f3(B,C,D) = ((B & C) | (D & (B | C)))
+	 *
+	 * Then the sub-blocks are processed as follows:
+	 *
+	 * A' = ror(A, 27) + f(B,C,D) + E + K + *W++
+	 * B' = A
+	 * C' = ror(B, 2)
+	 * D' = C
+	 * E' = D
+	 *
+	 * We therefore unroll each loop 5 times to avoid register shuffling.
+	 * Also the ror for C (and also D and E which are successively derived
+	 * from it) is applied in place to cut on an additional mov insn for
+	 * each round.
+	 */
+
+	.macro	sha_f1, A, B, C, D, E
+	ldr	r3, [r2], #4
+	eor	ip, \C, \D
+	add	\E, r1, \E, ror #2
+	and	ip, \B, ip, ror #2
+	add	\E, \E, \A, ror #27
+	eor	ip, ip, \D, ror #2
+	add	\E, \E, r3
+	add	\E, \E, ip
+	.endm
+
+	.macro	sha_f2, A, B, C, D, E
+	ldr	r3, [r2], #4
+	add	\E, r1, \E, ror #2
+	eor	ip, \B, \C, ror #2
+	add	\E, \E, \A, ror #27
+	eor	ip, ip, \D, ror #2
+	add	\E, \E, r3
+	add	\E, \E, ip
+	.endm
+
+	.macro	sha_f3, A, B, C, D, E
+	ldr	r3, [r2], #4
+	add	\E, r1, \E, ror #2
+	orr	ip, \B, \C, ror #2
+	add	\E, \E, \A, ror #27
+	and	ip, ip, \D, ror #2
+	add	\E, \E, r3
+	and	r3, \B, \C, ror #2
+	orr	ip, ip, r3
+	add	\E, \E, ip
+	.endm
+
+	ldmia	r0, {r4 - r8}
+
+	mov	lr, #4
+	ldr	r1, .L_sha_K + 0
+
+	/* adjust initial values */
+	mov	r6, r6, ror #30
+	mov	r7, r7, ror #30
+	mov	r8, r8, ror #30
+
+3:	subs	lr, lr, #1
+	sha_f1	r4, r5, r6, r7, r8
+	sha_f1	r8, r4, r5, r6, r7
+	sha_f1	r7, r8, r4, r5, r6
+	sha_f1	r6, r7, r8, r4, r5
+	sha_f1	r5, r6, r7, r8, r4
+	bne	3b
+
+	ldr	r1, .L_sha_K + 4
+	mov	lr, #4
+
+4:	subs	lr, lr, #1
+	sha_f2	r4, r5, r6, r7, r8
+	sha_f2	r8, r4, r5, r6, r7
+	sha_f2	r7, r8, r4, r5, r6
+	sha_f2	r6, r7, r8, r4, r5
+	sha_f2	r5, r6, r7, r8, r4
+	bne	4b
+
+	ldr	r1, .L_sha_K + 8
+	mov	lr, #4
+
+5:	subs	lr, lr, #1
+	sha_f3	r4, r5, r6, r7, r8
+	sha_f3	r8, r4, r5, r6, r7
+	sha_f3	r7, r8, r4, r5, r6
+	sha_f3	r6, r7, r8, r4, r5
+	sha_f3	r5, r6, r7, r8, r4
+	bne	5b
+
+	ldr	r1, .L_sha_K + 12
+	mov	lr, #4
+
+6:	subs	lr, lr, #1
+	sha_f2	r4, r5, r6, r7, r8
+	sha_f2	r8, r4, r5, r6, r7
+	sha_f2	r7, r8, r4, r5, r6
+	sha_f2	r6, r7, r8, r4, r5
+	sha_f2	r5, r6, r7, r8, r4
+	bne	6b
+
+	ldmia	r0, {r1, r2, r3, ip, lr}
+	add	r4, r1, r4
+	add	r5, r2, r5
+	add	r6, r3, r6, ror #2
+	add	r7, ip, r7, ror #2
+	add	r8, lr, r8, ror #2
+	stmia	r0, {r4 - r8}
+
+	ldmfd	sp!, {r4 - r8, pc}
+
+.L_sha_K:
+	.word	0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
+
+
+/*
+ * void sha_init(__u32 *buf)
+ */
+
+.L_sha_initial_digest:
+	.word	0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0
+
+ENTRY(sha_init)
+
+	str	lr, [sp, #-4]!
+	adr	r1, .L_sha_initial_digest
+	ldmia	r1, {r1, r2, r3, ip, lr}
+	stmia	r0, {r1, r2, r3, ip, lr}
+	ldr	pc, [sp], #4
+
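To make the comment block in sha1.S concrete, here is a plain C sketch of the same transform: the big-endian load of W[0..15], the W[i+16] = ror(..., 31) expansion (a rotate right by 31 is a rotate left by 1), and the f1/f2/f3 rounds with the A'..E' register rotation described above. It mirrors the structure of the reference linux/lib/sha1.c but is written from that comment as an illustration; ror32_sketch and sha_transform_c_sketch are made-up names, and standard C types are used so the snippet stands alone:

#include <stdint.h>

static inline uint32_t ror32_sketch(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

void sha_transform_c_sketch(uint32_t *digest, const unsigned char *in, uint32_t *W)
{
	uint32_t A, B, C, D, E, f, K, tmp;
	int i;

	/* W[0..15]: the 64-byte block as big-endian words (what the
	 * __ARMEB__ memcpy / little-endian byte-assembly code sets up) */
	for (i = 0; i < 16; i++)
		W[i] = ((uint32_t)in[4 * i] << 24) | ((uint32_t)in[4 * i + 1] << 16) |
		       ((uint32_t)in[4 * i + 2] << 8) | (uint32_t)in[4 * i + 3];

	/* W[16..79]: expansion loop, ror #31 == rol #1 */
	for (i = 0; i < 64; i++)
		W[i + 16] = ror32_sketch(W[i + 13] ^ W[i + 8] ^ W[i + 2] ^ W[i], 31);

	A = digest[0]; B = digest[1]; C = digest[2];
	D = digest[3]; E = digest[4];

	for (i = 0; i < 80; i++) {
		if (i < 20) {
			f = D ^ (B & (C ^ D));		/* f1, K = .L_sha_K[0] */
			K = 0x5a827999;
		} else if (i < 40) {
			f = B ^ C ^ D;			/* f2, K = .L_sha_K[1] */
			K = 0x6ed9eba1;
		} else if (i < 60) {
			f = (B & C) | (D & (B | C));	/* f3, K = .L_sha_K[2] */
			K = 0x8f1bbcdc;
		} else {
			f = B ^ C ^ D;			/* f2 again, K = .L_sha_K[3] */
			K = 0xca62c1d6;
		}
		/* A' = ror(A, 27) + f(B,C,D) + E + K + *W++,
		 * then B' = A, C' = ror(B, 2), D' = C, E' = D */
		tmp = ror32_sketch(A, 27) + f + E + K + W[i];
		E = D;
		D = C;
		C = ror32_sketch(B, 2);
		B = A;
		A = tmp;
	}

	digest[0] += A;
	digest[1] += B;
	digest[2] += C;
	digest[3] += D;
	digest[4] += E;
}

The assembly avoids the explicit tmp shuffling by unrolling five rounds per loop iteration so each register's role rotates through the macro arguments, and it keeps C, D and E pre-rotated by 2; that is what the "adjust initial values" movs and the ror #2 fix-ups in the final additions compensate for.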