/*
 * SHA1 routine optimized to do word accesses rather than byte accesses,
 * and to avoid unnecessary copies into the context array.
 *
 * This was initially based on the Mozilla SHA1 implementation, although
 * none of the original Mozilla code remains.
 */

/* this is only to get definitions for memcpy(), ntohl() and htonl() */
#include "../git-compat-util.h"

#include "sha1.h"

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))

/*
 * Force usage of rol or ror by selecting the one with the smaller constant.
 * It _can_ generate slightly smaller code (a constant of 1 is special), but
 * perhaps more importantly it's possibly faster on any uarch that does a
 * rotate with a loop.
 */

#define SHA_ASM(op, x, n) ({ unsigned int __res; __asm__(op " %1,%0":"=r" (__res):"i" (n),"0" (x)); __res; })
#define SHA_ROL(x,n)	SHA_ASM("rol", x, n)
#define SHA_ROR(x,n)	SHA_ASM("ror", x, n)

#else

#define SHA_ROT(X,l,r)	(((X) << (l)) | ((X) >> (r)))
#define SHA_ROL(X,n)	SHA_ROT(X,n,32-(n))
#define SHA_ROR(X,n)	SHA_ROT(X,32-(n),n)

#endif

/*
 * If you have 32 registers or more, the compiler can (and should)
 * try to change the array[] accesses into registers. However, on
 * machines with fewer than ~25 registers, that won't really work,
 * and at least gcc will make an unholy mess of it.
 *
 * So to avoid that mess which just slows things down, we force
 * the stores to memory to actually happen (we might be better off
 * with a 'W(t)=(val);asm("":"+m" (W(t)))' there instead, as
 * suggested by Artur Skawina - that will also make gcc unable to
 * try to do the silly "optimize away loads" part because it won't
 * see what the value will be).
 *
 * Ben Herrenschmidt reports that on PPC, the C version comes close
 * to the optimized asm with this (ie on PPC you don't want that
 * 'volatile', since there are lots of registers).
 *
 * On ARM we get the best code generation by forcing a full memory barrier
 * between each SHA_ROUND, otherwise gcc happily gets wild with spilling and
 * the stack frame size simply explodes and performance goes down the drain.
 */

#if defined(__i386__) || defined(__x86_64__)
#define setW(x, val) (*(volatile unsigned int *)&W(x) = (val))
#elif defined(__GNUC__) && defined(__arm__)
#define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
#else
#define setW(x, val) (W(x) = (val))
#endif

/* This "rolls" over the 512-bit array */
#define W(x) (array[(x)&15])
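
/*
 * Illustrative sketch, not part of the build: the 16-word ring above is
 * equivalent to the textbook 80-word message schedule, because the
 * expansion W[t] = rol32(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1) never
 * reaches back more than 16 words, so W[t] can safely overwrite W[t-16]
 * in place (and (t-3) & 15 == (t+13) & 15, matching SHA_MIX below). The
 * helper names (rol32, schedule_*) are hypothetical.
 */
#if 0
static unsigned int rol32(unsigned int x, int n)
{
	return (x << n) | (x >> (32 - n));
}

/* Textbook form: expand the full 80-word schedule. */
static void schedule_full(unsigned int w[80])
{
	int t;
	for (t = 16; t < 80; t++)
		w[t] = rol32(w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16], 1);
}

/*
 * Ring form: the same values computed in 16 words; for t >= 16,
 * ring[t & 15] holds W[t] once this step has run.
 */
static unsigned int schedule_ring_step(unsigned int ring[16], int t)
{
	unsigned int val = rol32(ring[(t-3) & 15] ^ ring[(t-8) & 15] ^
				 ring[(t-14) & 15] ^ ring[(t-16) & 15], 1);
	ring[t & 15] = val;	/* overwrites W[t-16], which is no longer needed */
	return val;
}
#endif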
/*
 * Where do we get the source from? The first 16 iterations get it from
 * the input data, the next mix it from the 512-bit array.
 */
#define SHA_SRC(t) get_be32((unsigned char *) block + (t)*4)
#define SHA_MIX(t) SHA_ROL(W((t)+13) ^ W((t)+8) ^ W((t)+2) ^ W(t), 1)

#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
	unsigned int TEMP = input(t); setW(t, TEMP); \
	E += TEMP + SHA_ROL(A,5) + (fn) + (constant); \
	B = SHA_ROR(B, 2); } while (0)

#define T_0_15(t, A, B, C, D, E)  SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )

static void blk_SHA1_Block(blk_SHA_CTX *ctx, const void *block)
{
	unsigned int A, B, C, D, E;
	unsigned int array[16];

	A = ctx->H[0];
	B = ctx->H[1];
	C = ctx->H[2];
	D = ctx->H[3];
	E = ctx->H[4];

	/* Round 1 - iterations 0-15 take their input from 'block' */
	T_0_15(0, A, B, C, D, E);
	T_0_15(1, E, A, B, C, D);
	T_0_15(2, D, E, A, B, C);
	T_0_15(3, C, D, E, A, B);
	T_0_15(4, B, C, D, E, A);
	T_0_15(5, A, B, C, D, E);
	T_0_15(6, E, A, B, C, D);
	T_0_15(7, D, E, A, B, C);
	T_0_15(8, C, D, E, A, B);
	T_0_15(9, B, C, D, E, A);
	T_0_15(10, A, B, C, D, E);
	T_0_15(11, E, A, B, C, D);
	T_0_15(12, D, E, A, B, C);
	T_0_15(13, C, D, E, A, B);
	T_0_15(14, B, C, D, E, A);
	T_0_15(15, A, B, C, D, E);

	/* Round 1 - tail. Input from 512-bit mixing array */
	T_16_19(16, E, A, B, C, D);
	T_16_19(17, D, E, A, B, C);
	T_16_19(18, C, D, E, A, B);
	T_16_19(19, B, C, D, E, A);

	/* Round 2 */
	T_20_39(20, A, B, C, D, E);
	T_20_39(21, E, A, B, C, D);
	T_20_39(22, D, E, A, B, C);
	T_20_39(23, C, D, E, A, B);
	T_20_39(24, B, C, D, E, A);
	T_20_39(25, A, B, C, D, E);
	T_20_39(26, E, A, B, C, D);
	T_20_39(27, D, E, A, B, C);
	T_20_39(28, C, D, E, A, B);
	T_20_39(29, B, C, D, E, A);
	T_20_39(30, A, B, C, D, E);
	T_20_39(31, E, A, B, C, D);
	T_20_39(32, D, E, A, B, C);
	T_20_39(33, C, D, E, A, B);
	T_20_39(34, B, C, D, E, A);
	T_20_39(35, A, B, C, D, E);
	T_20_39(36, E, A, B, C, D);
	T_20_39(37, D, E, A, B, C);
	T_20_39(38, C, D, E, A, B);
	T_20_39(39, B, C, D, E, A);

	/* Round 3 */
	T_40_59(40, A, B, C, D, E);
	T_40_59(41, E, A, B, C, D);
	T_40_59(42, D, E, A, B, C);
	T_40_59(43, C, D, E, A, B);
	T_40_59(44, B, C, D, E, A);
	T_40_59(45, A, B, C, D, E);
	T_40_59(46, E, A, B, C, D);
	T_40_59(47, D, E, A, B, C);
	T_40_59(48, C, D, E, A, B);
	T_40_59(49, B, C, D, E, A);
	T_40_59(50, A, B, C, D, E);
	T_40_59(51, E, A, B, C, D);
	T_40_59(52, D, E, A, B, C);
	T_40_59(53, C, D, E, A, B);
	T_40_59(54, B, C, D, E, A);
	T_40_59(55, A, B, C, D, E);
	T_40_59(56, E, A, B, C, D);
	T_40_59(57, D, E, A, B, C);
	T_40_59(58, C, D, E, A, B);
	T_40_59(59, B, C, D, E, A);

	/* Round 4 */
	T_60_79(60, A, B, C, D, E);
	T_60_79(61, E, A, B, C, D);
	T_60_79(62, D, E, A, B, C);
	T_60_79(63, C, D, E, A, B);
	T_60_79(64, B, C, D, E, A);
	T_60_79(65, A, B, C, D, E);
	T_60_79(66, E, A, B, C, D);
	T_60_79(67, D, E, A, B, C);
	T_60_79(68, C, D, E, A, B);
	T_60_79(69, B, C, D, E, A);
	T_60_79(70, A, B, C, D, E);
	T_60_79(71, E, A, B, C, D);
	T_60_79(72, D, E, A, B, C);
	T_60_79(73, C, D, E, A, B);
	T_60_79(74, B, C, D, E, A);
	T_60_79(75, A, B, C, D, E);
	T_60_79(76, E, A, B, C, D);
	T_60_79(77, D, E, A, B, C);
	T_60_79(78, C, D, E, A, B);
	T_60_79(79, B, C, D, E, A);

	ctx->H[0] += A;
	ctx->H[1] += B;
	ctx->H[2] += C;
	ctx->H[3] += D;
	ctx->H[4] += E;
}
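
/*
 * Illustrative sketch, not part of the build: the unrolled rounds above
 * are equivalent to the loop below. Instead of shuffling five registers
 * at the end of each iteration (e = d; d = c; ...), the unrolled form
 * rotates the *argument order* of the T_* macros, so no register moves
 * are emitted at all. The function name and the precomputed schedule
 * parameter w[] are hypothetical.
 */
#if 0
static void sha1_block_loop_form(unsigned int H[5], const unsigned int w[80])
{
	unsigned int a = H[0], b = H[1], c = H[2], d = H[3], e = H[4];
	int t;

	for (t = 0; t < 80; t++) {
		unsigned int f, k, tmp;

		/* Same round functions and constants as the T_* macros */
		if (t < 20)      { f = ((c ^ d) & b) ^ d;       k = 0x5a827999; }
		else if (t < 40) { f = b ^ c ^ d;               k = 0x6ed9eba1; }
		else if (t < 60) { f = (b & c) + (d & (b ^ c)); k = 0x8f1bbcdc; }
		else             { f = b ^ c ^ d;               k = 0xca62c1d6; }

		tmp = SHA_ROL(a, 5) + f + e + k + w[t];
		e = d; d = c; c = SHA_ROR(b, 2); b = a; a = tmp;
	}

	H[0] += a; H[1] += b; H[2] += c; H[3] += d; H[4] += e;
}
#endif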
void blk_SHA1_Init(blk_SHA_CTX *ctx)
{
	ctx->size = 0;

	/* Initialize H with the magic constants (see FIPS180 for constants) */
	ctx->H[0] = 0x67452301;
	ctx->H[1] = 0xefcdab89;
	ctx->H[2] = 0x98badcfe;
	ctx->H[3] = 0x10325476;
	ctx->H[4] = 0xc3d2e1f0;
}

void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *data, unsigned long len)
{
	unsigned int lenW = ctx->size & 63;

	ctx->size += len;

	/* Read the data into W and process blocks as they get full */
	if (lenW) {
		unsigned int left = 64 - lenW;
		if (len < left)
			left = len;
		memcpy(lenW + (char *)ctx->W, data, left);
		lenW = (lenW + left) & 63;
		len -= left;
		data = ((const char *)data + left);
		if (lenW)
			return;
		blk_SHA1_Block(ctx, ctx->W);
	}
	while (len >= 64) {
		blk_SHA1_Block(ctx, data);
		data = ((const char *)data + 64);
		len -= 64;
	}
	if (len)
		memcpy(ctx->W, data, len);
}

void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx)
{
	static const unsigned char pad[64] = { 0x80 };
	unsigned int padlen[2];
	int i;

	/* Pad with a binary 1 (ie 0x80), then zeroes, then length */
	padlen[0] = htonl((uint32_t)(ctx->size >> 29));
	padlen[1] = htonl((uint32_t)(ctx->size << 3));

	i = ctx->size & 63;
	blk_SHA1_Update(ctx, pad, 1 + (63 & (55 - i)));
	blk_SHA1_Update(ctx, padlen, 8);

	/* Output hash */
	for (i = 0; i < 5; i++)
		put_be32(hashout + i * 4, ctx->H[i]);
}
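
/*
 * Illustrative usage sketch, not part of the build: the Init/Update/Final
 * calling pattern defined above, checked against the well-known FIPS 180
 * test vector SHA-1("abc") = a9993e364706816aba3e25717850c26c9cd0d89d.
 * The test scaffolding (main, printf) is hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	blk_SHA_CTX ctx;
	unsigned char digest[20];
	int i;

	blk_SHA1_Init(&ctx);
	blk_SHA1_Update(&ctx, "abc", 3);	/* may be called repeatedly for streaming input */
	blk_SHA1_Final(digest, &ctx);

	for (i = 0; i < 20; i++)
		printf("%02x", digest[i]);	/* expect a9993e36...9cd0d89d */
	printf("\n");
	return 0;
}
#endif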