#define ROTL64(a,n) (((a)<<(n))|((a)>>(64-(n))))
#define ROTR64(a,n) (((a)>>(n))|((a)<<(64-(n))))
-#define TWEAK 1
-#define BUG24 0
-#define F0_HACK 0
#define DEBUG 0
#define R64_6(x) (ROTR64((x), 21))
#define R64_7(x) (ROTR64((x), 11))
-/*
-#define K 0x0555555555555555LL
-#define MASK 0xFFFFFFFFFFFFFFFFLL
-static
-uint64_t k_lut[] PROGMEM = {
- 16LL*K, 17LL*K, 18LL*K, 19LL*K,
- 20LL*K, 21LL*K, 22LL*K, 23LL*K,
- 24LL*K, 25LL*K, 26LL*K, 27LL*K,
- 28LL*K, 29LL*K, 30LL*K, 31LL*K };
-*/
-/* the same as above but precomputed to avoid compiler warnings */
-static const
-uint64_t k_lut[] = {
- 0x5555555555555550LL, 0x5aaaaaaaaaaaaaa5LL, 0x5ffffffffffffffaLL,
- 0x655555555555554fLL, 0x6aaaaaaaaaaaaaa4LL, 0x6ffffffffffffff9LL,
- 0x755555555555554eLL, 0x7aaaaaaaaaaaaaa3LL, 0x7ffffffffffffff8LL,
- 0x855555555555554dLL, 0x8aaaaaaaaaaaaaa2LL, 0x8ffffffffffffff7LL,
- 0x955555555555554cLL, 0x9aaaaaaaaaaaaaa1LL, 0x9ffffffffffffff6LL,
- 0xa55555555555554bLL };
-
-static
-uint64_t bmw_large_expand1(uint8_t j, const uint64_t* q, const void* m, const void* h){
- uint64_t r;
- /* r = 0x0555555555555555LL*(j+16); */
- r = ( ROTL64(((uint64_t*)m)[(j)&0xf], ((j+ 0)&0xf)+1)
- + ROTL64(((uint64_t*)m)[(j+3)&0xf], ((j+ 3)&0xf)+1)
- + k_lut[j]
- - ROTL64(((uint64_t*)m)[(j+10)&0xf],((j+10)&0xf)+1)
- ) ^ ((uint64_t*)h)[(j+7)&0xf];
- r += S64_1(q[j+ 0]) + S64_2(q[j+ 1]) + S64_3(q[j+ 2]) + S64_0(q[j+ 3]) +
- S64_1(q[j+ 4]) + S64_2(q[j+ 5]) + S64_3(q[j+ 6]) + S64_0(q[j+ 7]) +
- S64_1(q[j+ 8]) + S64_2(q[j+ 9]) + S64_3(q[j+10]) + S64_0(q[j+11]) +
- S64_1(q[j+12]) + S64_2(q[j+13]) + S64_3(q[j+14]) + S64_0(q[j+15]);
-
- return r;
-}
-
-static
-uint64_t bmw_large_expand2(uint8_t j, const uint64_t* q, const void* m, const void* h){
- uint64_t r=0;
- r = ( ROTL64(((uint64_t*)m)[(j)&0xf], ((j+ 0)&0xf)+1)
- + ROTL64(((uint64_t*)m)[(j+3)&0xf], ((j+ 3)&0xf)+1)
- + k_lut[j]
- - ROTL64(((uint64_t*)m)[(j+10)&0xf],((j+10)&0xf)+1)
- ) ^ ((uint64_t*)h)[(j+7)&0xf];
- r += (q[j+ 0]) + R64_1(q[j+ 1]) + (q[j+ 2]) + R64_2(q[j+ 3]) +
- (q[j+ 4]) + R64_3(q[j+ 5]) + (q[j+ 6]) + R64_4(q[j+ 7]) +
- (q[j+ 8]) + R64_5(q[j+ 9]) + (q[j+10]) + R64_6(q[j+11]) +
- (q[j+12]) + R64_7(q[j+13]) + S64_4(q[j+14]) + S64_5(q[j+15]);
-
- return r;
-}
-
-#if F0_HACK==2
-/* to understand this implementation take a look at f0-opt-table.txt */
-static uint16_t hack_table[5] = { 0x0311, 0xDDB3, 0x2A79, 0x07AA, 0x51C2 };
-static uint8_t offset_table[5] = { 4+16, 6+16, 9+16, 12+16, 13+16 };
-
-
-static
-void bmw_large_f0(uint64_t* q, const uint64_t* h, const void* m){
- uint16_t hack_reg;
- uint8_t i,j,c;
- uint64_t(*s[])(uint64_t)={ bmw_large_s0, bmw_large_s1, bmw_large_s2,
- bmw_large_s3, bmw_large_s4 };
- for(i=0; i<16; ++i){
- ((uint64_t*)h)[i] ^= ((uint64_t*)m)[i];
- }
- dump_x(h, 16, 'T');
- memset(q, 0, 8*16);
- c=4;
- do{
- i=15;
- j = offset_table[c];
- hack_reg = hack_table[c];
- do{
- if(hack_reg&1){
- q[i]-= h[j&15];
- }else{
- q[i]+= h[j&15];
- }
- --j;
- hack_reg>>= 1;
- }while(i--!=0);
- }while(c--!=0);
- dump_x(q, 16, 'W');
- for(i=0; i<16; ++i){
- q[i] = s[i%5](q[i]);
- }
-#if TWEAK
- for(i=0; i<16; ++i){
- ((uint64_t*)h)[i] ^= ((uint64_t*)m)[i];
- }
- for(i=0; i<16; ++i){
- q[i] += h[(i+1)&0xf];
- }
-#endif /* TWEAK */
-}
-#endif /* F0_HACK==2 */
-
-#if F0_HACK==1
-static
-uint8_t f0_lut[] PROGMEM ={
- 5<<1, ( 7<<1)+1, (10<<1)+0, (13<<1)+0, (14<<1)+0,
- 6<<1, ( 8<<1)+1, (11<<1)+0, (14<<1)+0, (15<<1)+1,
- 0<<1, ( 7<<1)+0, ( 9<<1)+0, (12<<1)+1, (15<<1)+0,
- 0<<1, ( 1<<1)+1, ( 8<<1)+0, (10<<1)+1, (13<<1)+0,
- 1<<1, ( 2<<1)+0, ( 9<<1)+0, (11<<1)+1, (14<<1)+1,
- 3<<1, ( 2<<1)+1, (10<<1)+0, (12<<1)+1, (15<<1)+0,
- 4<<1, ( 0<<1)+1, ( 3<<1)+1, (11<<1)+1, (13<<1)+0,
- 1<<1, ( 4<<1)+1, ( 5<<1)+1, (12<<1)+1, (14<<1)+1,
- 2<<1, ( 5<<1)+1, ( 6<<1)+1, (13<<1)+0, (15<<1)+1,
- 0<<1, ( 3<<1)+1, ( 6<<1)+0, ( 7<<1)+1, (14<<1)+0,
- 8<<1, ( 1<<1)+1, ( 4<<1)+1, ( 7<<1)+1, (15<<1)+0,
- 8<<1, ( 0<<1)+1, ( 2<<1)+1, ( 5<<1)+1, ( 9<<1)+0,
- 1<<1, ( 3<<1)+0, ( 6<<1)+1, ( 9<<1)+1, (10<<1)+0,
- 2<<1, ( 4<<1)+0, ( 7<<1)+0, (10<<1)+0, (11<<1)+0,
- 3<<1, ( 5<<1)+1, ( 8<<1)+0, (11<<1)+1, (12<<1)+1,
- 12<<1, ( 4<<1)+1, ( 6<<1)+1, ( 9<<1)+1, (13<<1)+0
-};
-
-static
-void bmw_large_f0(uint64_t* q, const uint64_t* h, const void* m){
- uint8_t i,j=-1,v,sign,l=0;
- uint64_t(*s[])(uint64_t)={ bmw_large_s0, bmw_large_s1, bmw_large_s2,
- bmw_large_s3, bmw_large_s4 };
- for(i=0; i<16; ++i){
- ((uint64_t*)h)[i] ^= ((uint64_t*)m)[i];
- }
- dump_x(h, 16, 'T');
-// memset(q, 0, 4*16);
- for(i=0; i<5*16; ++i){
- v = pgm_read_byte(f0_lut+i);
- sign = v&1;
- v >>=1;
- if(i==l){
- j++;
- l+=5;
- q[j] = h[v];
- continue;
- }
- if(sign){
- q[j] -= h[v];
- }else{
- q[j] += h[v];
- }
- }
+/* bmw_large_f1() — the expansion of q[16..31] formerly hand-written here —
+ * is now supplied by this auto-generated include. */
+#include "f1_autogen_large.i"
+
+/* Round 1 (f0) of the BMW-512 compression function.
+ * t = h XOR m; W[i] is a +/- sum of five t-words (reference formulas kept in
+ * the comment block below); then q[i] = S64_{i mod 5}(W[i]) + h[(i+1) mod 16].
+ * Unlike the removed variants, h is read-only here — the XOR goes into t[]. */
+static inline
+void bmw_large_f0(uint64_t* q, uint64_t* h, const uint64_t* m){
+	uint64_t t[16];
+	uint64_t tr0, tr1, tr2;
+	t[ 0] = h[ 0] ^ m[ 0];
+	t[ 1] = h[ 1] ^ m[ 1];
+	t[ 2] = h[ 2] ^ m[ 2];
+	t[ 3] = h[ 3] ^ m[ 3];
+	t[ 4] = h[ 4] ^ m[ 4];
+	t[ 5] = h[ 5] ^ m[ 5];
+	t[ 6] = h[ 6] ^ m[ 6];
+	t[ 7] = h[ 7] ^ m[ 7];
+	t[ 8] = h[ 8] ^ m[ 8];
+	t[ 9] = h[ 9] ^ m[ 9];
+	t[10] = h[10] ^ m[10];
+	t[11] = h[11] ^ m[11];
+	t[12] = h[12] ^ m[12];
+	t[13] = h[13] ^ m[13];
+	t[14] = h[14] ^ m[14];
+	t[15] = h[15] ^ m[15];
+	// dump_x(t, 16, 'T');
+	/*
+	q[ 0] = (t[ 5] - t[ 7] + t[10] + t[13] + t[14]);
+	q[ 1] = (t[ 6] - t[ 8] + t[11] + t[14] - t[15]);
+	q[ 2] = (t[ 0] + t[ 7] + t[ 9] - t[12] + t[15]);
+	q[ 3] = (t[ 0] - t[ 1] + t[ 8] - t[10] + t[13]);
+	q[ 4] = (t[ 1] + t[ 2] + t[ 9] - t[11] - t[14]);
+	q[ 5] = (t[ 3] - t[ 2] + t[10] - t[12] + t[15]);
+	q[ 6] = (t[ 4] - t[ 0] - t[ 3] - t[11] + t[13]);
+	q[ 7] = (t[ 1] - t[ 4] - t[ 5] - t[12] - t[14]);
+	q[ 8] = (t[ 2] - t[ 5] - t[ 6] + t[13] - t[15]);
+	q[ 9] = (t[ 0] - t[ 3] + t[ 6] - t[ 7] + t[14]);
+	q[10] = (t[ 8] - t[ 1] - t[ 4] - t[ 7] + t[15]);
+	q[11] = (t[ 8] - t[ 0] - t[ 2] - t[ 5] + t[ 9]);
+	q[12] = (t[ 1] + t[ 3] - t[ 6] - t[ 9] + t[10]);
+	q[13] = (t[ 2] + t[ 4] + t[ 7] + t[10] + t[11]);
+	q[14] = (t[ 3] - t[ 5] + t[ 8] - t[11] - t[12]);
+	q[15] = (t[12] - t[ 4] - t[ 6] - t[ 9] + t[13]);
+	*/
+	/* Same sums as the reference block above, but with tr0/tr1/tr2 caching
+	 * two-term subexpressions that are shared between rows.  The statement
+	 * ORDER is significant: each bare (trN) use reads the most recent
+	 * (trN=...) assignment textually above it — do not reorder. */
+	q[ 0] = +t[ 5] +t[10] +t[13] +(tr1=-t[ 7]+t[14]) ;
+	q[ 3] = +t[ 8] +t[13] +t[ 0] +(tr2=-t[ 1]-t[10]) ;
+	q[ 6] = -t[11] +t[13] -t[ 0] -t[ 3] +t[ 4] ;
+	q[ 9] = +t[ 0] +(tr0=-t[ 3]+t[ 6]) +(tr1) ;
+	q[12] = -t[ 9] -(tr0) -(tr2) ;
+	q[15] = -t[ 4] +(tr0=-t[ 9]+t[12]) +(tr1=-t[ 6]+t[13]) ;
+	q[ 2] = +t[ 7] +t[15] +t[ 0] -(tr0) ;
+	q[ 5] = +t[10] +(tr0=-t[ 2]+t[15]) +(tr2=+t[ 3]-t[12]) ;
+	q[ 8] = -t[ 5] -(tr0) +(tr1) ;
+	q[11] = -t[ 0] -t[ 2] +t[ 9] +(tr0=-t[ 5]+t[ 8]) ;
+	q[14] = -t[11] +(tr0) +(tr2) ;
+	q[ 1] = +t[ 6] +(tr0=+t[11]+t[14]) +(tr1=-t[ 8]-t[15]) ;
+	q[ 4] = +t[ 9] +t[ 1] +t[ 2] -(tr0) ;
+	q[ 7] = -t[12] -t[14] +t[ 1] -t[ 4] -t[ 5] ;
+	q[10] = -t[ 1] +(tr0=-t[ 4]-t[ 7]) -(tr1) ;
+	q[13] = +t[ 2] +t[10] +t[11] -(tr0) ;
	dump_x(q, 16, 'W');
+	/* Apply the five s-boxes S64_0..S64_4 cyclically, then add h[(i+1) mod 16]
+	 * (the addition formerly guarded by the TWEAK define). */
+	q[ 0] = S64_0(q[ 0]) + h[ 1];
+	q[ 1] = S64_1(q[ 1]) + h[ 2];
+	q[ 2] = S64_2(q[ 2]) + h[ 3];
+	q[ 3] = S64_3(q[ 3]) + h[ 4];
+	q[ 4] = S64_4(q[ 4]) + h[ 5];
+	q[ 5] = S64_0(q[ 5]) + h[ 6];
+	q[ 6] = S64_1(q[ 6]) + h[ 7];
+	q[ 7] = S64_2(q[ 7]) + h[ 8];
+	q[ 8] = S64_3(q[ 8]) + h[ 9];
+	q[ 9] = S64_4(q[ 9]) + h[10];
+	q[10] = S64_0(q[10]) + h[11];
+	q[11] = S64_1(q[11]) + h[12];
+	q[12] = S64_2(q[12]) + h[13];
+	q[13] = S64_3(q[13]) + h[14];
+	q[14] = S64_4(q[14]) + h[15];
+	q[15] = S64_0(q[15]) + h[ 0];
}
-#endif /* F0_HACK==0 */
-static
-void bmw_large_f1(uint64_t* q, const void* m, const uint64_t* h){
- uint8_t i;
- q[16] = bmw_large_expand1(0, q, m, h);
- q[17] = bmw_large_expand1(1, q, m, h);
- for(i=2; i<16; ++i){
- q[16+i] = bmw_large_expand2(i, q, m, h);
- }
-}
+/* Round 3 (f2, output feed-forward) of BMW-512: folds the 32 expanded words
+ * q[0..31] and the message block m into the new chaining value h.
+ * xl/xh are the XOR-folds of q[16..23] and q[16..31] respectively. */
+static inline
+void bmw_large_f2(uint64_t* h, const uint64_t* q, const uint64_t* m){
-static
-void bmw_large_f2(uint64_t* h, const uint64_t* q, const void* m){
-	uint64_t xl=0, xh;
-	uint8_t i;
-	for(i=16;i<24;++i){
-		xl ^= q[i];
-	}
-	xh = xl;
-	for(i=24;i<32;++i){
-		xh ^= q[i];
-	}
+	uint64_t xl, xh;
+	xl = q[16] ^ q[17] ^ q[18] ^ q[19] ^ q[20] ^ q[21] ^ q[22] ^ q[23];
+	xh = xl ^ q[24] ^ q[25] ^ q[26] ^ q[27] ^ q[28] ^ q[29] ^ q[30] ^ q[31];
#if DEBUG
	cli_putstr("\r\n XL = ");
-	cli_hexdump_rev(&xl, 4);
+	cli_hexdump_rev(&xl, 8);
	cli_putstr("\r\n XH = ");
-	cli_hexdump_rev(&xh, 4);
+	cli_hexdump_rev(&xh, 8);
+	/* dump all 8 bytes of the 64-bit values; the old code only dumped 4 */
#endif
-	memcpy(h, m, 16*8);
-	h[0] ^= SHL64(xh, 5) ^ SHR64(q[16], 5);
-	h[1] ^= SHR64(xh, 7) ^ SHL64(q[17], 8);
-	h[2] ^= SHR64(xh, 5) ^ SHL64(q[18], 5);
-	h[3] ^= SHR64(xh, 1) ^ SHL64(q[19], 5);
-	h[4] ^= SHR64(xh, 3) ^ q[20];
-	h[5] ^= SHL64(xh, 6) ^ SHR64(q[21], 6);
-	h[6] ^= SHR64(xh, 4) ^ SHL64(q[22], 6);
-	h[7] ^= SHR64(xh,11) ^ SHL64(q[23], 2);
-	for(i=0; i<8; ++i){
-		h[i] += xl ^ q[24+i] ^ q[i];
-	}
-	for(i=0; i<8; ++i){
-		h[8+i] ^= xh ^ q[24+i];
-		h[8+i] += ROTL64(h[(4+i)%8],i+9);
-	}
-	h[ 8] += SHL64(xl, 8) ^ q[23] ^ q[ 8];
-	h[ 9] += SHR64(xl, 6) ^ q[16] ^ q[ 9];
-	h[10] += SHL64(xl, 6) ^ q[17] ^ q[10];
-	h[11] += SHL64(xl, 4) ^ q[18] ^ q[11];
-	h[12] += SHR64(xl, 3) ^ q[19] ^ q[12];
-	h[13] += SHR64(xl, 4) ^ q[20] ^ q[13];
-	h[14] += SHR64(xl, 7) ^ q[21] ^ q[14];
-	h[15] += SHR64(xl, 2) ^ q[22] ^ q[15];
+
+	h[0] = (SHL64(xh, 5) ^ SHR64(q[16], 5) ^ m[ 0]) + (xl ^ q[24] ^ q[ 0]);
+	h[1] = (SHR64(xh, 7) ^ SHL64(q[17], 8) ^ m[ 1]) + (xl ^ q[25] ^ q[ 1]);
+	h[2] = (SHR64(xh, 5) ^ SHL64(q[18], 5) ^ m[ 2]) + (xl ^ q[26] ^ q[ 2]);
+	h[3] = (SHR64(xh, 1) ^ SHL64(q[19], 5) ^ m[ 3]) + (xl ^ q[27] ^ q[ 3]);
+	h[4] = (SHR64(xh, 3) ^ q[20] ^ m[ 4]) + (xl ^ q[28] ^ q[ 4]);
+	h[5] = (SHL64(xh, 6) ^ SHR64(q[21], 6) ^ m[ 5]) + (xl ^ q[29] ^ q[ 5]);
+	h[6] = (SHR64(xh, 4) ^ SHL64(q[22], 6) ^ m[ 6]) + (xl ^ q[30] ^ q[ 6]);
+	h[7] = (SHR64(xh,11) ^ SHL64(q[23], 2) ^ m[ 7]) + (xl ^ q[31] ^ q[ 7]);
+
+	/* h[8..15] each add a rotation (amounts 9..16) of an h-word that was just
+	 * computed above — first h[4..7], then h[0..3].  The eight assignments
+	 * above must therefore complete first; do not reorder these blocks. */
+	h[ 8] = ROTL64(h[4], 9) + (xh ^ q[24] ^ m[ 8]) + (SHL64(xl, 8) ^ q[23] ^ q[ 8]);
+	h[ 9] = ROTL64(h[5], 10) + (xh ^ q[25] ^ m[ 9]) + (SHR64(xl, 6) ^ q[16] ^ q[ 9]);
+	h[10] = ROTL64(h[6], 11) + (xh ^ q[26] ^ m[10]) + (SHL64(xl, 6) ^ q[17] ^ q[10]);
+	h[11] = ROTL64(h[7], 12) + (xh ^ q[27] ^ m[11]) + (SHL64(xl, 4) ^ q[18] ^ q[11]);
+	h[12] = ROTL64(h[0], 13) + (xh ^ q[28] ^ m[12]) + (SHR64(xl, 3) ^ q[19] ^ q[12]);
+	h[13] = ROTL64(h[1], 14) + (xh ^ q[29] ^ m[13]) + (SHR64(xl, 4) ^ q[20] ^ q[13]);
+	h[14] = ROTL64(h[2], 15) + (xh ^ q[30] ^ m[14]) + (SHR64(xl, 7) ^ q[21] ^ q[14]);
+	h[15] = ROTL64(h[3], 16) + (xh ^ q[31] ^ m[15]) + (SHR64(xl, 2) ^ q[22] ^ q[15]);
}
void bmw_large_nextBlock(bmw_large_ctx_t* ctx, const void* block){
}
void bmw_large_lastBlock(bmw_large_ctx_t* ctx, const void* block, uint16_t length_b){
-	uint8_t buffer[128];
+	/* union gives both byte-wise access (padding, memset/memcpy) and aligned
+	 * 64-bit access without casting a uint8_t* to uint64_t* */
+	union {
+		uint8_t v8[128];
+		uint64_t v64[ 16];
+	} buffer;
	while(length_b >= BMW_LARGE_BLOCKSIZE){
		bmw_large_nextBlock(ctx, block);
		length_b -= BMW_LARGE_BLOCKSIZE;
		block = (uint8_t*)block + BMW_LARGE_BLOCKSIZE_B;
	}
+	/* pad: copy the remaining message bits, then append a single 1-bit
+	 * (MSB-first within the byte: 0x80 >> (length_b mod 8)) */
+	memset(buffer.v8, 0, 128);
+	memcpy(buffer.v8, block, (length_b+7)/8);
+	buffer.v8[length_b>>3] |= 0x80 >> (length_b&0x07);
-	memset(buffer, 0, 128);
-	memcpy(buffer, block, (length_b+7)/8);
-	buffer[length_b>>3] |= 0x80 >> (length_b&0x07);
	if(length_b+1>128*8-64){
-		bmw_large_nextBlock(ctx, buffer);
-		memset(buffer, 0, 128-8);
+		/* padding bit + 64-bit length do not fit: flush one extra block, and
+		 * undo the counter increment done by nextBlock for it */
+		bmw_large_nextBlock(ctx, buffer.v8);
+		memset(buffer.v8, 0, 128-8);
		ctx->counter -= 1;
	}
-	*((uint64_t*)&(buffer[128-8])) = (uint64_t)(ctx->counter*1024LL)+(uint64_t)length_b;
-	bmw_large_nextBlock(ctx, buffer);
-#if TWEAK
+	/* message length in bits goes into the last 64-bit word
+	 * (counter counts 1024-bit blocks) */
+	buffer.v64[15] = (uint64_t)(ctx->counter*1024LL)+(uint64_t)length_b;
+	bmw_large_nextBlock(ctx, buffer.v8);
	uint8_t i;
	uint64_t q[32];
+	/* final round (formerly guarded by TWEAK): run f0/f1/f2 once more with a
+	 * constant block — bytes 0xaa, with every 8th byte set to 0xa0+i — and the
+	 * old h as "message"; the result becomes the output chaining value */
-	memset(buffer, 0xaa, 128);
+	memset(buffer.v8, 0xaa, 128);
	for(i=0; i<16; ++i){
-		buffer[8*i] = i + 0xa0;
+		buffer.v8[8*i] = i + 0xa0;
	}
-	bmw_large_f0(q, (uint64_t*)buffer, ctx->h);
-	bmw_large_f1(q, ctx->h, (uint64_t*)buffer);
-	bmw_large_f2((uint64_t*)buffer, q, ctx->h);
-	memcpy(ctx->h, buffer, 128);
-#endif
+	bmw_large_f0(q, buffer.v64, ctx->h);
+	bmw_large_f1(q, ctx->h, buffer.v64);
+	bmw_large_f2(buffer.v64, q, ctx->h);
+	memcpy(ctx->h, buffer.v8, 128);
}
void bmw384_init(bmw384_ctx_t* ctx){