X-Git-Url: https://git.cryptolib.org/?a=blobdiff_plain;f=bmw%2Fbmw_small-tinyasm.S;h=94f1825a7b50704351877249f826f7aeff9ac760;hb=51e0ee9391650dd2827c0ab2a5f6dd9529fcaf5a;hp=de7ff5a2f62360952f9700b0d7fdc3020cf5f7d0;hpb=7bc75db2cff11a8a8b347e27dec3f4e019418d52;p=avr-crypto-lib.git diff --git a/bmw/bmw_small-tinyasm.S b/bmw/bmw_small-tinyasm.S index de7ff5a..94f1825 100644 --- a/bmw/bmw_small-tinyasm.S +++ b/bmw/bmw_small-tinyasm.S @@ -28,6 +28,11 @@ #include "avr-asm-macros.S" +acc2 = 8 +acc3 = 9 +acc0 = 14 +acc1 = 15 + /******************************************************************************/ /* param a: r22:r23:r24:r25 @@ -91,6 +96,7 @@ bitrotateleft_1: breq 20f 10: lsl r0 +rol32: rol r22 rol r23 rol r24 @@ -102,6 +108,18 @@ bitrotateleft_1: /******************************************************************************/ +sn_stub: + movw r22, r2 + movw r24, r4 + lpm r20, Z+ + rcall rotateleft32 +eor32_to_acc: + eor acc0, r22 + eor acc1, r23 + eor acc2, r24 + eor acc3, r25 + ret + s_table: s0: .byte 1, 3, 4,19 s1: .byte 1, 2, 8,23 @@ -109,13 +127,18 @@ s2: .byte 2, 1,12,25 s3: .byte 2, 2,15,29 s4: .byte 1, 0, 0, 0 s5: .byte 2, 0, 0, 0 - -eor_r22_in_r16: - eor r16, r22 - eor r17, r23 - eor r18, r24 - eor r19, r25 - ret +/* +s0: .byte 0x34, 19 +s1: .byte 0x28, 23 +s2: .byte 0x9C, 25 +s3: .byte 0xAF, 29 +s4: .byte 0x00, 0 +s5: .byte 0x80, 0 +*/ +h0 = 10 +h1 = 11 +m0 = 12 +m1 = 13 /* param x: r22:r23:r24:25 @@ -123,8 +146,10 @@ eor_r22_in_r16: */ sn: push_range 2, 5 - push r17 - push r19 + push acc0 + push acc1 + push acc2 + push acc3 ldi r30, lo8(s_table) ldi r31, hi8(s_table) lsl r20 @@ -135,30 +160,23 @@ sn: movw r4, r24 lpm r20, Z+ rcall shiftright32 - movw r16, r22 - movw r18, r24 + rcall mov32_to_acc ;--- movw r22, r2 movw r24, r4 lpm r20, Z+ rcall shiftleft32 - rcall eor_r22_in_r16 -;--- - movw r22, r2 - movw r24, r4 - lpm r20, Z+ - rcall rotateleft32 - rcall eor_r22_in_r16 + rcall eor32_to_acc ;--- - movw r22, r2 - movw r24, r4 - lpm r20, Z+ - rcall rotateleft32 - rcall eor_r22_in_r16 - movw r22, r16 - movw r24, r18 - pop r19 - pop r17 + rcall sn_stub + rcall sn_stub + + movw r22, acc0 + movw r24, acc2 + pop acc3 + pop acc2 + pop acc1 + pop acc0 pop_range 2, 5 ret @@ -222,6 +240,24 @@ add_X_to_32: ld r0, X+ adc r25, r0 ret + +store32_to_X: + st X+, r22 + st X+, r23 + st X+, r24 + st X+, r25 + ret + +mov32_to_acc: + movw acc0, r22 + movw acc2, r24 + ret + +eor_acc_from_Y_add_to_Z: + rcall load32_from_Y + rcall eor32_to_acc + rjmp add_acc_to_Z + /******************************************************************************/ /* param q: r28:r29 (Y) @@ -230,28 +266,13 @@ add_X_to_32: */ f0_hacktable: - .byte 0x03, 0x11 - .byte 0xDD, 0xB3 - .byte 0x2A, 0x79 - .byte 0x07, 0xAA - .byte 0x51, 0xC2 -f0_indextable: - .byte 5*4,7*4,10*4,13*4,14*4 -; .byte 0 ; just for alignment -f0_s_table: - .byte 0,1,2,3,4 - .byte 0,1,2,3,4 - .byte 0,1,2,3,4 -; .byte 0 - - -/******************************************************************************/ + .byte 0x03, 0x11, 5*4 + .byte 0xDD, 0xB3, 7*4 + .byte 0x2A, 0x79, 10*4 + .byte 0x07, 0xAA, 13*4 + .byte 0x51, 0xC2, 14*4 + .byte 0 ; just for alignment -const_lut: - .long 0x55555550, 0x5aaaaaa5, 0x5ffffffa, 0x6555554f - .long 0x6aaaaaa4, 0x6ffffff9, 0x7555554e, 0x7aaaaaa3 - .long 0x7ffffff8, 0x8555554d, 0x8aaaaaa2, 0x8ffffff7 - .long 0x9555554c, 0x9aaaaaa1, 0x9ffffff6, 0xa555554b /******************************************************************************* * uint32_t addelment(uint8_t j, const uint32_t* m, const uint32_t* h){ @@ -277,20 +298,6 @@ m1 
= 13 acc0 = 14 acc1 = 15 -add32_to_acc: - add acc0, r22 - adc acc1, r23 - adc acc2, r24 - adc acc3, r25 - ret - -eor32_to_acc: - eor acc0, r22 - eor acc1, r23 - eor acc2, r24 - eor acc3, r25 - ret - load_acc_from_X: ld acc0, X+ ld acc1, X+ @@ -314,6 +321,7 @@ add_acc_to_Z: ret load_rotate_add_M: + mov r20, j andi r20, 0x0f mov r0, r20 lsl r0 @@ -325,71 +333,71 @@ load_rotate_add_M: inc r20 rcall rotateleft32 brts 10f - rcall add32_to_acc - ret + rjmp add32_to_acc +; ret 10: sub acc0, r22 sbc acc1, r23 sbc acc2, r24 sbc acc3, r25 ret + +;--- + +/******************************************************************************/ +load_sn_add: + rcall load32_from_X + rcall sn +add32_to_acc: + add acc0, r22 + adc acc1, r23 + adc acc2, r24 + adc acc3, r25 + ret + +/* + param q: r26:r27 + param m: r22:r23 + param h: r20:r21 + param j: r24 +*/ + +expand_intro: + push_range 26, 27 + push r24 addelement: mov j, r24 movw h0, r20 movw m0, r22 - lsl r24 - lsl r24 - mov r28, r24 - ldi r30, lo8(const_lut) - ldi r31, hi8(const_lut) - add r30, r24 - adc r31, r1 - lpm acc0, Z+ - lpm acc1, Z+ - lpm acc2, Z+ - lpm acc3, Z+ + sbiw r26, 4 + rcall load_acc_from_X + ldi r24, 0x55 + add acc0, r24 + adc acc1, r24 + adc acc2, r24 + ldi r24, 5 + adc acc3, r24 + rcall store_acc_to_dec_X + adiw r26, 4 clt - mov r20, j rcall load_rotate_add_M - mov r20, j - subi r20, -3 + subi j, -3 rcall load_rotate_add_M - mov r20, j set - subi r20, -10 + subi j, -7 rcall load_rotate_add_M lsl j lsl j - subi j, -7*4 + subi j, -7*4+10*4 andi j, 0x3f movw r26, h0 add r26, j adc r27, r1 - ld r0, X+ - eor acc0, r0 - ld r0, X+ - eor acc1, r0 - ld r0, X+ - eor acc2, r0 - ld r0, X+ - eor acc3, r0 -;--- - ret - -/******************************************************************************/ -/* - param q: r26:r27 - param m: r22:r23 - param h: r20:r21 - param j: r24 -*/ - -expand_intro: - push_range 20, 27 -; push r24 - rcall addelement -; pop r24 - pop_range 20, 27 + rcall load32_from_X + rcall eor32_to_acc +;-- + pop r24 + pop_range 26, 27 lsl r24 lsl r24 add r26, r24 @@ -399,11 +407,9 @@ expand1: rcall expand_intro ldi r19, 1 10: - rcall load32_from_X mov r20, r19 andi r20, 3 - rcall sn - rcall add32_to_acc + rcall load_sn_add inc r19 cpi r19, 17 brne 10b @@ -418,8 +424,14 @@ expand1: param j: r24 */ +f2_1_shift_table: + .byte 0x2B, 0x64, 0x66, 0x03, 0x51, 0x55, 0x87, 0x55 +f2_2_shift_table: + .byte (2<<1), (7<<1), (4<<1), (3<<1), (4<<1)+1, (6<<1)+1, (6<<1) + expand2_rot_table: - .byte 0,3,0,7,0,13,0,16,0,19,0,23,0,27 + .byte 3,7,13,16,19,23,27 +; .byte 0 ; just for alignment expand2: rcall expand_intro @@ -428,25 +440,24 @@ expand2: ldi r31, hi8(expand2_rot_table) 10: rcall load32_from_X - mov r20, r19 + sbrs r19, 0 + rjmp 12f lpm r20, Z+ rcall rotateleft32 - rcall add32_to_acc +12: rcall add32_to_acc dec r19 brne 10b ldi r20, 4 - rcall load32_from_X - rcall sn - rcall add32_to_acc + rcall load_sn_add ldi r20, 5 - rcall load32_from_X - rcall sn - rcall add32_to_acc + rcall load_sn_add expand2_exit: - st X+, acc0 - st X+, acc1 - st X+, acc2 - st X+, acc3 + adiw r26, 4 +store_acc_to_dec_X: + st -X, acc3 + st -X, acc2 + st -X, acc1 + st -X, acc0 ret /******************************************************************************/ @@ -468,12 +479,6 @@ expand2_exit: param m: r22:r23 param h: r20:r21 */ -f2_1_shift_table: - .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 - .byte 0x2B, 0x64, 0x66, 0x03, 0x51, 0x55, 0x87, 0x55 -f2_2_shift_table: - .byte (2<<1), (7<<1), (4<<1), (3<<1), (4<<1)+1, (6<<1)+1, (6<<1) - .byte 0 ; just for alignment 
/******************************************************************************/ /* @@ -502,6 +507,11 @@ h1 = 5 m0 = 6 m1 = 7 +restore_f1: + movw r26, r2 + movw r22, r4 + movw r20, r6 + ret .global bmw_small_nextBlock .global bmw224_nextBlock @@ -512,6 +522,13 @@ bmw256_nextBlock: push_range 28, 29 push_range 2, 17 stack_alloc_large 32*4, r28, r29 + ldi r16, 0x4f + push r16 + ldi r16, 0xff + push r16 + push r16 + ldi r16, 0xfb + push r16 adiw r28, 1 ; push_range 28, 29 /* push Q */ ; push_range 22, 25 /* push M & H */ @@ -526,10 +543,7 @@ bmw256_nextBlock: adc acc1, r1 adc acc2, r1 adc acc3, r1 - st -X, acc3 - st -X, acc2 - st -X, acc1 - st -X, acc0 + rcall store_acc_to_dec_X /* call f0 */ movw r30, r22 movw r26, r24 @@ -554,20 +568,18 @@ f0: 30: ldi r18, 16 /* load initial index */ - ldi r30, lo8(f0_indextable-1) - ldi r31, hi8(f0_indextable-1) - add r30, r19 - adc r31, r1 - lpm r16, Z + /* load values from hacktable */ - ldi r30, lo8(f0_hacktable-2) - ldi r31, hi8(f0_hacktable-2) - lsl r19 - add r30, r19 + ldi r30, lo8(f0_hacktable-3) + ldi r31, hi8(f0_hacktable-3) + mov r16, r19 + lsl r16 + add r16, r19 + add r30, r16 adc r31, r1 - lsr r19 lpm r21, Z+ - lpm r20, Z + lpm r20, Z+ + lpm r16, Z+ 40: ;call add_hx_to_w add_hx_to_w: @@ -583,14 +595,12 @@ add_hx_to_w: rcall add_X_to_32 rjmp 500f 300: /* substract */ - ld r0, X+ - sub r22, r0 - ld r0, X+ - sbc r23, r0 - ld r0, X+ - sbc r24, r0 - ld r0, X+ - sbc r25, r0 + rcall load_acc_from_X + sub r22, acc0 + sbc r23, acc1 + sbc r24, acc2 + sbc r25, acc3 + 500: rcall store32_to_Y subi r16, -4 @@ -608,18 +618,19 @@ add_hx_to_w: rcall memxor_short sbiw r26, 60 ;--- - ldi r30, lo8(f0_s_table) - ldi r31, hi8(f0_s_table) + clr r17 ldi r21, 15 mov r8, r21 50: rcall load32_from_Y sbiw r28, 4 - lpm r20, Z+ - movw r2, r30 + mov r20, r17 rcall sn - movw r30, r2 - + inc r17 + cpi r17, 5 + brne 52f + clr r17 +52: rcall add_X_to_32 rcall store32_to_Y @@ -629,44 +640,36 @@ add_hx_to_w: rcall load32_from_Y clr r20 rcall sn - movw r30, r2 movw r26, h0 rcall add_X_to_32 sbiw r26, 4 - st -Y, r25 - st -Y, r24 - st -Y, r23 - st -Y, r22 + sbiw r28, 4 + rcall store32_to_Y + sbiw r28, 4 sbiw r28, 15*4 movw r20, h0 movw r22, m0 /* call f1*/ - movw r24, r28 + movw r2, r28 f1: - movw r2, r24 movw r4, r22 movw r6, r20 movw r26, r2 clr r24 rcall expand1 - movw r26, r2 - movw r22, r4 - movw r20, r6 + rcall restore_f1 ldi r24, 1 rcall expand1 ldi r17, 2 -10: movw r26, r2 - movw r22, r4 - movw r20, r6 +10: rcall restore_f1 mov r24, r17 rcall expand2 inc r17 sbrs r17, 4 rjmp 10b + rcall restore_f1 movw r24, r2 - movw r22, r4 - movw r20, r6 /* call f2 */ @@ -700,8 +703,7 @@ f2: movw h0, r20 movw r28, r22 rcall load32_from_X - movw acc0, r22 - movw acc2, r24 + rcall mov32_to_acc ldi r17, 15 10: rcall load32_from_X rcall eor32_to_acc @@ -729,15 +731,17 @@ f2: movw r26, q16_0 ldi r17, 16 10: - ld acc0, Y+ - ld acc1, Y+ - ld acc2, Y+ - ld acc3, Y+ + rcall load32_from_Y + rcall mov32_to_acc ;--- - ldi r30, lo8(f2_1_shift_table-1) - ldi r31, hi8(f2_1_shift_table-1) movw r22, xh0 movw r24, xh2 + cpi r17, 9 + brge 15f + clr r1 + rjmp 26f +15: ldi r30, lo8(f2_1_shift_table-9) + ldi r31, hi8(f2_1_shift_table-9) add r30, r17 adc r31, r1 lpm r20, Z @@ -787,9 +791,7 @@ f2: movw acc2, xl2 rcall load32_from_X rcall eor32_to_acc - rcall load32_from_Y - rcall eor32_to_acc - rcall add_acc_to_Z + rcall eor_acc_from_Y_add_to_Z dec r17 brne 10b sbiw r26, 9*4 /* X points to q[23] */ @@ -797,9 +799,7 @@ f2: eor acc1, xl0 eor acc2, xl1 eor acc3, xl2 - rcall load32_from_Y - rcall eor32_to_acc - rcall 
add_acc_to_Z + rcall eor_acc_from_Y_add_to_Z ;--- sbiw r26, 8*4 /* X points to q[16] */ mov h0, r30 @@ -819,11 +819,9 @@ f2: rjmp 21f 20: rcall shiftright32 21: - rcall eor32_to_acc - rcall load32_from_Y - rcall eor32_to_acc movw r30, h0 - rcall add_acc_to_Z + rcall eor32_to_acc + rcall eor_acc_from_Y_add_to_Z movw h0, r30 dec r17 brne 10b @@ -837,8 +835,7 @@ f2: rcall load32_from_X mov r20, r18 rcall rotateleft32 - movw acc0, r22 - movw acc2, r24 + rcall mov32_to_acc rcall add_acc_to_Z inc r18 cpi r17, 5 @@ -853,7 +850,7 @@ f2: ; ldi r22, 'H' ; rcall printX ;--- END DBG - stack_free_large3 32*4 + stack_free_large3 32*4+4 pop_range 2, 17 pop_range 28, 29 ret @@ -925,15 +922,14 @@ bmw256_lastBlock: memcpy(pctx.buffer, block, (length_b+7)/8); pctx.buffer[length_b>>3] |= 0x80 >> (length_b&0x07); */ movw r24, len0 + ldi r23, 63 + movw r26, blc0 lsr r25 ror r24 lsr r24 lsr r24 - ldi r23, 63 - sub r23, r24 - movw r26, blc0 - tst r24 breq 301f + sub r23, r24 /* copy (#r24) bytes to stack buffer */ 30: ld r20, X+ st Z+, r20 @@ -991,27 +987,22 @@ bmw256_lastBlock: rcall load32_from_Z_stub 410: clr r25 + ldi r20, 1 lsl r21 - rol r22 - rol r23 - rol r24 - rol r25 + rcall rol32 mov r20, len0 add r21, len1 adc r22, r1 adc r23, r1 adc r24, r1 adc r25, r1 - movw r30, buf0 - adiw r30, 64-8 - st Z+, r20 - st Z+, r21 - st Z+, r22 - st Z+, r23 - st Z+, r24 - st Z+, r25 - st Z+, r1 - st Z+, r1 + movw r26, buf0 + adiw r26, 64-8 + st X+, r20 + st X+, r21 + rcall store32_to_X + st X+, r1 + st X+, r1 movw r24, ctx0 movw r22, buf0 rcall bmw_small_nextBlock @@ -1020,16 +1011,15 @@ bmw256_lastBlock: pctx.buffer[i*4] = i+0xa0; } */ - ldi r18, 0xa0 - ldi r19, 0xaa + ldi r22, 0xa0 + ldi r23, 0xaa + ldi r24, 0xaa + ldi r25, 0xaa movw r26, buf0 500: - st X+, r18 - st X+, r19 - st X+, r19 - st X+, r19 - inc r18 - sbrs r18, 4 + rcall store32_to_X + inc r22 + sbrs r22, 4 rjmp 500b /* bmw_small_nextBlock((bmw_small_ctx_t*)&pctx, ctx->h); memcpy(ctx->h, pctx.buffer, 64); @@ -1062,10 +1052,9 @@ bmw256_lastBlock: */ .global bmw224_ctx2hash bmw224_ctx2hash: - movw r26, r24 movw r30, r22 adiw r30, 9*4 - ldi r22, 28 + ldi r18, 28 rjmp 1f /******************************************************************************* @@ -1078,14 +1067,13 @@ bmw224_ctx2hash: */ .global bmw256_ctx2hash bmw256_ctx2hash: - movw r26, r24 movw r30, r22 adiw r30, 8*4 - ldi r22, 32 -1: - ld r23, Z+ + ldi r18, 32 +1: movw r26, r24 +1: ld r23, Z+ st X+, r23 - dec r22 + dec r18 brne 1b ret @@ -1223,42 +1211,30 @@ c2h_lut: */ .global bmw224_init bmw224_init: - movw r26, r24 - ldi r22, 0x03 - ldi r23, 0x02 - ldi r24, 0x01 - ldi r25, 0x00 + ldi r22, 0x00 + ldi r23, 0x40 bmw_small_init: - st X+, r22 - st X+, r23 - st X+, r24 - st X+, r25 - ldi r18, 16-1 - ldi r20, 0x04 -1: - add r22, r20 - adc r23, r20 - adc r24, r20 - adc r25, r20 - st X+, r22 - st X+, r23 - st X+, r24 - st X+, r25 - dec r18 - brne 1b - st X+, r1 - st X+, r1 - st X+, r1 - st X+, r1 + movw r26, r24 + adiw r26, 4 +10: + st -X, r22 + inc r22 + mov r20, r22 + andi r20, 0x3 + brne 10b + adiw r26, 8 +20: cp r22, r23 + brne 10b + st -X, r1 + st -X, r1 + st -X, r1 + st -X, r1 ret .global bmw256_init bmw256_init: - movw r26, r24 - ldi r22, 0x43 - ldi r23, 0x42 - ldi r24, 0x41 - ldi r25, 0x40 + ldi r22, 0x40 + ldi r23, 0x80 rjmp bmw_small_init
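
For reference, a minimal C sketch of what the sn routine computes: each s_table row supplies a shift-right amount, a shift-left amount and two rotate-left amounts, and the four results are XORed together into the accumulator, i.e. the s0..s5 sub-functions of BMW-small. The names below are illustrative, not taken from the patch.

#include <stdint.h>

static uint32_t rotl32(uint32_t x, uint8_t n)
{
	return n ? (x << n) | (x >> (32u - n)) : x;
}

/* rows mirror s_table: shift-right, shift-left, rotate-left, rotate-left */
static const uint8_t s_tab[6][4] = {
	{1, 3,  4, 19},   /* s0 */
	{1, 2,  8, 23},   /* s1 */
	{2, 1, 12, 25},   /* s2 */
	{2, 2, 15, 29},   /* s3 */
	{1, 0,  0,  0},   /* s4: reduces to x ^ (x >> 1) */
	{2, 0,  0,  0},   /* s5: reduces to x ^ (x >> 2) */
};

uint32_t sn_ref(uint32_t x, uint8_t i)	/* i = 0..5 */
{
	const uint8_t *t = s_tab[i];
	return (x >> t[0]) ^ (x << t[1]) ^ rotl32(x, t[2]) ^ rotl32(x, t[3]);
}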
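
The removed const_lut held the expansion constants 0x55555550, 0x5AAAAAA5, ..., 0xA555554B, an arithmetic sequence with step 0x05555555; the patched bmw256_nextBlock instead pushes the seed 0x4FFFFFFB just below the Q buffer, and addelement adds 0x55/0x55/0x55/0x05 with carry to that running value before each use. A short C sketch of the equivalence, assuming a 0-based element index j:

#include <stdint.h>

/* constant as it was read from const_lut */
uint32_t k_lut(uint8_t j)		/* j = 0..15 */
{
	return (uint32_t)(j + 16) * 0x05555555UL;
}

/* constant as produced by the running value kept on the stack */
uint32_t k_running(uint8_t j)
{
	uint32_t k = 0x4FFFFFFBUL;	/* bytes pushed in bmw256_nextBlock */
	for (uint8_t i = 0; i <= j; i++)
		k += 0x05555555UL;	/* add 0x55, 0x55, 0x55, 0x05 with carry */
	return k;			/* equals k_lut(j) */
}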
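
The rewritten bmw_small_init no longer stores precomputed IV words; it counts a byte value up from 0x00 (BMW-224) or 0x40 (BMW-256) to the limit 0x40 resp. 0x80, writing each group of four bytes in descending address order, and finally clears the four bytes following h[15] (presumably the block counter of the context). A hedged C equivalent with an illustrative signature:

#include <stdint.h>

/* first = 0x00 for BMW-224, 0x40 for BMW-256; h and counter stand in for the
   corresponding context fields. */
void bmw_small_init_sketch(uint32_t h[16], uint8_t first, uint32_t *counter)
{
	for (uint8_t i = 0; i < 16; i++) {
		uint32_t b = (uint32_t)first + 4u * i;
		/* h[0] = 0x00010203 resp. 0x40414243, ...,
		   h[15] = 0x3C3D3E3F resp. 0x7C7D7E7F */
		h[i] = (b << 24) | ((b + 1) << 16) | ((b + 2) << 8) | (b + 3);
	}
	*counter = 0;	/* the four zero bytes written after h[15] */
}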