#include "avr-asm-macros.S"
+acc2 = 8	; 32-bit accumulator lives in r14:r15:r8:r9 (call-saved regs)
+acc3 = 9
+acc0 = 14
+acc1 = 15
+
+#define DEBUG 0	; set nonzero to enable the commented-out debug print hooks
+
/******************************************************************************/
/*
param a: r22:r23:r24:r25
param s: r20
*/
shiftleft32:
+	tst r20			; shift amount negative?
+	brpl 10f		; no: do a plain left shift
+	neg r20			; yes: negate amount and
+	rjmp shiftright32	; delegate to the right-shift routine
+10:
	clr r0
	cpi r20, 8
	brlo bitrotateleft_1
	mov r23, r22
	clr r22
	subi r20, 8
-	rjmp shiftleft32
+	rjmp 10b		; loop past the sign test (amount is now known positive)
/******************************************************************************/
/*
breq 20f
10:
lsl r0
+rol32:
rol r22
rol r23
rol r24
/******************************************************************************/
+sn_stub:			; reload operand, rotate by next table byte, xor into acc
+	movw r22, r2		; restore low word of saved 32-bit operand
+	movw r24, r4		; restore high word
+	lpm r20, Z+		; fetch rotate amount from flash table at Z
+	rcall rotateleft32
+eor32_to_acc:			; acc ^= r25:r24:r23:r22 (also a fall-through entry)
+	eor acc0, r22
+	eor acc1, r23
+	eor acc2, r24
+	eor acc3, r25
+	ret
+
s_table:
s0: .byte 1, 3, 4,19
s1: .byte 1, 2, 8,23
s3: .byte 2, 2,15,29
s4: .byte 1, 0, 0, 0
s5: .byte 2, 0, 0, 0
-/*
-s0: .byte 0x34, 19
-s1: .byte 0x28, 23
-s2: .byte 0x9C, 25
-s3: .byte 0xAF, 29
-s4: .byte 0x00, 0
-s5: .byte 0x80, 0
-*/
-acc2 = 8
-acc3 = 9
+
h0 = 10
h1 = 11
m0 = 12
m1 = 13
-acc0 = 14
-acc1 = 15
/*
param x: r22:r23:r24:25
movw r4, r24
lpm r20, Z+
rcall shiftright32
- movw acc0, r22
- movw acc2, r24
+ rcall mov32_to_acc
;---
movw r22, r2
movw r24, r4
rcall shiftleft32
rcall eor32_to_acc
;---
- movw r22, r2
- movw r24, r4
- lpm r20, Z+
- rcall rotateleft32
- rcall eor32_to_acc
-;---
- movw r22, r2
- movw r24, r4
- lpm r20, Z+
- rcall rotateleft32
- rcall eor32_to_acc
+ rcall sn_stub
+ rcall sn_stub
+
movw r22, acc0
movw r24, acc2
pop acc3
pop acc2
pop acc1
pop acc0
- pop_range 2, 5
- ret
+ rjmp pop5
/******************************************************************************/
/*
param src: r30:r31 (Z)
param len: r20
*/
-memxor_short:
+memxor_64:
; tst r20
; breq memxor_exit
ldi r20, 64
+memxor:
10: ld r21, X
ld r22, Z+
eor r21, r22
adc r25, r0
ret
-store_acc_to_dec_X:
- st -X, acc3
- st -X, acc2
- st -X, acc1
- st -X, acc0
- ret
-
store32_to_X:
st X+, r22
st X+, r23
st X+, r25
ret
+mov32_to_acc:			; acc3:acc2:acc1:acc0 = r25:r24:r23:r22
+	movw acc0, r22
+	movw acc2, r24
+	ret
+
/******************************************************************************/
/*
param q: r28:r29 (Y)
param m: r30:r31 (Z)
*/
+f2_1_shift_table:
+; .byte 0x2B, 0x64, 0x66, 0x03, 0x51, 0x55, 0x87, 0x55
+; .byte 0x55, 0x87, 0x55, 0x51, 0x03, 0x66, 0x64, 0x2B
+	.byte 5, -5, -7, 8, -5, 5, -1, 5, -3, 0, 6, -6, -4, 6, -11, 2	; signed shifts; negative values make shiftleft32 shift right
+f2_2_shift_table:
+; .byte (2<<1), (7<<1), (4<<1), (3<<1), (4<<1)+1, (6<<1)+1, (6<<1)
+	.byte 8, -6, 6, 4, -3, -4, -7, -2	; signed shifts for second f2 half
+expand2_rot_table:
+	.byte 3,7,13,16,19,23,27	; rotate-left amounts for expand2
+
f0_hacktable:
.byte 0x03, 0x11, 5*4
.byte 0xDD, 0xB3, 7*4
.byte 0x2A, 0x79, 10*4
.byte 0x07, 0xAA, 13*4
.byte 0x51, 0xC2, 14*4
- .byte 0 ; just for alignment
/*******************************************************************************
acc0 = 14
acc1 = 15
-add32_to_acc:
- add acc0, r22
- adc acc1, r23
- adc acc2, r24
- adc acc3, r25
- ret
-
-eor32_to_acc:
- eor acc0, r22
- eor acc1, r23
- eor acc2, r24
- eor acc3, r25
- ret
-
load_acc_from_X:
ld acc0, X+
ld acc1, X+
ld acc3, X+
ret
-add_acc_to_Z:
- ld r0, Z
+add_acc_to_X:			; *(uint32_t*)X += acc; X advances by 4 (was Z-based)
+	ld r0, X		; read-modify-write one byte at a time
	add r0, acc0
-	st Z+, r0
-	ld r0, Z
+	st X+, r0
+	ld r0, X
	adc r0, acc1
-	st Z+, r0
-	ld r0, Z
+	st X+, r0
+	ld r0, X
	adc r0, acc2
-	st Z+, r0
-	ld r0, Z
+	st X+, r0
+	ld r0, X
	adc r0, acc3
-	st Z+, r0
+	st X+, r0		; final carry out is discarded (mod 2^32 add)
	ret
load_rotate_add_M:
+ mov r20, j
andi r20, 0x0f
mov r0, r20
lsl r0
inc r20
rcall rotateleft32
brts 10f
- rcall add32_to_acc
- ret
+ rjmp add32_to_acc
+; ret
10: sub acc0, r22
sbc acc1, r23
sbc acc2, r24
sbc acc3, r25
ret
+
+;---
+
+/******************************************************************************/
+load_sn_add:
+	rcall load32_from_X	; fetch 32-bit word at X
+	rcall sn		; apply sn (s-function, defined elsewhere) to it
+add32_to_acc:			; acc += r25:r24:r23:r22 (also a fall-through entry)
+	add acc0, r22
+	adc acc1, r23
+	adc acc2, r24
+	adc acc3, r25
+	ret
+
+/*
+ param q: r26:r27
+ param m: r22:r23
+ param h: r20:r21
+ param j: r24
+*/
+
+expand_intro:			; save X and j around the shared addelement body
+	push_range 26, 27	; save X (r27:r26)
+	push r24		; save j (moved into the j alias below)
addelement:
mov j, r24
movw h0, r20
rcall store_acc_to_dec_X
adiw r26, 4
clt
- mov r20, j
rcall load_rotate_add_M
- mov r20, j
- subi r20, -3
+ subi j, -3
rcall load_rotate_add_M
- mov r20, j
set
- subi r20, -10
+ subi j, -7
rcall load_rotate_add_M
lsl j
lsl j
- subi j, -7*4
+ subi j, -7*4+10*4
andi j, 0x3f
movw r26, h0
add r26, j
adc r27, r1
rcall load32_from_X
rcall eor32_to_acc
-;---
- ret
-
-/******************************************************************************/
-load_sn_add:
- rcall load32_from_X
- rcall sn
- rcall add32_to_acc
- ret
-
-/*
- param q: r26:r27
- param m: r22:r23
- param h: r20:r21
- param j: r24
-*/
-
-expand_intro:
- push_range 20, 27
-; push r24
- rcall addelement
-; pop r24
- pop_range 20, 27
+;--
+ pop r24
+ pop_range 26, 27
lsl r24
lsl r24
add r26, r24
param j: r24
*/
-f2_1_shift_table:
- .byte 0x2B, 0x64, 0x66, 0x03, 0x51, 0x55, 0x87, 0x55
-f2_2_shift_table:
- .byte (2<<1), (7<<1), (4<<1), (3<<1), (4<<1)+1, (6<<1)+1, (6<<1)
-
-expand2_rot_table:
- .byte 3,7,13,16,19,23,27
-; .byte 0 ; just for alignment
expand2:
rcall expand_intro
rcall load_sn_add
expand2_exit:
adiw r26, 4
- rcall store_acc_to_dec_X
+store_acc_to_dec_X:		; store acc to X-4..X-1 with pre-decrement; X ends 4 lower
+	st -X, acc3		; highest byte first so memory order is little-endian
+	st -X, acc2
+	st -X, acc1
+	st -X, acc0
	ret
/******************************************************************************/
h1 = 5
m0 = 6
m1 = 7
+ctx0 = 2	; context pointer kept in call-saved r3:r2
+ctx1 = 3
+msg0 = 4	; message pointer kept in call-saved r5:r4
+msg1 = 5
-
+restore_f1:			; reload X, r23:r22, r21:r20 from the saved r2..r7 set
+	movw r26, r2		; X = saved pointer (r3:r2)
+	movw r22, r4
+	movw r20, r6
+	ret
+bmw_small_nextBlock_early:	; load (ctx, msg) args, fall through to nextBlock
+	movw r24, ctx0		; param ctx in r25:r24
+	movw r22, msg0		; param block in r23:r22
.global bmw_small_nextBlock
.global bmw224_nextBlock
.global bmw256_nextBlock
bmw_small_nextBlock:
bmw224_nextBlock:
bmw256_nextBlock:
+ push_range 2, 7
push_range 28, 29
- push_range 2, 17
+ push_range 8, 17
stack_alloc_large 32*4, r28, r29
ldi r16, 0x4f
push r16
movw m0, r30
/* xor m into h */
; ldi r20, 64
- rcall memxor_short
+ rcall memxor_64
movw r30, m0
movw r26, h0
; ldi r20, 64
movw r26, h0
movw r30, m0
- rcall memxor_short
+ rcall memxor_64
sbiw r26, 60
;---
clr r17
movw r26, r2
clr r24
rcall expand1
- movw r26, r2
- movw r22, r4
- movw r20, r6
+ rcall restore_f1
ldi r24, 1
rcall expand1
ldi r17, 2
-10: movw r26, r2
- movw r22, r4
- movw r20, r6
+10: rcall restore_f1
mov r24, r17
rcall expand2
inc r17
sbrs r17, 4
rjmp 10b
+ rcall restore_f1
movw r24, r2
- movw r22, r4
- movw r20, r6
/* call f2 */
h1 = 19
f2:
movw r26, r24
- /* calc XL */
+ /* calc XL & XH */
adiw r26, 63
adiw r26, 1
movw q16_0, r26
movw h0, r20
+;---
+; push h0
+; push h1
+;---
movw r28, r22
- rcall load32_from_X
- movw acc0, r22
- movw acc2, r24
+ rcall load_acc_from_X
ldi r17, 15
10: rcall load32_from_X
rcall eor32_to_acc
; rcall print32
; pop_range 22, 25
;--- END DBG
-
+ /* copy m(Y) into h */
+ movw r26, h0
+ ldi r22, 64
+10:
+ ld r23, Y+
+ st X+, r23
+ dec r22
+ brne 10b
;--- /* calc first half of h0..h15 */
- movw r26, q16_0
- ldi r17, 16
+ movw r28, q16_0
+ movw r26, h0
+ ldi r30, lo8(f2_1_shift_table)
+ ldi r31, hi8(f2_1_shift_table)
+ ldi r17, 15
10:
- ld acc0, Y+
- ld acc1, Y+
- ld acc2, Y+
- ld acc3, Y+
;---
movw r22, xh0
movw r24, xh2
- cpi r17, 9
- brge 15f
- clr r1
- rjmp 26f
-15: ldi r30, lo8(f2_1_shift_table-9)
- ldi r31, hi8(f2_1_shift_table-9)
- add r30, r17
- adc r31, r1
- lpm r20, Z
- mov r1, r20
- andi r20, 0x0f
- clt
- cpi r17, 16
- breq 20f
- cpi r17, 11
- brne 21f
-20: set
-21: brts 25f
- rcall shiftright32
- rjmp 26f
-25: rcall shiftleft32
-26: rcall eor32_to_acc
+ lpm r20, Z+
+ sbrc r17, 3
+ rcall shiftleft32
+ rcall mov32_to_acc
;---
- rcall load32_from_X
- mov r20, r1
- clr r1
- swap r20
- andi r20, 0x0f
- brts 27f
+ rcall load32_from_Y
+ lpm r20, Z+
+ sbrc r17, 3
rcall shiftleft32
- rjmp 28f
-27: rcall shiftright32
-28: rcall eor32_to_acc
+ rcall eor32_to_acc
;---
- movw r30, h0
- st Z+, acc0
- st Z+, acc1
- st Z+, acc2
- st Z+, acc3
- movw h0, r30
+ rcall load32_from_X
+ rcall eor32_to_acc
+ rcall store_acc_to_dec_X
+ adiw r26, 4
;---
dec r17
- brne 10b
+ brpl 10b
;-----
- sbiw r26, 4*8 /* X points to q[24] */
- movw r28, r26
+ sbiw r28, 4*8 /* Y points to q[24] */
+ movw r30, r28
sbiw r28, 63
sbiw r28, 33 /* Y points to q[0] */
- sbiw r30, 63
- sbiw r30, 1 /* Z points to h0 */
- ldi r17, 8
-10: movw acc0, xl0
- movw acc2, xl2
- rcall load32_from_X
- rcall eor32_to_acc
- rcall load32_from_Y
- rcall eor32_to_acc
- rcall add_acc_to_Z
- dec r17
- brne 10b
- sbiw r26, 9*4 /* X points to q[23] */
- rcall load_acc_from_X
- eor acc1, xl0
- eor acc2, xl1
- eor acc3, xl2
- rcall load32_from_Y
- rcall eor32_to_acc
- rcall add_acc_to_Z
-;---
- sbiw r26, 8*4 /* X points to q[16] */
- mov h0, r30
- ldi r17, 7
-10:
- ldi r30, lo8(f2_2_shift_table-1)
- ldi r31, hi8(f2_2_shift_table-1)
- add r30, r17
- adc r31, r1
- lpm r20, Z
- rcall load_acc_from_X
- movw r22, xl0
+ movw r26, r28
+ ldi r20, 8*4
+ /* xor q[24..31] into q[0..7] */
+ rcall memxor
+ /* xor q[23] into q[8] */
+ sbiw r30, 9*4
+ ldi r20, 4
+ rcall memxor
+ /* xor q[16..22] into q[9..15] */
+ sbiw r30, 8*4
+ ldi r20, 7*4
+ rcall memxor
+
+ movw r26, h0
+ ldi r17, 15
+ ldi r30, lo8(f2_2_shift_table-8)
+ ldi r31, hi8(f2_2_shift_table-8)
+10: movw r22, xl0
movw r24, xl2
- lsr r20
- brcc 20f
+ lpm r20, Z+
+ sbrs r17, 3
rcall shiftleft32
- rjmp 21f
-20: rcall shiftright32
-21:
- rcall eor32_to_acc
+ rcall mov32_to_acc
rcall load32_from_Y
rcall eor32_to_acc
- movw r30, h0
- rcall add_acc_to_Z
- movw h0, r30
+ rcall add_acc_to_X
dec r17
- brne 10b
+ brpl 10b
;-----
- sbiw r30, 8*4 /* Z points to h8 */
- movw r26, r30
- sbiw r26, 4*4 /* X points to h4 */
+ sbiw r26, 8*4 /* X points to h8 */
+ movw r28, r26
+ sbiw r28, 4*4 /* Y points to h4 */
ldi r17, 8
ldi r18, 9
10:
- rcall load32_from_X
+ rcall load32_from_Y
mov r20, r18
rcall rotateleft32
- movw acc0, r22
- movw acc2, r24
- rcall add_acc_to_Z
+ rcall mov32_to_acc
+ rcall add_acc_to_X
inc r18
cpi r17, 5
brne 20f
- sbiw r26, 8*4
+ sbiw r28, 8*4
20: dec r17
brne 10b
+exit:
;--- DBG
; pop r25
; pop r24
; rcall printX
;--- END DBG
stack_free_large3 32*4+4
-	pop_range 2, 17
+	pop_range 10, 17	; restore r10..r17; remaining regs via shared chain below
+pop9:				; shared epilogue entry points (callers rjmp into the chain)
+	pop_range 8, 9
+pop28:
	pop_range 28, 29
+pop7:
+	pop_range 6, 7
+pop5:
+	pop_range 2, 5
	ret
/******************************************************************************/
1:
cpi len1, hi8(512)
brlo 2f
- movw r24, ctx0
- movw r22, blc0
- rcall bmw_small_nextBlock
+ rcall bmw_small_nextBlock_early
ldi r24, 64
add blc0, r24
adc blc1, r1
memcpy(pctx.buffer, block, (length_b+7)/8);
pctx.buffer[length_b>>3] |= 0x80 >> (length_b&0x07);
*/ movw r24, len0
+ ldi r23, 63
+ movw r26, blc0
lsr r25
ror r24
lsr r24
lsr r24
- ldi r23, 63
- sub r23, r24
- movw r26, blc0
- tst r24
breq 301f
+ sub r23, r24
/* copy (#r24) bytes to stack buffer */
30: ld r20, X+
st Z+, r20
breq 400f
cpi len0, 192
brlo 400f
- movw r24, ctx0
- movw r22, buf0
- rcall bmw_small_nextBlock
+ movw blc0, buf0
+ rcall bmw_small_nextBlock_early
movw r26, buf0
ldi r20, 64-8
350:
rcall load32_from_Z_stub
410:
clr r25
+ ldi r20, 1
lsl r21
- rol r22
- rol r23
- rol r24
- rol r25
+ rcall rol32
mov r20, len0
add r21, len1
adc r22, r1
adc r23, r1
adc r24, r1
adc r25, r1
- movw r30, buf0
- adiw r30, 64-8
- st Z+, r20
- st Z+, r21
- st Z+, r22
- st Z+, r23
- st Z+, r24
- st Z+, r25
- st Z+, r1
- st Z+, r1
- movw r24, ctx0
- movw r22, buf0
- rcall bmw_small_nextBlock
+ movw r26, buf0
+ adiw r26, 64-8
+ st X+, r20
+ st X+, r21
+ rcall store32_to_X
+ st X+, r1
+ st X+, r1
+ movw blc0, buf0
+ rcall bmw_small_nextBlock_early
/* memset(pctx.buffer, 0xaa, 64);
for(i=0; i<16;++i){
pctx.buffer[i*4] = i+0xa0;
/* bmw_small_nextBlock((bmw_small_ctx_t*)&pctx, ctx->h);
memcpy(ctx->h, pctx.buffer, 64);
*/
- movw r24, buf0
- movw r22, ctx0
- rcall bmw_small_nextBlock
+ movw r24, buf0
+ movw r22, ctx0
+ rcall bmw_small_nextBlock
ldi r18, 64
movw r26, ctx0
movw r30, buf0
brne 600b
stack_free_large 68
- pop_range 28, 29
- pop_range 2, 7
- ret
+ rjmp pop28
/*******************************************************************************
*/
.global bmw224_ctx2hash
bmw224_ctx2hash:
- movw r26, r24
movw r30, r22
adiw r30, 9*4
- ldi r22, 28
+ ldi r18, 28
rjmp 1f
/*******************************************************************************
*/
.global bmw256_ctx2hash
bmw256_ctx2hash:
- movw r26, r24
movw r30, r22
adiw r30, 8*4
- ldi r22, 32
-1:
- ld r23, Z+
+ ldi r18, 32
+1: movw r26, r24
+1: ld r23, Z+
st X+, r23
- dec r22
+ dec r18
brne 1b
ret
dst1 = 11
.global bmw256
bmw256:
- push r16
- ldi r16, 1
+ set
rjmp bmw_small_all
+
/*******************************************************************************
* void bmw224(void* dest, const void* msg, uint32_t length_b){
* bmw_small_ctx_t ctx;
dst1 = 7
.global bmw224
bmw224:
- push r16
- clr r16
+ clt
+
bmw_small_all:
- push_range 2, 9
+ push_range 2, 7
push_range 28, 29
+ push_range 8, 9
+ push r16
stack_alloc_large 64+4
adiw r30, 1
- movw ctx0, r30
+ clr r16
+ brtc 10f
+ inc r16
+10: movw ctx0, r30
movw dst0, r24
movw msg0, r22
movw len0, r18
mov r18, len2
or r18, len3
breq 50f
- movw r24, ctx0
- movw r22, msg0
- rcall bmw_small_nextBlock
+ rcall bmw_small_nextBlock_early
subi len1, 2
sbc len2, r1
sbc len3, r1
adc r31, r1
icall
stack_free_large 64+4
- pop_range 28, 29
- pop_range 2, 9
pop r16
- ret
+ rjmp pop9
init_lut:
rjmp bmw224_init
*/
.global bmw224_init
bmw224_init:
-	movw r26, r24
-	ldi r22, 0x03
-	ldi r23, 0x02
-	ldi r24, 0x01
-	ldi r25, 0x00
+	ldi r22, 0x00		; first IV byte for bmw224
+	ldi r23, 0x40		; byte-counter stop value
bmw_small_init:
-	rcall store32_to_X
-	ldi r18, 16-1
-	ldi r20, 0x04
-1:
-	add r22, r20
-	adc r23, r20
-	adc r24, r20
-	adc r25, r20
-	rcall store32_to_X
-	dec r18
-	brne 1b
-	st X+, r1
-	st X+, r1
-	st X+, r1
-	st X+, r1
+	movw r26, r24		; X = ctx pointer (param in r25:r24)
+	adiw r26, 4		; point one 4-byte group ahead
+10:
+	st -X, r22		; fill the group backwards with incrementing bytes
+	inc r22
+	mov r20, r22
+	andi r20, 0x3		; 4-byte group complete?
+	brne 10b
+	adiw r26, 8		; advance X to end of the next group
+20:	cp r22, r23		; counter reached stop value?
+	brne 10b
+	st -X, r1		; trailing 4 zero bytes (r1 assumed zero, gcc ABI)
+	st -X, r1
+	st -X, r1
+	st -X, r1
	ret
.global bmw256_init
bmw256_init:
-	movw r26, r24
-	ldi r22, 0x43
-	ldi r23, 0x42
-	ldi r24, 0x41
-	ldi r25, 0x40
+	ldi r22, 0x40		; bmw256 IV byte pattern starts at 0x40
+	ldi r23, 0x80		; byte-counter stop value
	rjmp bmw_small_init