rebase on newer SUPERCOP, use PIE, some other stuff

This commit is contained in:
cathugger
2022-05-05 13:22:34 +00:00
parent 0819ccd81d
commit f374555fd4
83 changed files with 2062 additions and 1955 deletions

View File

@@ -105,13 +105,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_choose_t
# qhasm: enter CRYPTO_NAMESPACE(batch_choose_t)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_choose_t
.globl crypto_sign_ed25519_amd64_51_30k_batch_choose_t
_crypto_sign_ed25519_amd64_51_30k_batch_choose_t:
crypto_sign_ed25519_amd64_51_30k_batch_choose_t:
.globl _CRYPTO_NAMESPACE(batch_choose_t)
.globl CRYPTO_NAMESPACE(batch_choose_t)
_CRYPTO_NAMESPACE(batch_choose_t):
CRYPTO_NAMESPACE(batch_choose_t):
mov %rsp,%r11
and $31,%r11
add $64,%r11
@@ -1677,30 +1677,30 @@ movq 952(%rcx,%rdi),%rdi
# asm 2: cmove <t=%rdi,<tt2d4=%r11
cmove %rdi,%r11
# qhasm: tt0 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>tt0=int64#1
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>tt0=%rdi
movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdi
# qhasm: tt0 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: movq CRYPTO_NAMESPACE(batch_2P0),>tt0=int64#1
# asm 2: movq CRYPTO_NAMESPACE(batch_2P0),>tt0=%rdi
movq CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdi
# qhasm: tt1 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt1=int64#4
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt1=%rcx
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
# qhasm: tt1 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>tt1=int64#4
# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>tt1=%rcx
movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rcx
# qhasm: tt2 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt2=int64#5
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt2=%r8
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
# qhasm: tt2 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>tt2=int64#5
# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>tt2=%r8
movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: tt3 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt3=int64#10
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt3=%r12
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
# qhasm: tt3 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>tt3=int64#10
# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>tt3=%r12
movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r12
# qhasm: tt4 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt4=int64#11
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt4=%r13
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r13
# qhasm: tt4 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>tt4=int64#11
# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>tt4=%r13
movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r13
# qhasm: tt0 -= tt2d0
# asm 1: sub <tt2d0=int64#2,<tt0=int64#1

View File

@@ -0,0 +1,47 @@
# Read-only constants for the ed25519-amd64-51-30k "batch" code.
# CRYPTO_NAMESPACE() is a preprocessor macro supplied by the build that
# prefixes every exported symbol, so several namespaced copies can coexist
# in one binary.  Constants live in .rodata (not .data): they are never
# written, which also keeps them shareable in PIE builds.
.section .rodata
.globl CRYPTO_NAMESPACE(batch_REDMASK51)
.globl CRYPTO_NAMESPACE(batch_121666_213)
.globl CRYPTO_NAMESPACE(batch_2P0)
.globl CRYPTO_NAMESPACE(batch_2P1234)
.globl CRYPTO_NAMESPACE(batch_4P0)
.globl CRYPTO_NAMESPACE(batch_4P1234)
.globl CRYPTO_NAMESPACE(batch_MU0)
.globl CRYPTO_NAMESPACE(batch_MU1)
.globl CRYPTO_NAMESPACE(batch_MU2)
.globl CRYPTO_NAMESPACE(batch_MU3)
.globl CRYPTO_NAMESPACE(batch_MU4)
.globl CRYPTO_NAMESPACE(batch_ORDER0)
.globl CRYPTO_NAMESPACE(batch_ORDER1)
.globl CRYPTO_NAMESPACE(batch_ORDER2)
.globl CRYPTO_NAMESPACE(batch_ORDER3)
.globl CRYPTO_NAMESPACE(batch_EC2D0)
.globl CRYPTO_NAMESPACE(batch_EC2D1)
.globl CRYPTO_NAMESPACE(batch_EC2D2)
.globl CRYPTO_NAMESPACE(batch_EC2D3)
.globl CRYPTO_NAMESPACE(batch_EC2D4)
.globl CRYPTO_NAMESPACE(batch__38)
.p2align 4
# mask of one radix-2^51 limb: 2^51 - 1
CRYPTO_NAMESPACE(batch_REDMASK51): .quad 0x0007FFFFFFFFFFFF
# the value 121666 * 2^13 (multiplier used by fe25519_mul121666)
CRYPTO_NAMESPACE(batch_121666_213): .quad 996687872
# limb 0 of 2*p in radix 2^51, p = 2^255 - 19: 2^52 - 38
CRYPTO_NAMESPACE(batch_2P0): .quad 0xFFFFFFFFFFFDA
# limbs 1..4 of 2*p: 2^52 - 2
CRYPTO_NAMESPACE(batch_2P1234): .quad 0xFFFFFFFFFFFFE
# limb 0 of 4*p: 2^53 - 76
CRYPTO_NAMESPACE(batch_4P0): .quad 0x1FFFFFFFFFFFB4
# limbs 1..4 of 4*p: 2^53 - 4
CRYPTO_NAMESPACE(batch_4P1234): .quad 0x1FFFFFFFFFFFFC
# MU0..MU4: Barrett-reduction constant for reduction mod the group order
# (presumably floor(2^512 / ORDER) -- TODO confirm against sc25519 code)
CRYPTO_NAMESPACE(batch_MU0): .quad 0xED9CE5A30A2C131B
CRYPTO_NAMESPACE(batch_MU1): .quad 0x2106215D086329A7
CRYPTO_NAMESPACE(batch_MU2): .quad 0xFFFFFFFFFFFFFFEB
CRYPTO_NAMESPACE(batch_MU3): .quad 0xFFFFFFFFFFFFFFFF
CRYPTO_NAMESPACE(batch_MU4): .quad 0x000000000000000F
# ORDER0..3: ed25519 group order as little-endian 64-bit limbs,
# l = 2^252 + 27742317777372353535851937790883648493
CRYPTO_NAMESPACE(batch_ORDER0): .quad 0x5812631A5CF5D3ED
CRYPTO_NAMESPACE(batch_ORDER1): .quad 0x14DEF9DEA2F79CD6
CRYPTO_NAMESPACE(batch_ORDER2): .quad 0x0000000000000000
CRYPTO_NAMESPACE(batch_ORDER3): .quad 0x1000000000000000
# EC2D0..4: radix-2^51 limbs of 2*d, d the edwards25519 curve constant
# NOTE(review): values carried over verbatim; confirm against a reference
# implementation if touched.
CRYPTO_NAMESPACE(batch_EC2D0): .quad 1859910466990425
CRYPTO_NAMESPACE(batch_EC2D1): .quad 932731440258426
CRYPTO_NAMESPACE(batch_EC2D2): .quad 1072319116312658
CRYPTO_NAMESPACE(batch_EC2D3): .quad 1815898335770999
CRYPTO_NAMESPACE(batch_EC2D4): .quad 633789495995903
# the constant 38 = 2*19, used when folding carries mod 2^255 - 19
CRYPTO_NAMESPACE(batch__38): .quad 38

View File

@@ -1,47 +0,0 @@
# Legacy (pre-namespacing) constants for the ed25519-amd64-51-30k batch
# code; this is the file the commit deletes in favor of the
# CRYPTO_NAMESPACE() .rodata version.
# NOTE(review): these are read-only values but were kept in writable
# .data here; the replacement correctly moves them to .rodata.
.data
.globl crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
.globl crypto_sign_ed25519_amd64_51_30k_batch_121666_213
.globl crypto_sign_ed25519_amd64_51_30k_batch_2P0
.globl crypto_sign_ed25519_amd64_51_30k_batch_2P1234
.globl crypto_sign_ed25519_amd64_51_30k_batch_4P0
.globl crypto_sign_ed25519_amd64_51_30k_batch_4P1234
.globl crypto_sign_ed25519_amd64_51_30k_batch_MU0
.globl crypto_sign_ed25519_amd64_51_30k_batch_MU1
.globl crypto_sign_ed25519_amd64_51_30k_batch_MU2
.globl crypto_sign_ed25519_amd64_51_30k_batch_MU3
.globl crypto_sign_ed25519_amd64_51_30k_batch_MU4
.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER3
.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
.globl crypto_sign_ed25519_amd64_51_30k_batch__38
.p2align 4
# mask of one radix-2^51 limb: 2^51 - 1
crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51: .quad 0x0007FFFFFFFFFFFF
# the value 121666 * 2^13
crypto_sign_ed25519_amd64_51_30k_batch_121666_213: .quad 996687872
# limb 0 of 2*p (p = 2^255 - 19): 2^52 - 38
crypto_sign_ed25519_amd64_51_30k_batch_2P0: .quad 0xFFFFFFFFFFFDA
# limbs 1..4 of 2*p: 2^52 - 2
crypto_sign_ed25519_amd64_51_30k_batch_2P1234: .quad 0xFFFFFFFFFFFFE
# limb 0 of 4*p: 2^53 - 76
crypto_sign_ed25519_amd64_51_30k_batch_4P0: .quad 0x1FFFFFFFFFFFB4
# limbs 1..4 of 4*p: 2^53 - 4
crypto_sign_ed25519_amd64_51_30k_batch_4P1234: .quad 0x1FFFFFFFFFFFFC
# MU0..MU4: Barrett-reduction constant for reduction mod the group order
crypto_sign_ed25519_amd64_51_30k_batch_MU0: .quad 0xED9CE5A30A2C131B
crypto_sign_ed25519_amd64_51_30k_batch_MU1: .quad 0x2106215D086329A7
crypto_sign_ed25519_amd64_51_30k_batch_MU2: .quad 0xFFFFFFFFFFFFFFEB
crypto_sign_ed25519_amd64_51_30k_batch_MU3: .quad 0xFFFFFFFFFFFFFFFF
crypto_sign_ed25519_amd64_51_30k_batch_MU4: .quad 0x000000000000000F
# ORDER0..3: ed25519 group order, little-endian 64-bit limbs
crypto_sign_ed25519_amd64_51_30k_batch_ORDER0: .quad 0x5812631A5CF5D3ED
crypto_sign_ed25519_amd64_51_30k_batch_ORDER1: .quad 0x14DEF9DEA2F79CD6
crypto_sign_ed25519_amd64_51_30k_batch_ORDER2: .quad 0x0000000000000000
crypto_sign_ed25519_amd64_51_30k_batch_ORDER3: .quad 0x1000000000000000
# EC2D0..4: radix-2^51 limbs of 2*d (edwards25519 curve constant)
crypto_sign_ed25519_amd64_51_30k_batch_EC2D0: .quad 1859910466990425
crypto_sign_ed25519_amd64_51_30k_batch_EC2D1: .quad 932731440258426
crypto_sign_ed25519_amd64_51_30k_batch_EC2D2: .quad 1072319116312658
crypto_sign_ed25519_amd64_51_30k_batch_EC2D3: .quad 1815898335770999
crypto_sign_ed25519_amd64_51_30k_batch_EC2D4: .quad 633789495995903
# the constant 38 = 2*19
crypto_sign_ed25519_amd64_51_30k_batch__38: .quad 38

View File

@@ -1,9 +1,9 @@
/* Diff hunk: maps the generic crypto_sign* API names onto this
 * implementation.  The first seven lines are the OLD defines, which
 * hard-coded the ed25519_amd64_51_30k_ prefix; the following seven are
 * the NEW defines, which derive the names through the CRYPTO_NAMESPACE()
 * macro so differently-namespaced builds can be linked together. */
#define crypto_sign ed25519_amd64_51_30k_sign
#define crypto_sign_keypair ed25519_amd64_51_30k_keygen
#define crypto_sign_seckey ed25519_amd64_51_30k_seckey
#define crypto_sign_seckey_expand ed25519_amd64_51_30k_seckey_expand
#define crypto_sign_pubkey ed25519_amd64_51_30k_pubkey
#define crypto_sign_open ed25519_amd64_51_30k_open
#define crypto_sign_open_batch ed25519_amd64_51_30k_open_batch
/* new (namespaced) replacements for the defines above: */
#define crypto_sign CRYPTO_NAMESPACE(sign)
#define crypto_sign_keypair CRYPTO_NAMESPACE(keygen)
#define crypto_sign_seckey CRYPTO_NAMESPACE(seckey)
#define crypto_sign_seckey_expand CRYPTO_NAMESPACE(seckey_expand)
#define crypto_sign_pubkey CRYPTO_NAMESPACE(pubkey)
#define crypto_sign_open CRYPTO_NAMESPACE(open)
#define crypto_sign_open_batch CRYPTO_NAMESPACE(open_batch)
/* the prototypes behind these names: */
#include "ed25519.h"

View File

@@ -1,20 +1,20 @@
/* Diff hunk over ed25519.h: the exported entry points drop their
 * hard-coded ed25519_amd64_51_30k_ prefix and are declared under the
 * generic crypto_sign* names (mapped back to namespaced symbols by
 * crypto_sign.h via CRYPTO_NAMESPACE()).  Old and new declaration lines
 * are interleaved below exactly as the diff presents them; this span is
 * a diff rendering, not compilable C. */
int ed25519_amd64_51_30k_seckey(unsigned char *sk);
int ed25519_amd64_51_30k_seckey_expand(unsigned char *sk,const unsigned char *seed);
int ed25519_amd64_51_30k_pubkey(unsigned char *pk,const unsigned char *sk);
int ed25519_amd64_51_30k_keygen(unsigned char *pk,unsigned char *sk);
int ed25519_amd64_51_30k_sign(
int crypto_sign_seckey(unsigned char *sk);
int crypto_sign_seckey_expand(unsigned char *sk,const unsigned char *seed);
int crypto_sign_pubkey(unsigned char *pk,const unsigned char *sk);
int crypto_sign_keypair(unsigned char *pk,unsigned char *sk);
int crypto_sign(
unsigned char *sm,unsigned long long *smlen,
const unsigned char *m,unsigned long long mlen,
const unsigned char *sk
);
int ed25519_amd64_51_30k_open(
int crypto_sign_open(
unsigned char *m,unsigned long long *mlen,
const unsigned char *sm,unsigned long long smlen,
const unsigned char *pk
);
int ed25519_amd64_51_30k_batch(
int crypto_sign_open_batch(
unsigned char* const m[],unsigned long long mlen[],
unsigned char* const sm[],const unsigned long long smlen[],
unsigned char* const pk[],
/* old/new pair of the pk[] line -- presumably a whitespace-only change */
unsigned char* const pk[],
unsigned long long num
);

View File

@@ -3,24 +3,25 @@
#include <stddef.h>
/* Diff hunk over fe25519.h: internal field-element helper names were
 * previously hard-wired to the crypto_sign_ed25519_amd64_51_30k_batch_
 * prefix (old lines); they now go through CRYPTO_NAMESPACE() (new
 * lines).  The new set also adds one mapping that has no old
 * counterpart: fe25519_batchinvert (hunk grows 24 -> 25 lines). */
#define fe25519 crypto_sign_ed25519_amd64_51_30k_batch_fe25519
#define fe25519_freeze crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze
#define fe25519_unpack crypto_sign_ed25519_amd64_51_30k_batch_fe25519_unpack
#define fe25519_pack crypto_sign_ed25519_amd64_51_30k_batch_fe25519_pack
#define fe25519_iszero_vartime crypto_sign_ed25519_amd64_51_30k_batch_fe25519_iszero_vartime
#define fe25519_iseq_vartime crypto_sign_ed25519_amd64_51_30k_batch_fe25519_iseq_vartime
#define fe25519_cmov crypto_sign_ed25519_amd64_51_30k_batch_fe25519_cmov
#define fe25519_setint crypto_sign_ed25519_amd64_51_30k_batch_fe25519_setint
#define fe25519_neg crypto_sign_ed25519_amd64_51_30k_batch_fe25519_neg
#define fe25519_getparity crypto_sign_ed25519_amd64_51_30k_batch_fe25519_getparity
#define fe25519_add crypto_sign_ed25519_amd64_51_30k_batch_fe25519_add
#define fe25519_sub crypto_sign_ed25519_amd64_51_30k_batch_fe25519_sub
#define fe25519_mul crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul
#define fe25519_mul121666 crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul121666
#define fe25519_square crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square
#define fe25519_nsquare crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare
#define fe25519_invert crypto_sign_ed25519_amd64_51_30k_batch_fe25519_invert
#define fe25519_pow2523 crypto_sign_ed25519_amd64_51_30k_batch_fe25519_pow2523
/* new (namespaced) replacements for the defines above: */
#define fe25519 CRYPTO_NAMESPACE(batch_fe25519)
#define fe25519_freeze CRYPTO_NAMESPACE(batch_fe25519_freeze)
#define fe25519_unpack CRYPTO_NAMESPACE(batch_fe25519_unpack)
#define fe25519_pack CRYPTO_NAMESPACE(batch_fe25519_pack)
#define fe25519_iszero_vartime CRYPTO_NAMESPACE(batch_fe25519_iszero_vartime)
#define fe25519_iseq_vartime CRYPTO_NAMESPACE(batch_fe25519_iseq_vartime)
#define fe25519_cmov CRYPTO_NAMESPACE(batch_fe25519_cmov)
#define fe25519_setint CRYPTO_NAMESPACE(batch_fe25519_setint)
#define fe25519_neg CRYPTO_NAMESPACE(batch_fe25519_neg)
#define fe25519_getparity CRYPTO_NAMESPACE(batch_fe25519_getparity)
#define fe25519_add CRYPTO_NAMESPACE(batch_fe25519_add)
#define fe25519_sub CRYPTO_NAMESPACE(batch_fe25519_sub)
#define fe25519_mul CRYPTO_NAMESPACE(batch_fe25519_mul)
#define fe25519_mul121666 CRYPTO_NAMESPACE(batch_fe25519_mul121666)
#define fe25519_square CRYPTO_NAMESPACE(batch_fe25519_square)
#define fe25519_nsquare CRYPTO_NAMESPACE(batch_fe25519_nsquare)
#define fe25519_invert CRYPTO_NAMESPACE(batch_fe25519_invert)
/* added mapping (new in this commit, no pre-existing counterpart): */
#define fe25519_batchinvert CRYPTO_NAMESPACE(batch_fe25519_batchinvert)
#define fe25519_pow2523 CRYPTO_NAMESPACE(batch_fe25519_pow2523)
typedef struct
{

View File

@@ -63,13 +63,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze
# qhasm: enter CRYPTO_NAMESPACE(batch_fe25519_freeze)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze
.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze
_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze:
crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze:
.globl _CRYPTO_NAMESPACE(batch_fe25519_freeze)
.globl CRYPTO_NAMESPACE(batch_fe25519_freeze)
_CRYPTO_NAMESPACE(batch_fe25519_freeze):
CRYPTO_NAMESPACE(batch_fe25519_freeze):
mov %rsp,%r11
and $31,%r11
add $64,%r11
@@ -135,10 +135,10 @@ movq 24(%rdi),%r8
# asm 2: movq 32(<rp=%rdi),>r4=%r9
movq 32(%rdi),%r9
# qhasm: two51minus1 = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>two51minus1=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>two51minus1=%rax
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rax
# qhasm: two51minus1 = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>two51minus1=int64#7
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>two51minus1=%rax
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rax
# qhasm: two51minus19 = two51minus1
# asm 1: mov <two51minus1=int64#7,>two51minus19=int64#8

View File

@@ -97,13 +97,13 @@
# qhasm: stack64 mulx419_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul
# qhasm: enter CRYPTO_NAMESPACE(batch_fe25519_mul)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul
.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul
_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul:
crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul:
.globl _CRYPTO_NAMESPACE(batch_fe25519_mul)
.globl CRYPTO_NAMESPACE(batch_fe25519_mul)
_CRYPTO_NAMESPACE(batch_fe25519_mul):
CRYPTO_NAMESPACE(batch_fe25519_mul):
mov %rsp,%r11
and $31,%r11
add $96,%r11
@@ -689,10 +689,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
# qhasm: mulr01 = (mulr01.r0) << 13
# asm 1: shld $13,<r0=int64#5,<mulr01=int64#6

View File

@@ -87,13 +87,13 @@
# qhasm: stack64 n_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare
# qhasm: enter CRYPTO_NAMESPACE(batch_fe25519_nsquare)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare
.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare
_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare:
crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare:
.globl _CRYPTO_NAMESPACE(batch_fe25519_nsquare)
.globl CRYPTO_NAMESPACE(batch_fe25519_nsquare)
_CRYPTO_NAMESPACE(batch_fe25519_nsquare):
CRYPTO_NAMESPACE(batch_fe25519_nsquare):
mov %rsp,%r11
and $31,%r11
add $64,%r11
@@ -497,10 +497,10 @@ add %rax,%r13
# asm 2: adc <squarerdx=%rdx,<squarer31=%r14
adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: squarer01 = (squarer01.r0) << 13
# asm 1: shld $13,<r0=int64#4,<squarer01=int64#5

View File

@@ -85,13 +85,13 @@
# qhasm: int64 squareredmask
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square
# qhasm: enter CRYPTO_NAMESPACE(batch_fe25519_square)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square
.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square
_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square:
crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square:
.globl _CRYPTO_NAMESPACE(batch_fe25519_square)
.globl CRYPTO_NAMESPACE(batch_fe25519_square)
_CRYPTO_NAMESPACE(batch_fe25519_square):
CRYPTO_NAMESPACE(batch_fe25519_square):
mov %rsp,%r11
and $31,%r11
add $64,%r11
@@ -492,10 +492,10 @@ add %rax,%r13
# asm 2: adc <squarerdx=%rdx,<squarer31=%r14
adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#2
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rsi
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
# qhasm: squarer01 = (squarer01.r0) << 13
# asm 1: shld $13,<r0=int64#4,<squarer01=int64#5

View File

@@ -12,26 +12,26 @@
#include "fe25519.h"
#include "sc25519.h"
/* Diff hunk over ge25519.h: group-element helper names move from the
 * hard-coded crypto_sign_ed25519_amd64_51_30k_batch_ prefix (old lines)
 * to CRYPTO_NAMESPACE()-derived names (new lines).  The trailing
 * ge25519_p3 alias is unchanged context. */
#define ge25519 crypto_sign_ed25519_amd64_51_30k_batch_ge25519
#define ge25519_base crypto_sign_ed25519_amd64_51_30k_batch_ge25519_base
#define ge25519_unpackneg_vartime crypto_sign_ed25519_amd64_51_30k_batch_unpackneg_vartime
#define ge25519_pack crypto_sign_ed25519_amd64_51_30k_batch_pack
#define ge25519_isneutral_vartime crypto_sign_ed25519_amd64_51_30k_batch_isneutral_vartime
#define ge25519_add crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add
#define ge25519_double crypto_sign_ed25519_amd64_51_30k_batch_ge25519_double
#define ge25519_double_scalarmult_vartime crypto_sign_ed25519_amd64_51_30k_batch_double_scalarmult_vartime
#define ge25519_multi_scalarmult_vartime crypto_sign_ed25519_amd64_51_30k_batch_ge25519_multi_scalarmult_vartime
#define ge25519_scalarmult_base crypto_sign_ed25519_amd64_51_30k_batch_scalarmult_base
#define ge25519_p1p1_to_p2 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2
#define ge25519_p1p1_to_p3 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
#define ge25519_p1p1_to_pniels crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels
#define ge25519_add_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1
#define ge25519_dbl_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1
#define choose_t crypto_sign_ed25519_amd64_51_30k_batch_choose_t
#define choose_t_smultq crypto_sign_ed25519_amd64_51_30k_batch_choose_t_smultq
#define ge25519_nielsadd2 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
#define ge25519_nielsadd_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1
#define ge25519_pnielsadd_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1
/* new (namespaced) replacements for the defines above: */
#define ge25519 CRYPTO_NAMESPACE(batch_ge25519)
#define ge25519_base CRYPTO_NAMESPACE(batch_ge25519_base)
#define ge25519_unpackneg_vartime CRYPTO_NAMESPACE(batch_unpackneg_vartime)
#define ge25519_pack CRYPTO_NAMESPACE(batch_pack)
#define ge25519_isneutral_vartime CRYPTO_NAMESPACE(batch_isneutral_vartime)
#define ge25519_add CRYPTO_NAMESPACE(batch_ge25519_add)
#define ge25519_double CRYPTO_NAMESPACE(batch_ge25519_double)
#define ge25519_double_scalarmult_vartime CRYPTO_NAMESPACE(batch_double_scalarmult_vartime)
#define ge25519_multi_scalarmult_vartime CRYPTO_NAMESPACE(batch_ge25519_multi_scalarmult_vartime)
#define ge25519_scalarmult_base CRYPTO_NAMESPACE(batch_scalarmult_base)
#define ge25519_p1p1_to_p2 CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2)
#define ge25519_p1p1_to_p3 CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3)
#define ge25519_p1p1_to_pniels CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels)
#define ge25519_add_p1p1 CRYPTO_NAMESPACE(batch_ge25519_add_p1p1)
#define ge25519_dbl_p1p1 CRYPTO_NAMESPACE(batch_ge25519_dbl_p1p1)
#define choose_t CRYPTO_NAMESPACE(batch_choose_t)
#define choose_t_smultq CRYPTO_NAMESPACE(batch_choose_t_smultq)
#define ge25519_nielsadd2 CRYPTO_NAMESPACE(batch_ge25519_nielsadd2)
#define ge25519_nielsadd_p1p1 CRYPTO_NAMESPACE(batch_ge25519_nielsadd_p1p1)
#define ge25519_pnielsadd_p1p1 CRYPTO_NAMESPACE(batch_ge25519_pnielsadd_p1p1)
/* unchanged: p3 representation aliases the base ge25519 struct */
#define ge25519_p3 ge25519

View File

@@ -247,13 +247,13 @@
# qhasm: stack64 mulx419_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1
# qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_add_p1p1)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1
.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1
_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1:
crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1:
.globl _CRYPTO_NAMESPACE(batch_ge25519_add_p1p1)
.globl CRYPTO_NAMESPACE(batch_ge25519_add_p1p1)
_CRYPTO_NAMESPACE(batch_ge25519_add_p1p1):
CRYPTO_NAMESPACE(batch_ge25519_add_p1p1):
mov %rsp,%r11
and $31,%r11
add $256,%r11
@@ -329,10 +329,10 @@ movq 72(%rsi),%r10
# asm 2: mov <a0=%rdx,>b0=%r11
mov %rdx,%r11
# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
# qhasm: a0 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<a0=int64#3
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<a0=%rdx
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx
# qhasm: x0 = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>x0=int64#10
@@ -354,10 +354,10 @@ sub %r12,%rdx
# asm 2: mov <a1=%r8,>b1=%r12
mov %r8,%r12
# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
# qhasm: a1 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a1=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a1=%r8
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: x1 = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>x1=int64#11
@@ -379,10 +379,10 @@ sub %r13,%r8
# asm 2: mov <a2=%r9,>b2=%r13
mov %r9,%r13
# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
# qhasm: a2 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a2=int64#6
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a2=%r9
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9
# qhasm: x2 = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>x2=int64#12
@@ -404,10 +404,10 @@ sub %r14,%r9
# asm 2: mov <a3=%rax,>b3=%r14
mov %rax,%r14
# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: a3 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a3=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a3=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: x3 = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>x3=int64#13
@@ -429,10 +429,10 @@ sub %r15,%rax
# asm 2: mov <a4=%r10,>b4=%r15
mov %r10,%r15
# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
# qhasm: a4 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a4=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a4=%r10
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r10
# qhasm: x4 = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>x4=int64#14
@@ -529,10 +529,10 @@ movq 72(%rcx),%r10
# asm 2: mov <t10=%rdx,>t20=%r11
mov %rdx,%r11
# qhasm: t10 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<t10=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<t10=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
# qhasm: t10 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<t10=int64#3
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<t10=%rdx
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx
# qhasm: rx0 = *(uint64 *) (qp + 0)
# asm 1: movq 0(<qp=int64#4),>rx0=int64#10
@@ -554,10 +554,10 @@ sub %r12,%rdx
# asm 2: mov <t11=%r8,>t21=%r12
mov %r8,%r12
# qhasm: t11 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t11=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t11=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
# qhasm: t11 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<t11=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<t11=%r8
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: rx1 = *(uint64 *) (qp + 8)
# asm 1: movq 8(<qp=int64#4),>rx1=int64#11
@@ -579,10 +579,10 @@ sub %r13,%r8
# asm 2: mov <t12=%r9,>t22=%r13
mov %r9,%r13
# qhasm: t12 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t12=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t12=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
# qhasm: t12 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<t12=int64#6
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<t12=%r9
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9
# qhasm: rx2 = *(uint64 *) (qp + 16)
# asm 1: movq 16(<qp=int64#4),>rx2=int64#12
@@ -604,10 +604,10 @@ sub %r14,%r9
# asm 2: mov <t13=%rax,>t23=%r14
mov %rax,%r14
# qhasm: t13 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t13=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t13=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: t13 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<t13=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<t13=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: rx3 = *(uint64 *) (qp + 24)
# asm 1: movq 24(<qp=int64#4),>rx3=int64#13
@@ -629,10 +629,10 @@ sub %r15,%rax
# asm 2: mov <t14=%r10,>t24=%r15
mov %r10,%r15
# qhasm: t14 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t14=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<t14=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
# qhasm: t14 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<t14=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<t14=%r10
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r10
# qhasm: rx4 = *(uint64 *) (qp + 32)
# asm 1: movq 32(<qp=int64#4),>rx4=int64#14
@@ -1234,10 +1234,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.a0) << 13
# asm 1: shld $13,<a0=int64#5,<mulr01=int64#6
@@ -1984,10 +1984,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#5,<mulr01=int64#6
@@ -2199,30 +2199,30 @@ mov %r11,%r14
# asm 2: mov <rx4=%r12,>ry4=%r15
mov %r12,%r15
# qhasm: rx0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rx0=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rx0=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%r8
# qhasm: rx0 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<rx0=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<rx0=%r8
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%r8
# qhasm: rx1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx1=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx1=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: rx1 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rx1=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rx1=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: rx2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx2=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx2=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
# qhasm: rx2 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rx2=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rx2=%r10
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r10
# qhasm: rx3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx3=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx3=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
# qhasm: rx3 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rx3=int64#9
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rx3=%r11
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r11
# qhasm: rx4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx4=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx4=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
# qhasm: rx4 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rx4=int64#10
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rx4=%r12
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r12
# qhasm: ry0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<ry0=int64#3
@@ -2859,10 +2859,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.c0) << 13
# asm 1: shld $13,<c0=int64#5,<mulr01=int64#6
@@ -3089,8 +3089,8 @@ imulq $19,%rdx,%rax
# asm 2: movq <mulrax=%rax,>mulx319_stack=96(%rsp)
movq %rax,96(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: c0 = mulrax
# asm 1: mov <mulrax=int64#7,>c0=int64#5
@@ -3117,8 +3117,8 @@ imulq $19,%rdx,%rax
# asm 2: movq <mulrax=%rax,>mulx419_stack=104(%rsp)
movq %rax,104(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? c0 += mulrax
# asm 1: add <mulrax=int64#7,<c0=int64#5
@@ -3135,8 +3135,8 @@ adc %rdx,%r9
# asm 2: movq <c0_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? c0 += mulrax
# asm 1: add <mulrax=int64#7,<c0=int64#5
@@ -3153,8 +3153,8 @@ adc %rdx,%r9
# asm 2: movq <c0_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: c1 = mulrax
# asm 1: mov <mulrax=int64#7,>c1=int64#8
@@ -3171,8 +3171,8 @@ mov %rdx,%r11
# asm 2: movq <c0_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: c2 = mulrax
# asm 1: mov <mulrax=int64#7,>c2=int64#10
@@ -3189,8 +3189,8 @@ mov %rdx,%r13
# asm 2: movq <c0_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: c3 = mulrax
# asm 1: mov <mulrax=int64#7,>c3=int64#12
@@ -3207,8 +3207,8 @@ mov %rdx,%r15
# asm 2: movq <c0_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: c4 = mulrax
# asm 1: mov <mulrax=int64#7,>c4=int64#14
@@ -3225,8 +3225,8 @@ mov %rdx,%rbp
# asm 2: movq <c1_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#8
@@ -3243,8 +3243,8 @@ adc %rdx,%r11
# asm 2: movq <c1_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#10
@@ -3261,8 +3261,8 @@ adc %rdx,%r13
# asm 2: movq <c1_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#12
@@ -3279,8 +3279,8 @@ adc %rdx,%r15
# asm 2: movq <c1_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? c4 += mulrax
# asm 1: add <mulrax=int64#7,<c4=int64#14
@@ -3302,8 +3302,8 @@ movq 64(%rsp),%rdx
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? c0 += mulrax
# asm 1: add <mulrax=int64#7,<c0=int64#5
@@ -3320,8 +3320,8 @@ adc %rdx,%r9
# asm 2: movq <c2_stack=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#10
@@ -3338,8 +3338,8 @@ adc %rdx,%r13
# asm 2: movq <c2_stack=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#12
@@ -3356,8 +3356,8 @@ adc %rdx,%r15
# asm 2: movq <c2_stack=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: carry? c4 += mulrax
# asm 1: add <mulrax=int64#7,<c4=int64#14
@@ -3379,8 +3379,8 @@ movq 72(%rsp),%rdx
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? c0 += mulrax
# asm 1: add <mulrax=int64#7,<c0=int64#5
@@ -3402,8 +3402,8 @@ movq 72(%rsp),%rdx
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#8
@@ -3420,8 +3420,8 @@ adc %rdx,%r11
# asm 2: movq <c3_stack=80(%rsp),>mulrax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#12
@@ -3438,8 +3438,8 @@ adc %rdx,%r15
# asm 2: movq <c3_stack=80(%rsp),>mulrax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? c4 += mulrax
# asm 1: add <mulrax=int64#7,<c4=int64#14
@@ -3456,8 +3456,8 @@ adc %rdx,%rbp
# asm 2: movq <mulx319_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#8
@@ -3474,8 +3474,8 @@ adc %rdx,%r11
# asm 2: movq <mulx319_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#10
@@ -3492,8 +3492,8 @@ adc %rdx,%r13
# asm 2: movq <c4_stack=88(%rsp),>mulrax=%rax
movq 88(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? c4 += mulrax
# asm 1: add <mulrax=int64#7,<c4=int64#14
@@ -3510,8 +3510,8 @@ adc %rdx,%rbp
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: carry? c1 += mulrax
# asm 1: add <mulrax=int64#7,<c1=int64#8
@@ -3528,8 +3528,8 @@ adc %rdx,%r11
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? c2 += mulrax
# asm 1: add <mulrax=int64#7,<c2=int64#10
@@ -3546,8 +3546,8 @@ adc %rdx,%r13
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? c3 += mulrax
# asm 1: add <mulrax=int64#7,<c3=int64#12
@@ -3559,10 +3559,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.c0) << 13
# asm 1: shld $13,<c0=int64#5,<mulr01=int64#6
@@ -4309,10 +4309,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
# qhasm: mulr01 = (mulr01.rt0) << 13
# asm 1: shld $13,<rt0=int64#5,<mulr01=int64#6
@@ -4549,30 +4549,30 @@ mov %rax,%r12
# asm 2: mov <rt4=%r10,>rz4=%r13
mov %r10,%r13
# qhasm: rt0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rt0=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rt0=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%r8
# qhasm: rt0 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<rt0=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<rt0=%r8
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%r8
# qhasm: rt1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt1=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt1=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
# qhasm: rt1 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rt1=int64#4
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rt1=%rcx
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rcx
# qhasm: rt2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt2=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt2=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
# qhasm: rt2 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rt2=int64#6
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rt2=%r9
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9
# qhasm: rt3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt3=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt3=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: rt3 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rt3=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rt3=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: rt4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt4=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt4=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
# qhasm: rt4 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rt4=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rt4=%r10
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r10
# qhasm: rz0 += c0_stack
# asm 1: addq <c0_stack=stack64#8,<rz0=int64#2

View File

@@ -241,13 +241,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1
# qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_dbl_p1p1)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1
.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1
_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1:
crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1:
.globl _CRYPTO_NAMESPACE(batch_ge25519_dbl_p1p1)
.globl CRYPTO_NAMESPACE(batch_ge25519_dbl_p1p1)
_CRYPTO_NAMESPACE(batch_ge25519_dbl_p1p1):
CRYPTO_NAMESPACE(batch_ge25519_dbl_p1p1):
mov %rsp,%r11
and $31,%r11
add $224,%r11
@@ -648,10 +648,10 @@ add %rax,%r13
# asm 2: adc <squarerdx=%rdx,<squarer31=%r14
adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: squarer01 = (squarer01.a0) << 13
# asm 1: shld $13,<a0=int64#4,<squarer01=int64#5
@@ -1223,10 +1223,10 @@ add %rax,%r13
# asm 2: adc <squarerdx=%rdx,<squarer31=%r14
adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: squarer01 = (squarer01.b0) << 13
# asm 1: shld $13,<b0=int64#4,<squarer01=int64#5
@@ -1798,10 +1798,10 @@ add %rax,%r13
# asm 2: adc <squarerdx=%rdx,<squarer31=%r14
adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: squarer01 = (squarer01.c0) << 13
# asm 1: shld $13,<c0=int64#4,<squarer01=int64#5
@@ -2038,30 +2038,30 @@ movq %r10,160(%rsp)
# asm 2: movq <c4=%r11,>c4_stack=168(%rsp)
movq %r11,168(%rsp)
# qhasm: d0 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>d0=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>d0=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
# qhasm: d0 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: movq CRYPTO_NAMESPACE(batch_2P0),>d0=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_2P0),>d0=%rdx
movq CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx
# qhasm: d1 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d1=int64#4
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d1=%rcx
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
# qhasm: d1 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>d1=int64#4
# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>d1=%rcx
movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rcx
# qhasm: d2 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d2=int64#5
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d2=%r8
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
# qhasm: d2 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>d2=int64#5
# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>d2=%r8
movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: d3 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d3=int64#6
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d3=%r9
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
# qhasm: d3 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>d3=int64#6
# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>d3=%r9
movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9
# qhasm: d4 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d4=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d4=%rax
movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: d4 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>d4=int64#7
# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>d4=%rax
movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: e0 = d0
# asm 1: mov <d0=int64#3,>e0=int64#8
@@ -2263,30 +2263,30 @@ movq %r13,64(%rdi)
# asm 2: movq <rz4=%r14,72(<rp=%rdi)
movq %r14,72(%rdi)
# qhasm: d0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<d0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<d0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
# qhasm: d0 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<d0=int64#3
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<d0=%rdx
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx
# qhasm: d1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d1=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d1=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
# qhasm: d1 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<d1=int64#4
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<d1=%rcx
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rcx
# qhasm: d2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d2=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d2=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
# qhasm: d2 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<d2=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<d2=%r8
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: d3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d3=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d3=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
# qhasm: d3 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<d3=int64#6
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<d3=%r9
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9
# qhasm: d4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d4=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<d4=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: d4 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<d4=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<d4=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: d0 -= b0_stack
# asm 1: subq <b0_stack=stack64#13,<d0=int64#3
@@ -2338,30 +2338,30 @@ movq %r9,104(%rdi)
# asm 2: movq <d4=%rax,112(<rp=%rdi)
movq %rax,112(%rdi)
# qhasm: rz0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_4P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_4P0,<rz0=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_4P0,<rz0=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_4P0,%r10
# qhasm: rz0 += *(uint64 *)&CRYPTO_NAMESPACE(batch_4P0)
# asm 1: add CRYPTO_NAMESPACE(batch_4P0),<rz0=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_4P0),<rz0=%r10
add CRYPTO_NAMESPACE(batch_4P0)(%rip),%r10
# qhasm: rz1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_4P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz1=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz1=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,%r11
# qhasm: rz1 += *(uint64 *)&CRYPTO_NAMESPACE(batch_4P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_4P1234),<rz1=int64#9
# asm 2: add CRYPTO_NAMESPACE(batch_4P1234),<rz1=%r11
add CRYPTO_NAMESPACE(batch_4P1234)(%rip),%r11
# qhasm: rz2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_4P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz2=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz2=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,%r12
# qhasm: rz2 += *(uint64 *)&CRYPTO_NAMESPACE(batch_4P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_4P1234),<rz2=int64#10
# asm 2: add CRYPTO_NAMESPACE(batch_4P1234),<rz2=%r12
add CRYPTO_NAMESPACE(batch_4P1234)(%rip),%r12
# qhasm: rz3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_4P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz3=int64#11
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz3=%r13
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,%r13
# qhasm: rz3 += *(uint64 *)&CRYPTO_NAMESPACE(batch_4P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_4P1234),<rz3=int64#11
# asm 2: add CRYPTO_NAMESPACE(batch_4P1234),<rz3=%r13
add CRYPTO_NAMESPACE(batch_4P1234)(%rip),%r13
# qhasm: rz4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_4P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz4=int64#12
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,<rz4=%r14
add crypto_sign_ed25519_amd64_51_30k_batch_4P1234,%r14
# qhasm: rz4 += *(uint64 *)&CRYPTO_NAMESPACE(batch_4P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_4P1234),<rz4=int64#12
# asm 2: add CRYPTO_NAMESPACE(batch_4P1234),<rz4=%r14
add CRYPTO_NAMESPACE(batch_4P1234)(%rip),%r14
# qhasm: rz0 -= c0_stack
# asm 1: subq <c0_stack=stack64#18,<rz0=int64#8
@@ -2848,10 +2848,10 @@ add %rax,%r12
# asm 2: adc <squarerdx=%rdx,<squarer31=%r13
adc %rdx,%r13
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: squarer01 = (squarer01.rx0) << 13
# asm 1: shld $13,<rx0=int64#2,<squarer01=int64#4

View File

@@ -7,7 +7,7 @@
#define S2_SWINDOWSIZE 7
#define PRE2_SIZE (1<<(S2_SWINDOWSIZE-2))
ge25519_niels pre2[PRE2_SIZE] = {
static const ge25519_niels pre2[PRE2_SIZE] = {
#include "ge25519_base_slide_multiples.data"
};

View File

@@ -333,13 +333,13 @@
# qhasm: stack64 mulx419_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
# qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_nielsadd2)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2:
crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2:
.globl _CRYPTO_NAMESPACE(batch_ge25519_nielsadd2)
.globl CRYPTO_NAMESPACE(batch_ge25519_nielsadd2)
_CRYPTO_NAMESPACE(batch_ge25519_nielsadd2):
CRYPTO_NAMESPACE(batch_ge25519_nielsadd2):
mov %rsp,%r11
and $31,%r11
add $256,%r11
@@ -430,30 +430,30 @@ mov %r9,%r13
# asm 2: mov <a4=%rax,>b4=%r14
mov %rax,%r14
# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
# qhasm: a0 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<a0=int64#3
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<a0=%rdx
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx
# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
# qhasm: a1 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a1=int64#4
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a1=%rcx
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rcx
# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
# qhasm: a2 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a2=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a2=%r8
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
# qhasm: a3 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a3=int64#6
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a3=%r9
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9
# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: a4 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a4=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a4=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: b0 += *(uint64 *) (rp + 0)
# asm 1: addq 0(<rp=int64#1),<b0=int64#8
@@ -1090,10 +1090,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.a0) << 13
# asm 1: shld $13,<a0=int64#4,<mulr01=int64#5
@@ -1840,10 +1840,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.e0) << 13
# asm 1: shld $13,<e0=int64#4,<mulr01=int64#5
@@ -2055,30 +2055,30 @@ mov %r10,%r13
# asm 2: mov <e4=%r11,>h4=%r14
mov %r11,%r14
# qhasm: e0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<e0=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<e0=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rcx
# qhasm: e0 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<e0=int64#4
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<e0=%rcx
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rcx
# qhasm: e1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e1=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e1=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
# qhasm: e1 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<e1=int64#6
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<e1=%r9
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9
# qhasm: e2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e2=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e2=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: e2 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<e2=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<e2=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: e3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e3=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e3=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
# qhasm: e3 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<e3=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<e3=%r10
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r10
# qhasm: e4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e4=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e4=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
# qhasm: e4 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<e4=int64#9
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<e4=%r11
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r11
# qhasm: h0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<h0=int64#3
@@ -2715,10 +2715,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
# qhasm: mulr01 = (mulr01.c0) << 13
# asm 1: shld $13,<c0=int64#4,<mulr01=int64#5
@@ -2985,30 +2985,30 @@ mov %r11,%rbx
# asm 2: mov <f4=%r12,>g4=%rbp
mov %r12,%rbp
# qhasm: f0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<f0=int64#2
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<f0=%rsi
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rsi
# qhasm: f0 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<f0=int64#2
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<f0=%rsi
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rsi
# qhasm: f1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f1=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f1=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rdx
# qhasm: f1 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<f1=int64#3
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<f1=%rdx
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rdx
# qhasm: f2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f2=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f2=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
# qhasm: f2 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<f2=int64#4
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<f2=%rcx
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rcx
# qhasm: f3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f3=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f3=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
# qhasm: f3 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<f3=int64#9
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<f3=%r11
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r11
# qhasm: f4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f4=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f4=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
# qhasm: f4 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<f4=int64#10
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<f4=%r12
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r12
# qhasm: g0 += c0_stack
# asm 1: addq <c0_stack=stack64#18,<g0=int64#11
@@ -3645,10 +3645,10 @@ add %rax,%r12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#2,<mulr01=int64#4
@@ -4395,10 +4395,10 @@ add %rax,%r12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.ry0) << 13
# asm 1: shld $13,<ry0=int64#2,<mulr01=int64#4
@@ -5145,10 +5145,10 @@ add %rax,%r12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#2,<mulr01=int64#4
@@ -5895,10 +5895,10 @@ add %rax,%r12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.rt0) << 13
# asm 1: shld $13,<rt0=int64#2,<mulr01=int64#4

View File

@@ -337,13 +337,13 @@
# qhasm: stack64 mulx419_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1
# qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_nielsadd_p1p1)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1
.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1
_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1:
crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1:
.globl _CRYPTO_NAMESPACE(batch_ge25519_nielsadd_p1p1)
.globl CRYPTO_NAMESPACE(batch_ge25519_nielsadd_p1p1)
_CRYPTO_NAMESPACE(batch_ge25519_nielsadd_p1p1):
CRYPTO_NAMESPACE(batch_ge25519_nielsadd_p1p1):
mov %rsp,%r11
and $31,%r11
add $160,%r11
@@ -439,30 +439,30 @@ mov %rax,%r14
# asm 2: mov <a4=%r10,>b4=%r15
mov %r10,%r15
# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
# qhasm: a0 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<a0=int64#3
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<a0=%rdx
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx
# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
# qhasm: a1 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a1=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a1=%r8
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
# qhasm: a2 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a2=int64#6
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a2=%r9
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9
# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: a3 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a3=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a3=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
# qhasm: a4 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a4=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a4=%r10
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r10
# qhasm: b0 += *(uint64 *) (pp + 0)
# asm 1: addq 0(<pp=int64#2),<b0=int64#9
@@ -1099,10 +1099,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.a0) << 13
# asm 1: shld $13,<a0=int64#5,<mulr01=int64#6
@@ -1849,10 +1849,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.e0) << 13
# asm 1: shld $13,<e0=int64#5,<mulr01=int64#6
@@ -2064,30 +2064,30 @@ mov %r11,%r14
# asm 2: mov <e4=%r12,>h4=%r15
mov %r12,%r15
# qhasm: e0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<e0=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<e0=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%r8
# qhasm: e0 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<e0=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<e0=%r8
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%r8
# qhasm: e1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e1=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e1=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: e1 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<e1=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<e1=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: e2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e2=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e2=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
# qhasm: e2 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<e2=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<e2=%r10
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r10
# qhasm: e3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e3=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e3=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
# qhasm: e3 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<e3=int64#9
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<e3=%r11
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r11
# qhasm: e4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e4=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e4=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
# qhasm: e4 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<e4=int64#10
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<e4=%r12
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r12
# qhasm: h0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<h0=int64#3
@@ -2724,10 +2724,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.c0) << 13
# asm 1: shld $13,<c0=int64#5,<mulr01=int64#6
@@ -2994,30 +2994,30 @@ mov %r12,%rbx
# asm 2: mov <f4=%rsi,>g4=%rbp
mov %rsi,%rbp
# qhasm: f0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<f0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<f0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
# qhasm: f0 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<f0=int64#3
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<f0=%rdx
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx
# qhasm: f1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f1=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f1=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
# qhasm: f1 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<f1=int64#4
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<f1=%rcx
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rcx
# qhasm: f2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f2=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f2=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
# qhasm: f2 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<f2=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<f2=%r8
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: f3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f3=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f3=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
# qhasm: f3 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<f3=int64#10
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<f3=%r12
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r12
# qhasm: f4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f4=int64#2
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f4=%rsi
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rsi
# qhasm: f4 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<f4=int64#2
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<f4=%rsi
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rsi
# qhasm: g0 += c0_stack
# asm 1: addq <c0_stack=stack64#8,<g0=int64#11

View File

@@ -103,13 +103,13 @@
# qhasm: stack64 mulx419_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2
# qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2
.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2
_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2:
crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2:
.globl _CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2)
.globl CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2)
_CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2):
CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p2):
mov %rsp,%r11
and $31,%r11
add $96,%r11
@@ -685,10 +685,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#4,<mulr01=int64#5
@@ -1435,10 +1435,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.ry0) << 13
# asm 1: shld $13,<ry0=int64#4,<mulr01=int64#5
@@ -2185,10 +2185,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
# qhasm: mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5

View File

@@ -113,13 +113,13 @@
# qhasm: stack64 mulx419_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
# qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3:
crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3:
.globl _CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3)
.globl CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3)
_CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3):
CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_p3):
mov %rsp,%r11
and $31,%r11
add $96,%r11
@@ -695,10 +695,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#4,<mulr01=int64#5
@@ -1445,10 +1445,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.ry0) << 13
# asm 1: shld $13,<ry0=int64#4,<mulr01=int64#5
@@ -2195,10 +2195,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5
@@ -2945,10 +2945,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
# qhasm: mulr01 = (mulr01.rt0) << 13
# asm 1: shld $13,<rt0=int64#4,<mulr01=int64#5

View File

@@ -171,13 +171,13 @@
# qhasm: stack64 mulx419_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels
# qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels
.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels
_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels:
crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels:
.globl _CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels)
.globl CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels)
_CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels):
CRYPTO_NAMESPACE(batch_ge25519_p1p1_to_pniels):
mov %rsp,%r11
and $31,%r11
add $128,%r11
@@ -753,10 +753,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.x0) << 13
# asm 1: shld $13,<x0=int64#4,<mulr01=int64#5
@@ -1503,10 +1503,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.y0) << 13
# asm 1: shld $13,<y0=int64#4,<mulr01=int64#5
@@ -1718,30 +1718,30 @@ mov %r10,%r13
# asm 2: mov <y4=%r11,>ysubx4=%r14
mov %r11,%r14
# qhasm: ysubx0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<ysubx0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<ysubx0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
# qhasm: ysubx0 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<ysubx0=int64#3
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<ysubx0=%rdx
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx
# qhasm: ysubx1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx1=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx1=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
# qhasm: ysubx1 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx1=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx1=%r8
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: ysubx2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx2=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx2=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
# qhasm: ysubx2 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx2=int64#10
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx2=%r12
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r12
# qhasm: ysubx3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx3=int64#11
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx3=%r13
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r13
# qhasm: ysubx3 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx3=int64#11
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx3=%r13
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r13
# qhasm: ysubx4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx4=int64#12
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<ysubx4=%r14
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r14
# qhasm: ysubx4 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx4=int64#12
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<ysubx4=%r14
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r14
# qhasm: x0 = stackx0
# asm 1: movq <stackx0=stack64#8,>x0=int64#13
@@ -2403,10 +2403,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5
@@ -3153,10 +3153,10 @@ add %rax,%r13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
# qhasm: mulr01 = (mulr01.t0) << 13
# asm 1: shld $13,<t0=int64#4,<mulr01=int64#5
@@ -3383,8 +3383,8 @@ imulq $19,%rsi,%rax
# asm 2: movq <mulrax=%rax,>mulx319_stack=96(%rsp)
movq %rax,96(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: t2d0 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d0=int64#2
@@ -3411,8 +3411,8 @@ imulq $19,%rdx,%rax
# asm 2: movq <mulrax=%rax,>mulx419_stack=104(%rsp)
movq %rax,104(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
@@ -3429,8 +3429,8 @@ adc %rdx,%rcx
# asm 2: movq <stackt0=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
@@ -3447,8 +3447,8 @@ adc %rdx,%rcx
# asm 2: movq <stackt0=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: t2d1 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d1=int64#5
@@ -3465,8 +3465,8 @@ mov %rdx,%r9
# asm 2: movq <stackt0=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: t2d2 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d2=int64#8
@@ -3483,8 +3483,8 @@ mov %rdx,%r11
# asm 2: movq <stackt0=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: t2d3 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d3=int64#10
@@ -3501,8 +3501,8 @@ mov %rdx,%r13
# asm 2: movq <stackt0=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: t2d4 = mulrax
# asm 1: mov <mulrax=int64#7,>t2d4=int64#12
@@ -3519,8 +3519,8 @@ mov %rdx,%r15
# asm 2: movq <stackt1=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
@@ -3537,8 +3537,8 @@ adc %rdx,%r9
# asm 2: movq <stackt1=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
@@ -3555,8 +3555,8 @@ adc %rdx,%r11
# asm 2: movq <stackt1=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
@@ -3573,8 +3573,8 @@ adc %rdx,%r13
# asm 2: movq <stackt1=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
@@ -3596,8 +3596,8 @@ movq 64(%rsp),%rdx
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
@@ -3614,8 +3614,8 @@ adc %rdx,%rcx
# asm 2: movq <stackt2=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
@@ -3632,8 +3632,8 @@ adc %rdx,%r11
# asm 2: movq <stackt2=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
@@ -3650,8 +3650,8 @@ adc %rdx,%r13
# asm 2: movq <stackt2=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
@@ -3673,8 +3673,8 @@ movq 72(%rsp),%rdx
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? t2d0 += mulrax
# asm 1: add <mulrax=int64#7,<t2d0=int64#2
@@ -3696,8 +3696,8 @@ movq 72(%rsp),%rdx
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
@@ -3714,8 +3714,8 @@ adc %rdx,%r9
# asm 2: movq <stackt3=80(%rsp),>mulrax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
@@ -3732,8 +3732,8 @@ adc %rdx,%r13
# asm 2: movq <stackt3=80(%rsp),>mulrax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1)
mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
@@ -3750,8 +3750,8 @@ adc %rdx,%r15
# asm 2: movq <mulx319_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
@@ -3768,8 +3768,8 @@ adc %rdx,%r9
# asm 2: movq <mulx319_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
@@ -3786,8 +3786,8 @@ adc %rdx,%r11
# asm 2: movq <stackt4=88(%rsp),>mulrax=%rax
movq 88(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0)
mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip)
# qhasm: carry? t2d4 += mulrax
# asm 1: add <mulrax=int64#7,<t2d4=int64#12
@@ -3804,8 +3804,8 @@ adc %rdx,%r15
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2)
mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip)
# qhasm: carry? t2d1 += mulrax
# asm 1: add <mulrax=int64#7,<t2d1=int64#5
@@ -3822,8 +3822,8 @@ adc %rdx,%r9
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3)
mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip)
# qhasm: carry? t2d2 += mulrax
# asm 1: add <mulrax=int64#7,<t2d2=int64#8
@@ -3840,8 +3840,8 @@ adc %rdx,%r11
# asm 2: movq <mulx419_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4)
mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip)
# qhasm: carry? t2d3 += mulrax
# asm 1: add <mulrax=int64#7,<t2d3=int64#10
@@ -3853,10 +3853,10 @@ add %rax,%r12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.t2d0) << 13
# asm 1: shld $13,<t2d0=int64#2,<mulr01=int64#4

View File

@@ -247,13 +247,13 @@
# qhasm: stack64 mulx419_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1
# qhasm: enter CRYPTO_NAMESPACE(batch_ge25519_pnielsadd_p1p1)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1
.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1
_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1:
crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1:
.globl _CRYPTO_NAMESPACE(batch_ge25519_pnielsadd_p1p1)
.globl CRYPTO_NAMESPACE(batch_ge25519_pnielsadd_p1p1)
_CRYPTO_NAMESPACE(batch_ge25519_pnielsadd_p1p1):
CRYPTO_NAMESPACE(batch_ge25519_pnielsadd_p1p1):
mov %rsp,%r11
and $31,%r11
add $160,%r11
@@ -349,30 +349,30 @@ mov %rax,%r14
# asm 2: mov <a4=%r10,>b4=%r15
mov %r10,%r15
# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=int64#3
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=%rdx
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
# qhasm: a0 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<a0=int64#3
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<a0=%rdx
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx
# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
# qhasm: a1 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a1=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a1=%r8
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8
# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
# qhasm: a2 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a2=int64#6
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a2=%r9
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9
# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: a3 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a3=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a3=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
# qhasm: a4 += *(uint64 *) &CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<a4=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<a4=%r10
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r10
# qhasm: b0 += *(uint64 *) (pp + 0)
# asm 1: addq 0(<pp=int64#2),<b0=int64#9
@@ -1009,10 +1009,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.a0) << 13
# asm 1: shld $13,<a0=int64#5,<mulr01=int64#6
@@ -1759,10 +1759,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#5,<mulr01=int64#6
@@ -1974,30 +1974,30 @@ mov %r11,%r14
# asm 2: mov <rx4=%r12,>ry4=%r15
mov %r12,%r15
# qhasm: rx0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rx0=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rx0=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%r8
# qhasm: rx0 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<rx0=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<rx0=%r8
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%r8
# qhasm: rx1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx1=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx1=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: rx1 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rx1=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rx1=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: rx2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx2=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx2=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
# qhasm: rx2 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rx2=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rx2=%r10
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r10
# qhasm: rx3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx3=int64#9
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx3=%r11
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
# qhasm: rx3 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rx3=int64#9
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rx3=%r11
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r11
# qhasm: rx4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx4=int64#10
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rx4=%r12
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
# qhasm: rx4 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rx4=int64#10
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rx4=%r12
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r12
# qhasm: ry0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<ry0=int64#3
@@ -2634,10 +2634,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx
# qhasm: mulr01 = (mulr01.c0) << 13
# asm 1: shld $13,<c0=int64#5,<mulr01=int64#6
@@ -3384,10 +3384,10 @@ add %rax,%r14
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51)
# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2
# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi
movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi
# qhasm: mulr01 = (mulr01.rt0) << 13
# asm 1: shld $13,<rt0=int64#5,<mulr01=int64#6
@@ -3624,30 +3624,30 @@ mov %rax,%r12
# asm 2: mov <rt4=%r10,>rz4=%r13
mov %r10,%r13
# qhasm: rt0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rt0=int64#5
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,<rt0=%r8
add crypto_sign_ed25519_amd64_51_30k_batch_2P0,%r8
# qhasm: rt0 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
# asm 1: add CRYPTO_NAMESPACE(batch_2P0),<rt0=int64#5
# asm 2: add CRYPTO_NAMESPACE(batch_2P0),<rt0=%r8
add CRYPTO_NAMESPACE(batch_2P0)(%rip),%r8
# qhasm: rt1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt1=int64#4
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt1=%rcx
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
# qhasm: rt1 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rt1=int64#4
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rt1=%rcx
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rcx
# qhasm: rt2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt2=int64#6
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt2=%r9
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
# qhasm: rt2 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rt2=int64#6
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rt2=%r9
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9
# qhasm: rt3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt3=int64#7
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt3=%rax
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
# qhasm: rt3 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rt3=int64#7
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rt3=%rax
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax
# qhasm: rt4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt4=int64#8
# asm 2: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<rt4=%r10
add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
# qhasm: rt4 += *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234)
# asm 1: add CRYPTO_NAMESPACE(batch_2P1234),<rt4=int64#8
# asm 2: add CRYPTO_NAMESPACE(batch_2P1234),<rt4=%r10
add CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r10
# qhasm: rz0 += c0_stack
# asm 1: addq <c0_stack=stack64#8,<rz0=int64#2

View File

@@ -93,13 +93,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced
# qhasm: enter CRYPTO_NAMESPACE(batch_heap_rootreplaced)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced
.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced
_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced:
crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced:
.globl _CRYPTO_NAMESPACE(batch_heap_rootreplaced)
.globl CRYPTO_NAMESPACE(batch_heap_rootreplaced)
_CRYPTO_NAMESPACE(batch_heap_rootreplaced):
CRYPTO_NAMESPACE(batch_heap_rootreplaced):
mov %rsp,%r11
and $31,%r11
add $64,%r11

View File

@@ -93,13 +93,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb
# qhasm: enter CRYPTO_NAMESPACE(batch_heap_rootreplaced_1limb)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb
.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb
_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb:
crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb:
.globl _CRYPTO_NAMESPACE(batch_heap_rootreplaced_1limb)
.globl CRYPTO_NAMESPACE(batch_heap_rootreplaced_1limb)
_CRYPTO_NAMESPACE(batch_heap_rootreplaced_1limb):
CRYPTO_NAMESPACE(batch_heap_rootreplaced_1limb):
mov %rsp,%r11
and $31,%r11
add $64,%r11

View File

@@ -93,13 +93,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs
# qhasm: enter CRYPTO_NAMESPACE(batch_heap_rootreplaced_2limbs)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs
.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs
_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs:
crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs:
.globl _CRYPTO_NAMESPACE(batch_heap_rootreplaced_2limbs)
.globl CRYPTO_NAMESPACE(batch_heap_rootreplaced_2limbs)
_CRYPTO_NAMESPACE(batch_heap_rootreplaced_2limbs):
CRYPTO_NAMESPACE(batch_heap_rootreplaced_2limbs):
mov %rsp,%r11
and $31,%r11
add $64,%r11

View File

@@ -93,13 +93,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs
# qhasm: enter CRYPTO_NAMESPACE(batch_heap_rootreplaced_3limbs)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs
.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs
_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs:
crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs:
.globl _CRYPTO_NAMESPACE(batch_heap_rootreplaced_3limbs)
.globl CRYPTO_NAMESPACE(batch_heap_rootreplaced_3limbs)
_CRYPTO_NAMESPACE(batch_heap_rootreplaced_3limbs):
CRYPTO_NAMESPACE(batch_heap_rootreplaced_3limbs):
mov %rsp,%r11
and $31,%r11
add $64,%r11

View File

@@ -1,7 +1,7 @@
#ifndef HRAM_H
#define HRAM_H
#define get_hram crypto_sign_ed25519_amd64_51_30k_batch_get_hram
#define get_hram CRYPTO_NAMESPACE(batch_get_hram)
extern void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen);

View File

@@ -3,15 +3,15 @@
#include "sc25519.h"
#define heap_init crypto_sign_ed25519_amd64_51_30k_batch_heap_init
#define heap_extend crypto_sign_ed25519_amd64_51_30k_batch_heap_extend
#define heap_pop crypto_sign_ed25519_amd64_51_30k_batch_heap_pop
#define heap_push crypto_sign_ed25519_amd64_51_30k_batch_heap_push
#define heap_get2max crypto_sign_ed25519_amd64_51_30k_batch_heap_get2max
#define heap_rootreplaced crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced
#define heap_rootreplaced_3limbs crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs
#define heap_rootreplaced_2limbs crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs
#define heap_rootreplaced_1limb crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb
#define heap_init CRYPTO_NAMESPACE(batch_heap_init)
#define heap_extend CRYPTO_NAMESPACE(batch_heap_extend)
#define heap_pop CRYPTO_NAMESPACE(batch_heap_pop)
#define heap_push CRYPTO_NAMESPACE(batch_heap_push)
#define heap_get2max CRYPTO_NAMESPACE(batch_heap_get2max)
#define heap_rootreplaced CRYPTO_NAMESPACE(batch_heap_rootreplaced)
#define heap_rootreplaced_3limbs CRYPTO_NAMESPACE(batch_heap_rootreplaced_3limbs)
#define heap_rootreplaced_2limbs CRYPTO_NAMESPACE(batch_heap_rootreplaced_2limbs)
#define heap_rootreplaced_1limb CRYPTO_NAMESPACE(batch_heap_rootreplaced_1limb)
void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);

View File

@@ -1,25 +1,25 @@
#ifndef SC25519_H
#define SC25519_H
#define sc25519 crypto_sign_ed25519_amd64_51_30k_batch_sc25519
#define shortsc25519 crypto_sign_ed25519_amd64_51_30k_batch_shortsc25519
#define sc25519_from32bytes crypto_sign_ed25519_amd64_51_30k_batch_sc25519_from32bytes
#define shortsc25519_from16bytes crypto_sign_ed25519_amd64_51_30k_batch_shortsc25519_from16bytes
#define sc25519_from64bytes crypto_sign_ed25519_amd64_51_30k_batch_sc25519_from64bytes
#define sc25519_from_shortsc crypto_sign_ed25519_amd64_51_30k_batch_sc25519_from_shortsc
#define sc25519_to32bytes crypto_sign_ed25519_amd64_51_30k_batch_sc25519_to32bytes
#define sc25519_iszero_vartime crypto_sign_ed25519_amd64_51_30k_batch_sc25519_iszero_vartime
#define sc25519_isshort_vartime crypto_sign_ed25519_amd64_51_30k_batch_sc25519_isshort_vartime
#define sc25519_lt crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt
#define sc25519_add crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add
#define sc25519_sub_nored crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored
#define sc25519_mul crypto_sign_ed25519_amd64_51_30k_batch_sc25519_mul
#define sc25519_mul_shortsc crypto_sign_ed25519_amd64_51_30k_batch_sc25519_mul_shortsc
#define sc25519_window4 crypto_sign_ed25519_amd64_51_30k_batch_sc25519_window4
#define sc25519_window5 crypto_sign_ed25519_amd64_51_30k_batch_sc25519_window5
#define sc25519_slide crypto_sign_ed25519_amd64_51_30k_batch_sc25519_slide
#define sc25519_2interleave2 crypto_sign_ed25519_amd64_51_30k_batch_sc25519_2interleave2
#define sc25519_barrett crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
#define sc25519 CRYPTO_NAMESPACE(batch_sc25519)
#define shortsc25519 CRYPTO_NAMESPACE(batch_shortsc25519)
#define sc25519_from32bytes CRYPTO_NAMESPACE(batch_sc25519_from32bytes)
#define shortsc25519_from16bytes CRYPTO_NAMESPACE(batch_shortsc25519_from16bytes)
#define sc25519_from64bytes CRYPTO_NAMESPACE(batch_sc25519_from64bytes)
#define sc25519_from_shortsc CRYPTO_NAMESPACE(batch_sc25519_from_shortsc)
#define sc25519_to32bytes CRYPTO_NAMESPACE(batch_sc25519_to32bytes)
#define sc25519_iszero_vartime CRYPTO_NAMESPACE(batch_sc25519_iszero_vartime)
#define sc25519_isshort_vartime CRYPTO_NAMESPACE(batch_sc25519_isshort_vartime)
#define sc25519_lt CRYPTO_NAMESPACE(batch_sc25519_lt)
#define sc25519_add CRYPTO_NAMESPACE(batch_sc25519_add)
#define sc25519_sub_nored CRYPTO_NAMESPACE(batch_sc25519_sub_nored)
#define sc25519_mul CRYPTO_NAMESPACE(batch_sc25519_mul)
#define sc25519_mul_shortsc CRYPTO_NAMESPACE(batch_sc25519_mul_shortsc)
#define sc25519_window4 CRYPTO_NAMESPACE(batch_sc25519_window4)
#define sc25519_window5 CRYPTO_NAMESPACE(batch_sc25519_window5)
#define sc25519_slide CRYPTO_NAMESPACE(batch_sc25519_slide)
#define sc25519_2interleave2 CRYPTO_NAMESPACE(batch_sc25519_2interleave2)
#define sc25519_barrett CRYPTO_NAMESPACE(batch_sc25519_barrett)
typedef struct
{

View File

@@ -63,13 +63,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add
# qhasm: enter CRYPTO_NAMESPACE(batch_sc25519_add)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add
.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add
_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add:
crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add:
.globl _CRYPTO_NAMESPACE(batch_sc25519_add)
.globl CRYPTO_NAMESPACE(batch_sc25519_add)
_CRYPTO_NAMESPACE(batch_sc25519_add):
CRYPTO_NAMESPACE(batch_sc25519_add):
mov %rsp,%r11
and $31,%r11
add $32,%r11
@@ -150,25 +150,25 @@ mov %r9,%r10
# asm 2: mov <r3=%rsi,>t3=%r14
mov %rsi,%r14
# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=int64#3
# asm 2: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=%rdx
sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,%rdx
# qhasm: carry? t0 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0)
# asm 1: sub CRYPTO_NAMESPACE(batch_ORDER0),<t0=int64#3
# asm 2: sub CRYPTO_NAMESPACE(batch_ORDER0),<t0=%rdx
sub CRYPTO_NAMESPACE(batch_ORDER0)(%rip),%rdx
# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=int64#7
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=%rax
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,%rax
# qhasm: carry? t1 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER1) - carry
# asm 1: sbb CRYPTO_NAMESPACE(batch_ORDER1),<t1=int64#7
# asm 2: sbb CRYPTO_NAMESPACE(batch_ORDER1),<t1=%rax
sbb CRYPTO_NAMESPACE(batch_ORDER1)(%rip),%rax
# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=int64#8
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=%r10
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,%r10
# qhasm: carry? t2 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER2) - carry
# asm 1: sbb CRYPTO_NAMESPACE(batch_ORDER2),<t2=int64#8
# asm 2: sbb CRYPTO_NAMESPACE(batch_ORDER2),<t2=%r10
sbb CRYPTO_NAMESPACE(batch_ORDER2)(%rip),%r10
# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=int64#12
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=%r14
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,%r14
# qhasm: unsigned<? t3 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER3) - carry
# asm 1: sbb CRYPTO_NAMESPACE(batch_ORDER3),<t3=int64#12
# asm 2: sbb CRYPTO_NAMESPACE(batch_ORDER3),<t3=%r14
sbb CRYPTO_NAMESPACE(batch_ORDER3)(%rip),%r14
# qhasm: r0 = t0 if !unsigned<
# asm 1: cmovae <t0=int64#3,<r0=int64#4

View File

@@ -107,13 +107,13 @@
# qhasm: stack64 q33_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
# qhasm: enter CRYPTO_NAMESPACE(batch_sc25519_barrett)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett:
crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett:
.globl _CRYPTO_NAMESPACE(batch_sc25519_barrett)
.globl CRYPTO_NAMESPACE(batch_sc25519_barrett)
_CRYPTO_NAMESPACE(batch_sc25519_barrett):
CRYPTO_NAMESPACE(batch_sc25519_barrett):
mov %rsp,%r11
and $31,%r11
add $96,%r11
@@ -184,8 +184,8 @@ xor %r11,%r11
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU3)
mulq CRYPTO_NAMESPACE(batch_MU3)(%rip)
# qhasm: q23 = rax
# asm 1: mov <rax=int64#7,>q23=int64#10
@@ -202,8 +202,8 @@ mov %rdx,%r13
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU4)
mulq CRYPTO_NAMESPACE(batch_MU4)(%rip)
# qhasm: q24 = rax
# asm 1: mov <rax=int64#7,>q24=int64#12
@@ -225,8 +225,8 @@ adc %rdx,%r8
# asm 2: movq 32(<xp=%rsi),>rax=%rax
movq 32(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU2)
mulq CRYPTO_NAMESPACE(batch_MU2)(%rip)
# qhasm: carry? q23 += rax
# asm 1: add <rax=int64#7,<q23=int64#10
@@ -248,8 +248,8 @@ adc %rdx,%r13
# asm 2: movq 32(<xp=%rsi),>rax=%rax
movq 32(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU3)
mulq CRYPTO_NAMESPACE(batch_MU3)(%rip)
# qhasm: carry? q24 += rax
# asm 1: add <rax=int64#7,<q24=int64#12
@@ -281,8 +281,8 @@ adc %rdx,%r13
# asm 2: movq 32(<xp=%rsi),>rax=%rax
movq 32(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU4)
mulq CRYPTO_NAMESPACE(batch_MU4)(%rip)
# qhasm: carry? q30 += rax
# asm 1: add <rax=int64#7,<q30=int64#5
@@ -309,8 +309,8 @@ adc %rdx,%r9
# asm 2: movq 40(<xp=%rsi),>rax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU1)
mulq CRYPTO_NAMESPACE(batch_MU1)(%rip)
# qhasm: carry? q23 += rax
# asm 1: add <rax=int64#7,<q23=int64#10
@@ -332,8 +332,8 @@ adc %rdx,%r13
# asm 2: movq 40(<xp=%rsi),>rax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU2)
mulq CRYPTO_NAMESPACE(batch_MU2)(%rip)
# qhasm: carry? q24 += rax
# asm 1: add <rax=int64#7,<q24=int64#12
@@ -365,8 +365,8 @@ adc %rdx,%r13
# asm 2: movq 40(<xp=%rsi),>rax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU3)
mulq CRYPTO_NAMESPACE(batch_MU3)(%rip)
# qhasm: carry? q30 += rax
# asm 1: add <rax=int64#7,<q30=int64#5
@@ -398,8 +398,8 @@ adc %rdx,%r13
# asm 2: movq 40(<xp=%rsi),>rax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU4)
mulq CRYPTO_NAMESPACE(batch_MU4)(%rip)
# qhasm: carry? q31 += rax
# asm 1: add <rax=int64#7,<q31=int64#6
@@ -426,8 +426,8 @@ adc %rdx,%r10
# asm 2: movq 48(<xp=%rsi),>rax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU0
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU0
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU0)
mulq CRYPTO_NAMESPACE(batch_MU0)(%rip)
# qhasm: carry? q23 += rax
# asm 1: add <rax=int64#7,<q23=int64#10
@@ -449,8 +449,8 @@ adc %rdx,%r12
# asm 2: movq 48(<xp=%rsi),>rax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU1)
mulq CRYPTO_NAMESPACE(batch_MU1)(%rip)
# qhasm: carry? q24 += rax
# asm 1: add <rax=int64#7,<q24=int64#12
@@ -482,8 +482,8 @@ adc %rdx,%r12
# asm 2: movq 48(<xp=%rsi),>rax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU2)
mulq CRYPTO_NAMESPACE(batch_MU2)(%rip)
# qhasm: carry? q30 += rax
# asm 1: add <rax=int64#7,<q30=int64#5
@@ -515,8 +515,8 @@ adc %rdx,%r12
# asm 2: movq 48(<xp=%rsi),>rax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU3)
mulq CRYPTO_NAMESPACE(batch_MU3)(%rip)
# qhasm: carry? q31 += rax
# asm 1: add <rax=int64#7,<q31=int64#6
@@ -548,8 +548,8 @@ adc %rdx,%r12
# asm 2: movq 48(<xp=%rsi),>rax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU4)
mulq CRYPTO_NAMESPACE(batch_MU4)(%rip)
# qhasm: carry? q32 += rax
# asm 1: add <rax=int64#7,<q32=int64#8
@@ -576,8 +576,8 @@ adc %rdx,%r11
# asm 2: movq 56(<xp=%rsi),>rax=%rax
movq 56(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU0
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU0
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU0)
mulq CRYPTO_NAMESPACE(batch_MU0)(%rip)
# qhasm: carry? q24 += rax
# asm 1: add <rax=int64#7,<q24=int64#12
@@ -601,8 +601,8 @@ adc %rdx,%r12
# asm 2: movq 56(<xp=%rsi),>rax=%rax
movq 56(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU1)
mulq CRYPTO_NAMESPACE(batch_MU1)(%rip)
# qhasm: carry? q30 += rax
# asm 1: add <rax=int64#7,<q30=int64#5
@@ -639,8 +639,8 @@ movq %r8,56(%rsp)
# asm 2: movq 56(<xp=%rsi),>rax=%rax
movq 56(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU2)
mulq CRYPTO_NAMESPACE(batch_MU2)(%rip)
# qhasm: carry? q31 += rax
# asm 1: add <rax=int64#7,<q31=int64#6
@@ -677,8 +677,8 @@ movq %r9,64(%rsp)
# asm 2: movq 56(<xp=%rsi),>rax=%rax
movq 56(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU3)
mulq CRYPTO_NAMESPACE(batch_MU3)(%rip)
# qhasm: carry? q32 += rax
# asm 1: add <rax=int64#7,<q32=int64#8
@@ -715,8 +715,8 @@ movq %r10,72(%rsp)
# asm 2: movq 56(<xp=%rsi),>rax=%rax
movq 56(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU4)
mulq CRYPTO_NAMESPACE(batch_MU4)(%rip)
# qhasm: carry? q33 += rax
# asm 1: add <rax=int64#7,<q33=int64#9
@@ -743,8 +743,8 @@ movq %r11,80(%rsp)
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0)
mulq CRYPTO_NAMESPACE(batch_ORDER0)(%rip)
# qhasm: r20 = rax
# asm 1: mov <rax=int64#7,>r20=int64#5
@@ -761,8 +761,8 @@ mov %rdx,%r9
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER1)
mulq CRYPTO_NAMESPACE(batch_ORDER1)(%rip)
# qhasm: r21 = rax
# asm 1: mov <rax=int64#7,>r21=int64#8
@@ -789,8 +789,8 @@ adc %rdx,%r9
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER2)
mulq CRYPTO_NAMESPACE(batch_ORDER2)(%rip)
# qhasm: r22 = rax
# asm 1: mov <rax=int64#7,>r22=int64#9
@@ -817,8 +817,8 @@ adc %rdx,%r9
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER3
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER3)
mulq CRYPTO_NAMESPACE(batch_ORDER3)(%rip)
# qhasm: free rdx
@@ -837,8 +837,8 @@ add %r9,%r12
# asm 2: movq <q31_stack=64(%rsp),>rax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0)
mulq CRYPTO_NAMESPACE(batch_ORDER0)(%rip)
# qhasm: carry? r21 += rax
# asm 1: add <rax=int64#7,<r21=int64#8
@@ -860,8 +860,8 @@ adc %rdx,%r9
# asm 2: movq <q31_stack=64(%rsp),>rax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER1)
mulq CRYPTO_NAMESPACE(batch_ORDER1)(%rip)
# qhasm: carry? r22 += rax
# asm 1: add <rax=int64#7,<r22=int64#9
@@ -893,8 +893,8 @@ adc %rdx,%rcx
# asm 2: movq <q31_stack=64(%rsp),>rax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER2)
mulq CRYPTO_NAMESPACE(batch_ORDER2)(%rip)
# qhasm: free rdx
@@ -913,8 +913,8 @@ add %rcx,%r12
# asm 2: movq <q32_stack=72(%rsp),>rax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0)
mulq CRYPTO_NAMESPACE(batch_ORDER0)(%rip)
# qhasm: carry? r22 += rax
# asm 1: add <rax=int64#7,<r22=int64#9
@@ -936,8 +936,8 @@ adc %rdx,%rcx
# asm 2: movq <q32_stack=72(%rsp),>rax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER1)
mulq CRYPTO_NAMESPACE(batch_ORDER1)(%rip)
# qhasm: free rdx
@@ -956,8 +956,8 @@ add %rcx,%r12
# asm 2: movq <q33_stack=80(%rsp),>rax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0)
mulq CRYPTO_NAMESPACE(batch_ORDER0)(%rip)
# qhasm: free rdx
@@ -1026,25 +1026,25 @@ sbb %r12,%rsi
# asm 2: mov <r3=%rsi,>t3=%r11
mov %rsi,%r11
# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=int64#4
# asm 2: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=%rcx
sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,%rcx
# qhasm: carry? t0 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0)
# asm 1: sub CRYPTO_NAMESPACE(batch_ORDER0),<t0=int64#4
# asm 2: sub CRYPTO_NAMESPACE(batch_ORDER0),<t0=%rcx
sub CRYPTO_NAMESPACE(batch_ORDER0)(%rip),%rcx
# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=int64#6
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=%r9
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,%r9
# qhasm: carry? t1 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER1) - carry
# asm 1: sbb CRYPTO_NAMESPACE(batch_ORDER1),<t1=int64#6
# asm 2: sbb CRYPTO_NAMESPACE(batch_ORDER1),<t1=%r9
sbb CRYPTO_NAMESPACE(batch_ORDER1)(%rip),%r9
# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=int64#8
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=%r10
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,%r10
# qhasm: carry? t2 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER2) - carry
# asm 1: sbb CRYPTO_NAMESPACE(batch_ORDER2),<t2=int64#8
# asm 2: sbb CRYPTO_NAMESPACE(batch_ORDER2),<t2=%r10
sbb CRYPTO_NAMESPACE(batch_ORDER2)(%rip),%r10
# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=int64#9
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=%r11
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,%r11
# qhasm: unsigned<? t3 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER3) - carry
# asm 1: sbb CRYPTO_NAMESPACE(batch_ORDER3),<t3=int64#9
# asm 2: sbb CRYPTO_NAMESPACE(batch_ORDER3),<t3=%r11
sbb CRYPTO_NAMESPACE(batch_ORDER3)(%rip),%r11
# qhasm: r0 = t0 if !unsigned<
# asm 1: cmovae <t0=int64#4,<r0=int64#3
@@ -1086,25 +1086,25 @@ cmovae %r11,%rsi
# asm 2: mov <r3=%rsi,>t3=%r11
mov %rsi,%r11
# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=int64#4
# asm 2: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=%rcx
sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,%rcx
# qhasm: carry? t0 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0)
# asm 1: sub CRYPTO_NAMESPACE(batch_ORDER0),<t0=int64#4
# asm 2: sub CRYPTO_NAMESPACE(batch_ORDER0),<t0=%rcx
sub CRYPTO_NAMESPACE(batch_ORDER0)(%rip),%rcx
# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=int64#6
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=%r9
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,%r9
# qhasm: carry? t1 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER1) - carry
# asm 1: sbb CRYPTO_NAMESPACE(batch_ORDER1),<t1=int64#6
# asm 2: sbb CRYPTO_NAMESPACE(batch_ORDER1),<t1=%r9
sbb CRYPTO_NAMESPACE(batch_ORDER1)(%rip),%r9
# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=int64#8
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=%r10
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,%r10
# qhasm: carry? t2 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER2) - carry
# asm 1: sbb CRYPTO_NAMESPACE(batch_ORDER2),<t2=int64#8
# asm 2: sbb CRYPTO_NAMESPACE(batch_ORDER2),<t2=%r10
sbb CRYPTO_NAMESPACE(batch_ORDER2)(%rip),%r10
# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=int64#9
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=%r11
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,%r11
# qhasm: unsigned<? t3 -= *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER3) - carry
# asm 1: sbb CRYPTO_NAMESPACE(batch_ORDER3),<t3=int64#9
# asm 2: sbb CRYPTO_NAMESPACE(batch_ORDER3),<t3=%r11
sbb CRYPTO_NAMESPACE(batch_ORDER3)(%rip),%r11
# qhasm: r0 = t0 if !unsigned<
# asm 1: cmovae <t0=int64#4,<r0=int64#3

View File

@@ -57,13 +57,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt
# qhasm: enter CRYPTO_NAMESPACE(batch_sc25519_lt)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt
.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt
_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt:
crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt:
.globl _CRYPTO_NAMESPACE(batch_sc25519_lt)
.globl CRYPTO_NAMESPACE(batch_sc25519_lt)
_CRYPTO_NAMESPACE(batch_sc25519_lt):
CRYPTO_NAMESPACE(batch_sc25519_lt):
mov %rsp,%r11
and $31,%r11
add $0,%r11

View File

@@ -1,6 +1,6 @@
#include "sc25519.h"
#define ull4_mul crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
#define ull4_mul CRYPTO_NAMESPACE(batch_ull4_mul)
extern void ull4_mul(unsigned long long r[8], const unsigned long long x[4], const unsigned long long y[4]);

View File

@@ -63,13 +63,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored
# qhasm: enter CRYPTO_NAMESPACE(batch_sc25519_sub_nored)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored
.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored
_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored:
crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored:
.globl _CRYPTO_NAMESPACE(batch_sc25519_sub_nored)
.globl CRYPTO_NAMESPACE(batch_sc25519_sub_nored)
_CRYPTO_NAMESPACE(batch_sc25519_sub_nored):
CRYPTO_NAMESPACE(batch_sc25519_sub_nored):
mov %rsp,%r11
and $31,%r11
add $0,%r11

View File

@@ -77,13 +77,13 @@
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
# qhasm: enter CRYPTO_NAMESPACE(batch_ull4_mul)
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
.globl crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
_crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul:
crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul:
.globl _CRYPTO_NAMESPACE(batch_ull4_mul)
.globl CRYPTO_NAMESPACE(batch_ull4_mul)
_CRYPTO_NAMESPACE(batch_ull4_mul):
CRYPTO_NAMESPACE(batch_ull4_mul):
mov %rsp,%r11
and $31,%r11
add $64,%r11