7 #ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
8 #define _SECP256K1_SCALAR_REPR_IMPL_H_
13 #define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
14 #define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
15 #define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
16 #define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
19 #define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
20 #define SECP256K1_N_C_1 (~SECP256K1_N_1)
21 #define SECP256K1_N_C_2 (1)
24 #define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
25 #define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
26 #define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
27 #define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
45 return (a->
d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
51 if ((offset + count - 1) >> 6 == offset >> 6) {
52 return secp256k1_scalar_get_bits(a, offset, count);
55 return ((a->
d[offset >> 6] >> (offset & 0x3F)) | (a->
d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
74 r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
76 r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
78 r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
79 t += (uint64_t)
r->d[3];
80 r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
86 r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
88 r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
90 r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
92 r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
93 int overflow = t + secp256k1_scalar_check_overflow(
r);
95 secp256k1_scalar_reduce(
r, overflow);
102 r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
103 t += (
uint128_t)
r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
104 r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
105 t += (
uint128_t)
r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
106 r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
107 t += (
uint128_t)
r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
108 r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
115 static void secp256k1_scalar_set_b32(
secp256k1_scalar_t *
r,
const unsigned char *b32,
int *overflow) {
116 r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
117 r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
118 r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
119 r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
120 int over = secp256k1_scalar_reduce(
r, secp256k1_scalar_check_overflow(
r));
126 static void secp256k1_scalar_get_b32(
unsigned char *bin,
const secp256k1_scalar_t* a) {
127 bin[0] = a->
d[3] >> 56; bin[1] = a->
d[3] >> 48; bin[2] = a->
d[3] >> 40; bin[3] = a->
d[3] >> 32; bin[4] = a->
d[3] >> 24; bin[5] = a->
d[3] >> 16; bin[6] = a->
d[3] >> 8; bin[7] = a->
d[3];
128 bin[8] = a->
d[2] >> 56; bin[9] = a->
d[2] >> 48; bin[10] = a->
d[2] >> 40; bin[11] = a->
d[2] >> 32; bin[12] = a->
d[2] >> 24; bin[13] = a->
d[2] >> 16; bin[14] = a->
d[2] >> 8; bin[15] = a->
d[2];
129 bin[16] = a->
d[1] >> 56; bin[17] = a->
d[1] >> 48; bin[18] = a->
d[1] >> 40; bin[19] = a->
d[1] >> 32; bin[20] = a->
d[1] >> 24; bin[21] = a->
d[1] >> 16; bin[22] = a->
d[1] >> 8; bin[23] = a->
d[1];
130 bin[24] = a->
d[0] >> 56; bin[25] = a->
d[0] >> 48; bin[26] = a->
d[0] >> 40; bin[27] = a->
d[0] >> 32; bin[28] = a->
d[0] >> 24; bin[29] = a->
d[0] >> 16; bin[30] = a->
d[0] >> 8; bin[31] = a->
d[0];
134 return (a->
d[0] | a->
d[1] | a->
d[2] | a->
d[3]) == 0;
138 uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
140 r->d[0] = t & nonzero; t >>= 64;
142 r->d[1] = t & nonzero; t >>= 64;
144 r->d[2] = t & nonzero; t >>= 64;
146 r->d[3] = t & nonzero;
150 return ((a->
d[0] ^ 1) | a->
d[1] | a->
d[2] | a->
d[3]) == 0;
/* Accumulator macros operating on the triple (c0, c1, c2), where c2 catches
 * carries out of c1.  NOTE(review): several interior lines of these macros
 * (the tl/th split of the 128-bit product and the c0/c1 additions) are
 * missing from this excerpt; only the visible carry-propagation steps are
 * reproduced, with each macro's opening brace closed so the definitions
 * remain well-formed. */

/** Add a*b to the accumulator triple (fragment). */
#define muladd(a,b) { \
    uint128_t t = (uint128_t)a * b; \
    th += (c0 < tl) ? 1 : 0; \
    c2 += (c1 < th) ? 1 : 0; \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** As muladd, asserting that no carry into c2 can occur (fragment). */
#define muladd_fast(a,b) { \
    uint128_t t = (uint128_t)a * b; \
    th += (c0 < tl) ? 1 : 0; \
    VERIFY_CHECK(c1 >= th); \
}

/** Add 2*a*b: the doubling is done on the split halves th/tl so the carry
 *  of each doubling can be captured explicitly (fragment). */
#define muladd2(a,b) { \
    uint128_t t = (uint128_t)a * b; \
    uint64_t th2 = th + th; \
    c2 += (th2 < th) ? 1 : 0; \
    VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
    uint64_t tl2 = tl + tl; \
    th2 += (tl2 < tl) ? 1 : 0; \
    th2 += (c0 < tl2) ? 1 : 0; \
    c2 += (c0 < tl2) & (th2 == 0); \
    VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
    c2 += (c1 < th2) ? 1 : 0; \
    VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
}

/** Add the single 64-bit value a to the accumulator triple (fragment). */
#define sumadd(a) { \
    unsigned int over = (c0 < (a)) ? 1 : 0; \
    c2 += (c1 < over) ? 1 : 0; \
}

/** As sumadd, asserting that c2 stays zero (fragment). */
#define sumadd_fast(a) { \
    c1 += (c0 < (a)) ? 1 : 0; \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the low 64 bits of the accumulator into n and shift down.
 *  NOTE(review): body entirely elided in this excerpt. */
#define extract(n) { \
}

/** As extract, asserting that c2 is zero.
 *  NOTE(review): body mostly elided in this excerpt. */
#define extract_fast(n) { \
    VERIFY_CHECK(c2 == 0); \
}
251 uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
259 c0 = l[0]; c1 = 0; c2 = 0;
286 c0 = m0; c1 = 0; c2 = 0;
302 uint32_t p4 = c0 + m6;
308 r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
310 r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
312 r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
314 r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
317 secp256k1_scalar_reduce(
r, c + secp256k1_scalar_check_overflow(
r));
322 uint64_t c0 = 0, c1 = 0;
355 uint64_t c0 = 0, c1 = 0;
390 secp256k1_scalar_mul_512(l, a,
b);
391 secp256k1_scalar_reduce_512(
r, l);
396 secp256k1_scalar_sqr_512(l, a);
397 secp256k1_scalar_reduce_512(
r, l);
412 return ((a->
d[0] ^
b->d[0]) | (a->
d[1] ^
b->d[1]) | (a->
d[2] ^
b->d[2]) | (a->
d[3] ^
b->d[3])) == 0;
418 secp256k1_scalar_mul_512(l, a,
b);
419 unsigned int shiftlimbs = shift >> 6;
420 unsigned int shiftlow = shift & 0x3F;
421 unsigned int shifthigh = 64 - shiftlow;
422 r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
423 r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
424 r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
425 r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
426 if ((l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1) {
427 secp256k1_scalar_add_bit(
r, 0);