#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
#define _SECP256K1_SCALAR_REPR_IMPL_H_
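/* Limbs of the secp256k1 group order n, least-significant limb first. */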
#define SECP256K1_N_0 ((uint32_t)0xD0364141UL)
#define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL)
#define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL)
#define SECP256K1_N_3 ((uint32_t)0xBAAEDCE6UL)
#define SECP256K1_N_4 ((uint32_t)0xFFFFFFFEUL)
#define SECP256K1_N_5 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL)
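/* Limbs of 2^256 minus the secp256k1 order (the constant added when reducing an overflowed value). */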
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (~SECP256K1_N_2)
#define SECP256K1_N_C_3 (~SECP256K1_N_3)
#define SECP256K1_N_C_4 (1)
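/* Limbs of half the secp256k1 order. */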
#define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL)
#define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL)
#define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL)
#define SECP256K1_N_H_3 ((uint32_t)0x5D576E73UL)
#define SECP256K1_N_H_4 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_5 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL)
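/* Bit extraction, from secp256k1_scalar_get_bits and secp256k1_scalar_get_bits_var:
 * read `count` bits starting at bit `offset` of the 8x32 representation. The first
 * fragment assumes the requested bits lie within a single 32-bit limb; the `_var`
 * variant also handles a read that spans two adjacent limbs. */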
    return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1);
    if ((offset + count - 1) >> 5 == offset >> 5) {
        return secp256k1_scalar_get_bits(a, offset, count);
    } else {
        return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1);
    }
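/* Body of secp256k1_scalar_reduce(r, overflow): conditionally add 2^256 - n and
 * propagate the carry through all eight limbs, bringing an overflowed value back
 * below the group order. */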
    uint64_t t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0;
    r->d[0] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[1] + overflow * SECP256K1_N_C_1;
    r->d[1] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[2] + overflow * SECP256K1_N_C_2;
    r->d[2] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[3] + overflow * SECP256K1_N_C_3;
    r->d[3] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[4] + overflow * SECP256K1_N_C_4;
    r->d[4] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[5];
    r->d[5] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[6];
    r->d[6] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[7];
    r->d[7] = t & 0xFFFFFFFFUL;
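/* Body of secp256k1_scalar_add(r, a, b): limb-wise addition with carry propagation;
 * the final carry feeds the overflow flag and the result is reduced modulo n. */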
    uint64_t t = (uint64_t)a->d[0] + b->d[0];
    r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[1] + b->d[1];
    r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[2] + b->d[2];
    r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[3] + b->d[3];
    r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[4] + b->d[4];
    r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[5] + b->d[5];
    r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[6] + b->d[6];
    r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[7] + b->d[7];
    r->d[7] = t & 0xFFFFFFFFULL; t >>= 32;
    int overflow = t + secp256k1_scalar_check_overflow(r);
    secp256k1_scalar_reduce(r, overflow);
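/* Body of secp256k1_scalar_add_bit(r, bit): add 2^bit to the scalar. Each limb adds
 * the bit only when (bit >> 5) selects that limb, and the carry ripples upward. */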
    uint64_t t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F));
    r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F));
    r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[2] + (((uint32_t)((bit >> 5) == 2)) << (bit & 0x1F));
    r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[3] + (((uint32_t)((bit >> 5) == 3)) << (bit & 0x1F));
    r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[4] + (((uint32_t)((bit >> 5) == 4)) << (bit & 0x1F));
    r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[5] + (((uint32_t)((bit >> 5) == 5)) << (bit & 0x1F));
    r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[6] + (((uint32_t)((bit >> 5) == 6)) << (bit & 0x1F));
    r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
    r->d[7] = t & 0xFFFFFFFFULL;
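/* Parse a 32-byte big-endian buffer into the eight limbs, then reduce modulo n and
 * report whether the input overflowed the group order. */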
static void secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *b32, int *overflow) {
    r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24;
    r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24;
    r->d[2] = (uint32_t)b32[23] | (uint32_t)b32[22] << 8 | (uint32_t)b32[21] << 16 | (uint32_t)b32[20] << 24;
    r->d[3] = (uint32_t)b32[19] | (uint32_t)b32[18] << 8 | (uint32_t)b32[17] << 16 | (uint32_t)b32[16] << 24;
    r->d[4] = (uint32_t)b32[15] | (uint32_t)b32[14] << 8 | (uint32_t)b32[13] << 16 | (uint32_t)b32[12] << 24;
    r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24;
    r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24;
    r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24;
    int over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }
}
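/* Serialize the limbs back into 32 big-endian bytes (the inverse of set_b32). */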
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_t* a) {
    bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7];
    bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6];
    bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5];
    bin[12] = a->d[4] >> 24; bin[13] = a->d[4] >> 16; bin[14] = a->d[4] >> 8; bin[15] = a->d[4];
    bin[16] = a->d[3] >> 24; bin[17] = a->d[3] >> 16; bin[18] = a->d[3] >> 8; bin[19] = a->d[3];
    bin[20] = a->d[2] >> 24; bin[21] = a->d[2] >> 16; bin[22] = a->d[2] >> 8; bin[23] = a->d[2];
    bin[24] = a->d[1] >> 24; bin[25] = a->d[1] >> 16; bin[26] = a->d[1] >> 8; bin[27] = a->d[1];
    bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}
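/* secp256k1_scalar_is_zero: a scalar is zero iff the OR of all eight limbs is zero. */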
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
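/* Body of secp256k1_scalar_negate(r, a): compute n - a limb by limb via the bitwise
 * complement of a; the `nonzero` mask zeroes the result when the input is zero,
 * keeping the operation branch-free. */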
    uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
    uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
    r->d[0] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
    r->d[1] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[2]) + SECP256K1_N_2;
    r->d[2] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[3]) + SECP256K1_N_3;
    r->d[3] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[4]) + SECP256K1_N_4;
    r->d[4] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[5]) + SECP256K1_N_5;
    r->d[5] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[6]) + SECP256K1_N_6;
    r->d[6] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;
    r->d[7] = t & nonzero;
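/* secp256k1_scalar_is_one: true iff d[0] == 1 and every higher limb is zero. */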
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
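/* Multiplication helpers: (c0,c1,c2) forms a 96-bit accumulator for schoolbook
 * multiplication. muladd adds a 32x32->64 bit product into it, muladd2 adds the
 * product twice (used when squaring), sumadd adds a single 32-bit term, and the
 * *_fast variants omit the carry into c2 when the caller knows it cannot occur.
 * extract/extract_fast pop the lowest 32 bits and shift the accumulator down. */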
#define muladd(a,b) { \
    uint32_t tl, th; \
    { uint64_t t = (uint64_t)a * b; th = t >> 32; tl = t; } \
    c0 += tl; \
    th += (c0 < tl) ? 1 : 0; \
    c1 += th; \
    c2 += (c1 < th) ? 1 : 0; \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}
#define muladd_fast(a,b) { \
    uint32_t tl, th; \
    { uint64_t t = (uint64_t)a * b; th = t >> 32; tl = t; } \
    c0 += tl; \
    th += (c0 < tl) ? 1 : 0; \
    c1 += th; \
    VERIFY_CHECK(c1 >= th); \
}
#define muladd2(a,b) { \
    uint32_t tl, th; \
    { uint64_t t = (uint64_t)a * b; th = t >> 32; tl = t; } \
    uint32_t th2 = th + th; \
    c2 += (th2 < th) ? 1 : 0; \
    VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
    uint32_t tl2 = tl + tl; \
    th2 += (tl2 < tl) ? 1 : 0; \
    c0 += tl2; \
    th2 += (c0 < tl2) ? 1 : 0; \
    c2 += (c0 < tl2) & (th2 == 0); \
    VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
    c1 += th2; \
    c2 += (c1 < th2) ? 1 : 0; \
    VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
}
#define sumadd(a) { \
    c0 += (a); \
    unsigned int over = (c0 < (a)) ? 1 : 0; \
    c1 += over; \
    c2 += (c1 < over) ? 1 : 0; \
}
#define sumadd_fast(a) { \
    c0 += (a); \
    c1 += (c0 < (a)) ? 1 : 0; \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}
#define extract(n) { (n) = c0; c0 = c1; c1 = c2; c2 = 0; }

#define extract_fast(n) { \
    (n) = c0; c0 = c1; c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}
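/* Fragments of secp256k1_scalar_reduce_512(r, l): fold the high limbs l[8..15] of a
 * 512-bit product back in by multiplying with SECP256K1_N_C, reducing 512 bits to
 * 385, then to 258, then to 256 bits plus a final conditional reduction. */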
    /* n0..n7: the high 256 bits (l[8..15]) of the 512-bit product. */
    uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15];
    /* First pass (512 -> 385 bits): the accumulator starts at the lowest product limb. */
    c0 = l[0]; c1 = 0; c2 = 0;
    /* Second pass (385 -> 258 bits): the accumulator restarts at m0. */
    c0 = m0; c1 = 0; c2 = 0;
    /* p8 collects the bits above bit 256 left after the second pass. */
    uint32_t p8 = c0 + m12;
    r->d[0] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[1] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[2] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[3] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p4 + (uint64_t)p8;
    r->d[4] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[5] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[6] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[7] = c & 0xFFFFFFFFUL; c >>= 32;
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
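/* secp256k1_scalar_mul_512 / _sqr_512 build the full sixteen-limb product in l[0..15]
 * using the accumulator macros above; the mul and sqr entry points below then reduce
 * that product with secp256k1_scalar_reduce_512. */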
    /* 96-bit accumulator for secp256k1_scalar_mul_512. */
    uint32_t c0 = 0, c1 = 0, c2 = 0;

    /* 96-bit accumulator for secp256k1_scalar_sqr_512. */
    uint32_t c0 = 0, c1 = 0, c2 = 0;

    secp256k1_scalar_mul_512(l, a, b);
    secp256k1_scalar_reduce_512(r, l);

    secp256k1_scalar_sqr_512(l, a);
    secp256k1_scalar_reduce_512(r, l);
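/* Illustrative usage sketch (not part of the original file): multiplying two scalars
 * parsed from 32-byte big-endian buffers, assuming the standard (result, inputs)
 * argument order shown above. The locals sa, sb, sr and the buffers buf_a, buf_b,
 * buf_r are hypothetical.
 *
 *     secp256k1_scalar_t sa, sb, sr;
 *     int overflow;
 *     unsigned char buf_r[32];
 *     secp256k1_scalar_set_b32(&sa, buf_a, &overflow);
 *     secp256k1_scalar_set_b32(&sb, buf_b, &overflow);
 *     secp256k1_scalar_mul(&sr, &sa, &sb);      (sr = sa * sb mod n)
 *     secp256k1_scalar_get_b32(buf_r, &sr);
 */

/* secp256k1_scalar_eq: XOR the limbs pairwise and OR the differences; zero iff equal. */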
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
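/* Body of secp256k1_scalar_mul_shift_var(r, a, b, shift), shift >= 256: compute the
 * 512-bit product, return it shifted right by `shift` bits, and round to nearest
 * (the bit just below the cut-off decides whether one is added). */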
    secp256k1_scalar_mul_512(l, a, b);
    unsigned int shiftlimbs = shift >> 5;
    unsigned int shiftlow = shift & 0x1F;
    unsigned int shifthigh = 32 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
    if ((l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1) {
        secp256k1_scalar_add_bit(r, 0);
    }