/* GMP (mpn_*) implementation of the secp256k1 field representation.
 * NOTE(review): this chunk is a mangled extraction -- the stray leading
 * numbers on many lines are residue of original line numbers, and several
 * original lines are missing entirely; verify against the upstream
 * field_gmp_impl.h before building.
 * NOTE(review): the guard name _SECP256K1_... (leading underscore + capital)
 * is a reserved identifier in C; upstream later renamed these. */
7 #ifndef _SECP256K1_FIELD_REPR_IMPL_H_
8 #define _SECP256K1_FIELD_REPR_IMPL_H_
/* Complement of the field prime: pc = 2^256 - p = 2^32 + 0x3D1 (33 bits),
 * stored in however many limbs are needed; filled in by
 * secp256k1_fe_inner_start() below. */
16 static mp_limb_t secp256k1_field_pc[(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS];
18 static void secp256k1_fe_inner_start(
void) {
19 for (
int i=0; i<(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; i++)
20 secp256k1_field_pc[i] = 0;
21 secp256k1_field_pc[0] += 0x3D1UL;
22 secp256k1_field_pc[32/GMP_NUMB_BITS] += (((mp_limb_t)1) << (32 % GMP_NUMB_BITS));
24 secp256k1_field_p[i] = 0;
26 mpn_sub(secp256k1_field_p, secp256k1_field_p,
FIELD_LIMBS, secp256k1_field_pc, (33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS);
/* Teardown hook paired with secp256k1_fe_inner_start(); the GMP
 * representation allocates nothing, so there is nothing to release.
 * NOTE(review): the closing brace was lost in the mangled extraction;
 * restored here. */
static void secp256k1_fe_inner_stop(void) {
}
/* NOTE(review): interior fragment of what appears to be
 * secp256k1_fe_normalize. Missing from this extraction: the function
 * signature, the declaration of `carry`, the matching #else/#endif lines,
 * and the final conditional subtraction of p. Recover from upstream before
 * use. The visible mpn_add_1 calls fold the overflow limb r->n[FIELD_LIMBS]
 * (and a propagated `carry`) back into the limbs at bit offset 32 --
 * presumably the 2^256 == 2^32 + 0x3D1 (mod p) reduction; confirm against
 * the complete function. */
34 #if (GMP_NUMB_BITS >= 40)
39 mpn_add_1(
r->n+(32/GMP_NUMB_BITS),
r->n+(32/GMP_NUMB_BITS),
FIELD_LIMBS-(32/GMP_NUMB_BITS),
r->n[
FIELD_LIMBS] << (32 % GMP_NUMB_BITS));
41 mpn_add_1(
r->n+(32/GMP_NUMB_BITS),
r->n+(32/GMP_NUMB_BITS),
FIELD_LIMBS-(32/GMP_NUMB_BITS), carry << (32%GMP_NUMB_BITS));
/* NOTE(review): loop-body fragment -- ANDs `ret` with a per-limb
 * equals-zero test, presumably from a fe_is_zero routine. The loop header,
 * signature, and return are missing from this extraction. */
63 ret &= (a->
n[i] == 0);
/* NOTE(review): loop-body fragment -- ANDs `ret` with a per-limb equality
 * test between a and b, presumably from a fe_equal routine. Loop header,
 * signature, and return are missing from this extraction. */
74 ret &= (a->
n[i] ==
b->n[i]);
/* NOTE(review): loop-body fragment of a limb-wise three-way comparison
 * (returns 1 if a > b, -1 if a < b at the first differing limb) --
 * presumably fe_cmp_var scanning from the most significant limb; the loop
 * header, signature, and the final `return 0` are missing from this
 * extraction. */
80 if (a->
n[i] >
b->n[i])
return 1;
81 if (a->
n[i] <
b->n[i])
return -1;
86 static int secp256k1_fe_set_b32(
secp256k1_fe_t *
r,
const unsigned char *a) {
89 for (
int i=0; i<256; i++) {
90 int limb = i/GMP_NUMB_BITS;
91 int shift = i%GMP_NUMB_BITS;
92 r->n[limb] |= (mp_limb_t)((a[31-i/8] >> (i%8)) & 0x1) << shift;
94 return (mpn_cmp(
r->n, secp256k1_field_p,
FIELD_LIMBS) < 0);
98 static void secp256k1_fe_get_b32(
unsigned char *
r,
const secp256k1_fe_t *a) {
99 for (
int i=0; i<32; i++) {
101 for (
int j=0; j<8; j++) {
102 int limb = (8*i+j)/GMP_NUMB_BITS;
103 int shift = (8*i+j)%GMP_NUMB_BITS;
104 c |= ((a->
n[limb] >> shift) & 0x1) << j;
/* NOTE(review): fragment of what appears to be a field negation routine:
 * normalize r, bitwise-complement each limb (loop header missing), then
 * subtract small constants so the result equals p - value. The enclosing
 * signature, the loop header for the complement, and the #else/#endif
 * branch structure are missing from this extraction; recover from upstream
 * before use. */
113 secp256k1_fe_normalize(
r);
115 r->n[i] = ~(
r->n[i]);
116 #if (GMP_NUMB_BITS >= 33)
120 mpn_sub_1(
r->n+(32/GMP_NUMB_BITS),
r->n+(32/GMP_NUMB_BITS),
FIELD_LIMBS-(32/GMP_NUMB_BITS), 0x1UL << (32%GMP_NUMB_BITS));
/* NOTE(review): fragment of the 512-bit -> 256-bit reduction. The visible
 * lines fold the upper FIELD_LIMBS limbs of the double-wide product `tmp`
 * back into the low half (using that 2^256 mod p equals the small constant
 * held in secp256k1_field_pc), accumulating overflow `o`/`o2` into a small
 * quotient buffer `q`. The function signature, the declaration of `o`, the
 * #else/#endif pairing, and the final add/normalize steps are missing from
 * this extraction; recover from upstream before use. */
139 #if (GMP_NUMB_BITS >= 33)
143 mpn_addmul_1(tmp+(32/GMP_NUMB_BITS), tmp+
FIELD_LIMBS,
FIELD_LIMBS-(32/GMP_NUMB_BITS), 0x1UL << (32%GMP_NUMB_BITS));
145 mp_limb_t q[1+(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS];
146 q[(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS] = mpn_mul_1(q, secp256k1_field_pc, (33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS, o);
147 #if (GMP_NUMB_BITS <= 32)
148 mp_limb_t o2 = tmp[2*
FIELD_LIMBS-(32/GMP_NUMB_BITS)] << (32%GMP_NUMB_BITS);
149 q[(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS] += mpn_addmul_1(q, secp256k1_field_pc, (33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS, o2);
/* NOTE(review): fragment of a field multiplication: local copies ac/bc are
 * normalized, then (on a missing line, presumably an mpn_mul_n into `tmp`)
 * multiplied, and the double-wide product reduced into r. Signature, copy
 * statements, and the multiply itself are missing from this extraction. */
158 secp256k1_fe_normalize(&ac);
159 secp256k1_fe_normalize(&bc);
162 secp256k1_fe_reduce(
r, tmp);
/* NOTE(review): fragment of a field squaring: a local copy ac is
 * normalized, squared (on a missing line, presumably mpn_sqr or mpn_mul_n
 * into `tmp`), and the product reduced into r. Signature and the squaring
 * call are missing from this extraction. */
167 secp256k1_fe_normalize(&ac);
170 secp256k1_fe_reduce(
r, tmp);
/* NOTE(review): fragment of a branch-free conditional move. The masks are
 * fully visible: flag + ~0 computes flag - 1, so flag == 0 gives
 * mask0 = all-ones / mask1 = 0 (keep r), and flag == 1 gives mask0 = 0 /
 * mask1 = all-ones (take a) -- constant-time selection per limb. The
 * function signature and the loop header over the limbs are missing from
 * this extraction. */
174 mp_limb_t mask0 = flag + ~((mp_limb_t)0), mask1 = ~mask0;
176 r->n[i] = (
r->n[i] & mask0) | (a->
n[i] & mask1);