#ifndef SECP256K1_MODULE_BULLETPROOF_UTIL
#define SECP256K1_MODULE_BULLETPROOF_UTIL
/* Return the number of set bits in the 64-bit value x.
 *
 * NOTE(review): the signature line was lost in extraction and is
 * reconstructed as a file-local helper -- confirm against upstream.
 * NOTE(review): __builtin_popcountl takes `unsigned long`, which is only
 * 32 bits on LLP64 platforms (e.g. 64-bit Windows), so the builtin path
 * would drop the upper word of a uint64_t there; __builtin_popcountll is
 * the width-correct builtin. Confirm which configurations define
 * HAVE_BUILTIN_POPCOUNTL before relying on the builtin path.
 */
static size_t secp256k1_popcountl(uint64_t x) {
#ifdef HAVE_BUILTIN_POPCOUNTL
    return __builtin_popcountl(x);
#else
    /* Portable fallback: test each of the 64 bit positions. */
    size_t i;
    size_t ret = 0;
    for (i = 0; i < 64; i++) {
        ret += (size_t)((x >> i) & 1);
    }
    return ret;
#endif
}
/* Return the index of the lowest set bit of x (count trailing zeros).
 *
 * NOTE(review): the signature and the post-loop return were lost in
 * extraction and are reconstructed -- confirm against upstream.
 * NOTE(review): __builtin_ctzl(0) is undefined behavior, while the
 * fallback loop falls through and returns 64 for x == 0; the two paths
 * therefore disagree on zero input. Callers must not pass 0 if the
 * builtin path may be compiled in. Also, like popcountl above,
 * __builtin_ctzl takes `unsigned long` (32-bit on LLP64);
 * __builtin_ctzll is the width-correct builtin for uint64_t.
 */
static size_t secp256k1_ctzl(uint64_t x) {
#ifdef HAVE_BUILTIN_CTZL
    return __builtin_ctzl(x);
#else
    size_t i;
    for (i = 0; i < 64; i++) {
        if (x & (1ull << i)) {
            return i;
        }
    }
    /* x == 0: no set bit; return the sentinel 64. */
    return 64;
#endif
}
64 secp256k1_scalar_clear(
r);
67 secp256k1_scalar_mul(&term, &a[n], &
b[n]);
68 secp256k1_scalar_add(
r,
r, &term);
85 secp256k1_scalar_mul(&
r[i], &
r[i - 1], &a[i]);
88 secp256k1_scalar_inverse_var(&u, &
r[--i]);
92 secp256k1_scalar_mul(&
r[j], &
r[i], &u);
93 secp256k1_scalar_mul(&u, &u, &a[j]);
100 const size_t bitveclen = (n + 7) / 8;
103 memset(out, 0, bitveclen);
104 for (i = 0; i < n; i++) {
107 secp256k1_fe_normalize(&pointx);
108 secp256k1_fe_get_b32(&out[bitveclen + i*32], &pointx);
109 if (!secp256k1_fe_is_quad_var(&pt[i].y)) {
110 out[i/8] |= (1ull << (i % 8));
116 const size_t bitveclen = (n + 7) / 8;
117 const size_t offset = bitveclen + i*32;
120 secp256k1_fe_set_b32(&fe, &data[offset]);
121 if (secp256k1_ge_set_xquad(pt, &fe)) {
122 if (data[i / 8] & (1 << (i % 8))) {
123 secp256k1_ge_neg(pt, pt);
131 static void secp256k1_bulletproof_update_commit(
unsigned char *commit,
const secp256k1_ge *lpt,
const secp256k1_ge *rpt) {
134 unsigned char lrparity;
135 lrparity = (!secp256k1_fe_is_quad_var(&lpt->
y) << 1) + !secp256k1_fe_is_quad_var(&rpt->
y);
136 secp256k1_sha256_initialize(&
sha256);
137 secp256k1_sha256_write(&
sha256, commit, 32);
138 secp256k1_sha256_write(&
sha256, &lrparity, 1);
140 secp256k1_fe_normalize(&pointx);
141 secp256k1_fe_get_b32(commit, &pointx);
142 secp256k1_sha256_write(&
sha256, commit, 32);
144 secp256k1_fe_normalize(&pointx);
145 secp256k1_fe_get_b32(commit, &pointx);
146 secp256k1_sha256_write(&
sha256, commit, 32);
147 secp256k1_sha256_finalize(&
sha256, commit);
150 static void secp256k1_bulletproof_update_commit_n(
unsigned char *commit,
const secp256k1_ge *pt,
size_t n) {
152 unsigned char lrparity = 0;
157 for (i = 0; i < n; i++) {
158 lrparity |= secp256k1_fe_is_quad_var(&pt[i].y) << i;
161 secp256k1_sha256_initialize(&
sha256);
162 secp256k1_sha256_write(&
sha256, commit, 32);
163 secp256k1_sha256_write(&
sha256, &lrparity, 1);
164 for (i = 0; i < n; i++) {
167 secp256k1_fe_normalize(&pointx);
168 secp256k1_fe_get_b32(commit, &pointx);
169 secp256k1_sha256_write(&
sha256, commit, 32);
171 secp256k1_sha256_finalize(&
sha256, commit);
190 g = &secp256k1_ge_const_g;
193 secp256k1_scalar_clear(&zero);
198 secp256k1_ecmult_const(
r, g, blind, 256);
208 secp256k1_ge_set_gej(&tmpge,
r);
209 secp256k1_gej_add_ge(
r,
r, g);
210 secp256k1_ge_set_gej(&rge,
r);
212 inf = secp256k1_ge_is_infinity(&rge);
213 secp256k1_fe_cmov(&rge.
x, &tmpge.
x, inf);
214 secp256k1_fe_cmov(&rge.
y, &tmpge.
y, inf);
218 secp256k1_ecmult_const(&tmpj, &gen[n], &s[n], 256);
219 secp256k1_gej_add_ge(
r, &tmpj, &rge);
222 secp256k1_ge_neg(&negg, g);
223 secp256k1_ge_set_gej(&tmpge,
r);
224 secp256k1_gej_add_ge(
r,
r, &negg);
225 secp256k1_ge_set_gej(&rge,
r);
227 secp256k1_fe_cmov(&rge.
x, &tmpge.
x, inf);
228 secp256k1_fe_cmov(&rge.
y, &tmpge.
y, inf);