#ifndef SECP256K1_MODULE_BULLETPROOF_INNER_PRODUCT_IMPL
#define SECP256K1_MODULE_BULLETPROOF_INNER_PRODUCT_IMPL
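/* Number of final a/b scalars carried verbatim in the proof rather than
 * reduced further; the recursion below stops once the vectors shrink to
 * this combined size, trading L/R points for scalars. */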
#define IP_AB_SCALARS 4
unsigned char commit[32];
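/* Proof length: for short vectors, one 32-byte dot product plus the 2n raw
 * scalars; otherwise the dot product, IP_AB_SCALARS final scalars,
 * 2 * (bit_count - 1 + log) serialized L/R points, and a parity bitvector
 * of 2*log bits rounded up to whole bytes. */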
return 32 * (1 + 2 * n);
size_t bit_count = secp256k1_popcountl(n);
return 32 * (1 + 2 * (bit_count - 1 + log) + IP_AB_SCALARS) + (2*log + 7) / 8;
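/* Callback for the verifier's single multiexponentiation. The first
 * 2*vec_len indices are the G_i/H_i generators; each proof contributes a
 * term built from cached products of the round challenges (the cache slot
 * for idx is popcount(idx)) and powers of y^-1, and the terms are summed
 * into one scalar per generator. */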
if (idx < 2 * ctx->vec_len) {
const size_t lg_grouping = secp256k1_floor_lg(grouping);
if (idx < ctx->vec_len) {
*pt = ctx->geng[idx];
secp256k1_scalar_clear(sc);
for (i = 0; i < ctx->n_proofs; i++) {
const size_t cache_idx = secp256k1_popcountl(idx);
if (idx % (ctx->vec_len / grouping) == 0) {
const size_t abinv_idx = idx / (ctx->vec_len / grouping) - 1;
size_t prev_cache_idx;
prev_cache_idx = secp256k1_popcountl(idx - 1);
for (j = 0; j < (size_t) secp256k1_ctzl(idx) - lg_grouping; j++) {
secp256k1_scalar_sqr(&yinvn, &yinvn);
if (lg_grouping == 1) {
secp256k1_scalar_sqr(&yinvn, &yinvn);
prev_cache_idx = cache_idx - 1;
secp256k1_scalar_mul(
} else if (idx < ctx->vec_len) {
const size_t xsq_idx = secp256k1_ctzl(idx);
const size_t xsqinv_idx = secp256k1_ctzl(idx);
if (idx < ctx->vec_len / grouping && secp256k1_popcountl(idx) == ctx->lg_vec_len - 1) {
const size_t xsqinv_idx = secp256k1_ctzl(~idx);
secp256k1_scalar_add(&term, &term, &rangeproof_offset);
secp256k1_scalar_add(sc, sc, &term);
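/* Past the generators come the proofs' L/R points, deserialized on demand.
 * The scalar for each is the corresponding squared challenge, blinded by
 * the per-proof randomizer. */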
size_t real_idx = idx - 2 * ctx->vec_len;
const size_t proof_idx = real_idx / (2 * ctx->lg_vec_len);
if (!secp256k1_bulletproof_deserialize_point(
*sc = ctx->proof[proof_idx].xsq[real_idx / 2];
secp256k1_scalar_mul(sc, sc, &ctx->randomizer[proof_idx]);
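/* Last comes the blinding generator, whose scalar aggregates one term from
 * every proof in the batch. */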
secp256k1_scalar_clear(sc);
for (i = 0; i < ctx->n_proofs; i++) {
secp256k1_scalar_add(sc, sc, &term);
size_t proof_idx = 0;
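/* Batch verifier: hash all proofs into one commitment, give each proof an
 * independent randomizer, re-derive every challenge, and collapse the whole
 * batch into a single multiexponentiation that must come out to infinity. */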
unsigned char commit[32];
size_t total_n_points = 2 * vec_len + !!shared_g + 1;
if (!secp256k1_scratch_allocate_frame(scratch, n_proofs * (sizeof(*ecmult_data.randomizer) + sizeof(*ecmult_data.proof)), 2)) {
secp256k1_scalar_clear(&zero);
ecmult_data.genh = gens->gens + gens->n / 2;
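/* Commit to all input data: each proof's serialization, its commitment to P
 * and its P offset, so every challenge derived below depends on them all. */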
secp256k1_sha256_initialize(&sha256);
for (i = 0; i < n_proofs; i++) {
secp256k1_sha256_write(&sha256, proof[i].proof, plen);
secp256k1_sha256_write(&sha256, proof[i].commit, 32);
secp256k1_scalar_get_b32(commit, &proof[i].p_offs);
secp256k1_sha256_write(&sha256, commit, 32);
secp256k1_sha256_finalize(&sha256, commit);
secp256k1_scalar_clear(&ecmult_data.p_offs);
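/* Parse each proof: read the claimed dot product and final a/b scalars,
 * recover the challenges, and accumulate this proof's contribution to the
 * blinding-generator scalar. */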
for (i = 0; i < n_proofs; i++) {
const unsigned char *serproof = proof[i].proof;
unsigned char proof_commit[32];
secp256k1_scalar_set_b32(&dot, serproof, &overflow);
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_sha256_initialize(&sha256);
secp256k1_sha256_write(&sha256, proof[i].commit, 32);
secp256k1_sha256_write(&sha256, serproof, 32);
secp256k1_sha256_finalize(&sha256, proof_commit);
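/* Read the final a/b scalars; zero scalars are rejected outright since they
 * would break the batched inversion below. */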
for (j = 0; j < n_ab; j++) {
secp256k1_scalar_set_b32(&ab[j], serproof, &overflow);
secp256k1_scratch_deallocate_frame(scratch);
if (secp256k1_scalar_is_zero(&ab[j])) {
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scalar_dot_product(&negprod, &ab[0], &ab[n_ab / 2], n_ab / 2);
secp256k1_sha256_initialize(&sha256);
secp256k1_sha256_write(&sha256, commit, 32);
secp256k1_sha256_finalize(&sha256, commit);
secp256k1_scalar_set_b32(&ecmult_data.randomizer[i], commit, &overflow);
if (overflow || secp256k1_scalar_is_zero(&ecmult_data.randomizer[i])) {
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scalar_set_b32(&x, proof_commit, &overflow);
if (overflow || secp256k1_scalar_is_zero(&x)) {
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scalar_negate(&negprod, &negprod);
secp256k1_scalar_add(&negprod, &negprod, &dot);
secp256k1_scalar_mul(&x, &x, &negprod);
secp256k1_scalar_add(&x, &x, &proof[i].p_offs);
secp256k1_scalar_mul(&x, &x, &ecmult_data.randomizer[i]);
secp256k1_scalar_add(&ecmult_data.p_offs, &ecmult_data.p_offs, &x);
if (!secp256k1_scalar_is_zero(&negprod)) {
secp256k1_scratch_deallocate_frame(scratch);
negprod = ab[n_ab - 1];
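/* Recover each round's challenge x_j by hashing the running commitment with
 * the L_j/R_j parity bits and serialized x-coordinates; x_j^2 is stored for
 * the multiexp callback. The last a/b scalar is stashed in negprod because
 * the loop folds the challenge product into ab[n_ab - 1] for the batch
 * inversion; the original value is restored afterwards. */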
for (j = 0; j < ecmult_data.lg_vec_len; j++) {
const size_t lidx = 2 * j;
const size_t ridx = 2 * j + 1;
const size_t bitveclen = (2 * ecmult_data.lg_vec_len + 7) / 8;
const unsigned char lrparity = 2 * !!(serproof[lidx / 8] & (1 << (lidx % 8))) + !!(serproof[ridx / 8] & (1 << (ridx % 8)));
secp256k1_sha256_initialize(&sha256);
secp256k1_sha256_write(&sha256, proof_commit, 32);
secp256k1_sha256_write(&sha256, &lrparity, 1);
secp256k1_sha256_write(&sha256, &serproof[32 * lidx + bitveclen], 32);
secp256k1_sha256_write(&sha256, &serproof[32 * ridx + bitveclen], 32);
secp256k1_sha256_finalize(&sha256, proof_commit);
secp256k1_scalar_set_b32(&xi, proof_commit, &overflow);
if (overflow || secp256k1_scalar_is_zero(&xi)) {
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scalar_mul(&ab[n_ab - 1], &ab[n_ab - 1], &xi);
secp256k1_scalar_sqr(&ecmult_data.proof[i].xsq[j], &xi);
secp256k1_scalar_inverse_all_var(ecmult_data.proof[i].abinv, ab, n_ab);
ab[n_ab - 1] = negprod;
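/* Expand the batch-inverted scalars into the products the verification
 * callback needs: walking j downward, each slot combines the entry at
 * j & (j - 1) (j with its lowest set bit cleared) with one more inverse. */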
for (j = n_ab - 1; j > 0; j--) {
prev_idx = j & (j - 1);
secp256k1_scalar_mul(
secp256k1_scalar_mul(&negprod, &ecmult_data.randomizer[i], &ab[0]);
secp256k1_scalar_sqr(&negprod, &negprod);
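/* Everything is now in ecmult_data; one multiexponentiation over the
 * generators, all L/R points and the blinding generator decides the batch.
 * It is valid iff the result is the point at infinity. */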
if (secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &r, NULL, secp256k1_bulletproof_innerproduct_vfy_ecmult_callback, (void *) &ecmult_data, total_n_points) != 1) {
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_deallocate_frame(scratch);
return secp256k1_gej_is_infinity(&r);
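/* Prover callback for this round's L point: b scalars (twisted by y^-n) go
 * on the H generators and a scalars on the G generators, crossed so each
 * scalar meets the opposite half (ab_idx = (idx / grouping) ^ 1). The final
 * loop corrects for generators still virtually grouped from prior rounds. */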
const size_t ab_idx = (idx / ctx->grouping) ^ 1;
if ((idx / ctx->grouping) % 2 == 0) {
*pt = ctx->genh[idx];
*sc = ctx->b[ab_idx];
secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
*pt = ctx->geng[idx];
*sc = ctx->a[ab_idx];
for (i = 0; (1u << i) < ctx->grouping; i++) {
size_t grouping = (1u << i);
if ((((idx / grouping) % 2) ^ ((idx / ctx->grouping) % 2)) == 0) {
secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
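/* The R callback mirrors the L callback with the parity test flipped, so the
 * complementary halves of a and b are selected. */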
const size_t ab_idx = (idx / ctx->grouping) ^ 1;
if ((idx / ctx->grouping) % 2 == 1) {
*pt = ctx->genh[idx];
*sc = ctx->b[ab_idx];
secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
*pt = ctx->geng[idx];
*sc = ctx->a[ab_idx];
for (i = 0; (1u << i) < ctx->grouping; i++) {
size_t grouping = (1u << i);
if ((((idx / grouping) % 2) ^ ((idx / ctx->grouping) % 2)) == 1) {
secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
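/* Callback used when the G generator vector is folded explicitly: the new
 * generator's scalar is a product over the low bits of idx, taking x[i] for
 * a set bit and xinv[i] otherwise. */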
*pt = ctx->geng[idx];
secp256k1_scalar_set_int(sc, 1);
for (i = 0; (1u << i) <= ctx->grouping; i++) {
if (idx & (1u << i)) {
secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
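/* Folding for the H vector is the same with x and xinv swapped, plus a final
 * y^-n factor. */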
*pt = ctx->genh[idx];
secp256k1_scalar_set_int(sc, 1);
for (i = 0; (1u << i) <= ctx->grouping; i++) {
if (idx & (1u << i)) {
secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
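/* Recursive inner-product prover. Every iteration halves the a/b vectors and
 * emits one L/R pair; once enough rounds have passed, the virtual generator
 * grouping is materialized with small multiexps and the function recurses on
 * the halved problem. */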
static int secp256k1_bulletproof_inner_product_real_prove_impl(
const secp256k1_ecmult_context *ecmult_ctx,
secp256k1_scratch *scratch,
secp256k1_ge *out_pt,
size_t *pt_idx,
const secp256k1_ge *g,
secp256k1_ge *geng,
secp256k1_ge *genh,
secp256k1_scalar *a_arr,
secp256k1_scalar *b_arr,
const secp256k1_scalar *yinv,
const secp256k1_scalar *ux,
const size_t n,
unsigned char *commit) {
for (halfwidth = n / 2, i = 0; halfwidth > IP_AB_SCALARS / 4; halfwidth /= 2, i++) {
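/* L commits to the cross term: g_sc collects ux times <a_even, b_odd>, while
 * the generator terms come from the L callback above. */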
secp256k1_scalar_clear(&pfdata.g_sc);
for (j = 0; j < halfwidth; j++) {
secp256k1_scalar_mul(&prod, &a_arr[2*j], &b_arr[2*j + 1]);
secp256k1_scalar_add(&pfdata.g_sc, &pfdata.g_sc, &prod);
secp256k1_scalar_mul(&pfdata.g_sc, &pfdata.g_sc, ux);
secp256k1_scalar_set_int(&pfdata.yinvn, 1);
secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &tmplj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_l, (void *) &pfdata, n + 1);
secp256k1_ge_set_gej(&out_pt[(*pt_idx)++], &tmplj);
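/* R commits to the opposite cross term, ux times <a_odd, b_even>. Hashing
 * both points into the transcript then yields this round's challenge x. */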
secp256k1_scalar_clear(&pfdata.g_sc);
for (j = 0; j < halfwidth; j++) {
secp256k1_scalar_mul(&prod, &a_arr[2*j + 1], &b_arr[2*j]);
secp256k1_scalar_add(&pfdata.g_sc, &pfdata.g_sc, &prod);
secp256k1_scalar_mul(&pfdata.g_sc, &pfdata.g_sc, ux);
secp256k1_scalar_set_int(&pfdata.yinvn, 1);
secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &tmprj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_r, (void *) &pfdata, n + 1);
secp256k1_ge_set_gej(&out_pt[(*pt_idx)++], &tmprj);
secp256k1_bulletproof_update_commit(commit, &out_pt[*pt_idx - 2], &out_pt[*pt_idx - 1]);
secp256k1_scalar_set_b32(&pfdata.x[i], commit, &overflow);
if (overflow || secp256k1_scalar_is_zero(&pfdata.x[i])) {
secp256k1_scalar_inverse_var(&pfdata.xinv[i], &pfdata.x[i]);
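/* Fold the scalar vectors in place: a' = x*a_even + x^-1*a_odd and
 * b' = x^-1*b_even + x*b_odd, halving the problem size. */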
for (j = 0; j < halfwidth; j++) {
secp256k1_scalar_mul(&a_arr[2*j], &a_arr[2*j], &pfdata.x[i]);
secp256k1_scalar_mul(&tmps, &a_arr[2*j + 1], &pfdata.xinv[i]);
secp256k1_scalar_add(&a_arr[j], &a_arr[2*j], &tmps);
secp256k1_scalar_mul(&b_arr[2*j], &b_arr[2*j], &pfdata.xinv[i]);
secp256k1_scalar_mul(&tmps, &b_arr[2*j + 1], &pfdata.x[i]);
secp256k1_scalar_add(&b_arr[j], &b_arr[2*j], &tmps);
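/* These cutoffs appear to be empirically chosen crossover points: beyond
 * them it is cheaper to materialize the folded generator vectors with one
 * small multiexp per generator, then recurse, than to keep correcting every
 * scalar inside the callbacks. */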
if ((n > 2048 && i == 3) || (n > 128 && i == 2) || (n > 32 && i == 1)) {
for (j = 0; j < halfwidth; j++) {
secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &rj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_g, (void *) &pfdata, 2u << i);
pfdata.geng += 2u << i;
secp256k1_ge_set_gej(&geng[j], &rj);
secp256k1_scalar_set_int(&pfdata.yinvn, 1);
secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &rj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_h, (void *) &pfdata, 2u << i);
pfdata.genh += 2u << i;
secp256k1_ge_set_gej(&genh[j], &rj);
secp256k1_scalar_sqr(&yinv2, yinv);
for (j = 0; j < i; j++) {
secp256k1_scalar_sqr(&yinv2, &yinv2);
if (!secp256k1_bulletproof_inner_product_real_prove_impl(ecmult_ctx, scratch, out_pt, pt_idx, g, geng, genh, a_arr, b_arr, &yinv2, ux, halfwidth, commit)) {
unsigned char commit[32];
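/* Trivial case: the vectors are short enough to serialize directly, so the
 * proof is just the dot product followed by the raw a and b scalars. */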
for (i = 0; i < n; i++) {
cb(&a[i], NULL, 2*i, cb_data);
cb(&b[i], NULL, 2*i+1, cb_data);
secp256k1_scalar_dot_product(&dot, a, b, n);
secp256k1_scalar_get_b32(proof, &dot);
for (i = 0; i < n; i++) {
secp256k1_scalar_get_b32(&proof[32 * (i + 1)], &a[i]);
secp256k1_scalar_get_b32(&proof[32 * (i + n + 1)], &b[i]);
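/* General case: load the scalars and generators, commit to the dot product,
 * derive the binding challenge ux from the transcript, run the recursive
 * prover, then serialize the surviving scalars and the L/R points. */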
for (i = 0; i < n; i++) {
cb(&a_arr[i], NULL, 2*i, cb_data);
cb(&b_arr[i], NULL, 2*i+1, cb_data);
geng[i] = gens->gens[i];
genh[i] = gens->gens[i + gens->n/2];
secp256k1_scalar_dot_product(&dot, a_arr, b_arr, n);
secp256k1_scalar_get_b32(proof, &dot);
secp256k1_sha256_initialize(&sha256);
secp256k1_sha256_write(&sha256, commit_inp, 32);
secp256k1_sha256_write(&sha256, proof, 32);
secp256k1_sha256_finalize(&sha256, commit);
secp256k1_scalar_set_b32(&ux, commit, &overflow);
if (overflow || secp256k1_scalar_is_zero(&ux)) {
secp256k1_scratch_deallocate_frame(scratch);
if (!secp256k1_bulletproof_inner_product_real_prove_impl(ecmult_ctx, scratch, out_pt, &pt_idx, gens->blinding_gen, geng, genh, a_arr, b_arr, yinv, &ux, n, commit)) {
secp256k1_scratch_deallocate_frame(scratch);
for (i = 0; i < half_n_ab; i++) {
secp256k1_scalar_get_b32(&proof[32 * i], &a_arr[i]);
secp256k1_scalar_get_b32(&proof[32 * (i + half_n_ab)], &b_arr[i]);
proof += 64 * half_n_ab;
secp256k1_bulletproof_serialize_points(proof, out_pt, pt_idx);
secp256k1_scratch_deallocate_frame(scratch);