PRCYCoin  2.0.0.7rc1
P2P Digital Currency
inner_product_impl.h
Go to the documentation of this file.
1 /**********************************************************************
2  * Copyright (c) 2018 Andrew Poelstra *
3  * Distributed under the MIT software license, see the accompanying *
4  * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
5  **********************************************************************/
6 
7 #ifndef SECP256K1_MODULE_BULLETPROOF_INNER_PRODUCT_IMPL
8 #define SECP256K1_MODULE_BULLETPROOF_INNER_PRODUCT_IMPL
9 
10 #include "group.h"
11 #include "scalar.h"
12 
15 
16 /* Number of scalars that should remain at the end of a recursive proof. The paper
17  * uses 2, by reducing the scalars as far as possible. We stop one recursive step
18  * early, trading two points (L, R) for two scalars, which reduces verification
19  * and prover cost.
20  *
21  * For the most part, all comments assume this value is at 4.
22  */
23 #define IP_AB_SCALARS 4
24 
25 /* Bulletproof inner products consist of the four scalars and `2[log2(n) - 1]` points
26  * `a_1`, `a_2`, `b_1`, `b_2`, `L_i` and `R_i`, where `i` ranges from 0 to `log2(n)-1`.
27  *
28  * The prover takes as input a point `P` and scalar `c`. It proves that it knows
29  * scalars `a_i`, `b_i` for `i` ranging from 1 to `n`, such that
30  * `P = sum_i [a_i G_i + b_i H_i]` and `<{a_i}, {b_i}> = c`,
31  * where `G_i` and `H_i` are standard NUMS generators.
32  *
33  * Verification of the proof comes down to a single multiexponentiation of the form
34  *
35  * P + (c - a_1*b_1 - a_2*b_2)*x*G
36  * - sum_{i=1}^n [s'_i*G_i + s_i*H_i]
37  * + sum_{i=1}^log2(n) [x_i^-2 L_i + x_i^2 R_i]
38  *
39  * which will equal infinity if the inner product proof is correct. Here
40  * - `G` is the standard secp generator
41  * - `x` is a hash of `commit` and is used to rerandomize `c`. See Protocol 2 vs Protocol 1 in the paper.
42  * - `x_i = H(x_{i-1} || L_i || R_i)`, where `x_{-1}` is passed through the `commit` variable and
43  * must be a commitment to `P` and `c`.
44  * - `s_i` and `s'_i` are computed as follows.
45  *
46  * Letting `i_j` be defined as 1 if `i & 2^j == 1`, and -1 otherwise,
47  * - For `i` from `1` to `n/2`, `s'_i = a_1 * prod_{j=1}^log2(n) x_j^i_j`
48  * - For `i` from `n/2 + 1` to `n`, `s'_i = a_2 * prod_{j=1}^log2(n) x_j^i_j`
49  * - For `i` from `1` to `n/2`, `s_i = b_1 * prod_{j=1}^log2(n) x_j^-i_j`
50  * - For `i` from `n/2 + 1` to `n`, `s_i = b_2 * prod_{j=1}^log2(n) x_j^-i_j`
51  *
52  * Observe that these can be computed iteratively by labelling the coefficients `s_i` for `i`
53  * from `0` to `2n-1` rather than 1-indexing and distinguishing between `s_i'`s and `s_i`s:
54  *
55  * Start with `s_0 = a_1 * prod_{j=1}^log2(n) x_j^-1`, then for later `s_i`s,
56  * - For `i` from `1` to `n/2 - 1`, multiply some earlier `s'_j` by some `x_k^2`
57  * - For `i = n/2`, multiply `s_{i-1}` by `a_2/a_1`.
58  * - For `i` from `n/2 + 1` to `n - 1`, multiply some earlier `s'_j` by some `x_k^2`
59  * - For `i = n`, multiply `s'_{i-1}` by `b_1/a_2` to get `s_i`.
60  * - For `i` from `n + 1` to `3n/2 - 1`, multiply some earlier `s_j` by some `x_k^-2`
61  * - For `i = 3n/2`, multiply `s_{i-1}` by `b_2/b_1`.
62  * - For `i` from `3n/2 + 1` to `2n - 1`, multiply some earlier `s_j` by some `x_k^-2`
63  * where of course, the indices `j` and `k` must be chosen carefully.
64  *
65  * The bulk of `secp256k1_bulletproof_innerproduct_vfy_ecmult_callback` involves computing
66  * these indices, given `a_2/a_1`, `b_1/a_1`, `b_2/b_1`, and the `x_k^2`s as input. It
67  * computes `x_k^-2` as a side-effect of its other computation.
68  */
69 
70 typedef int (secp256k1_bulletproof_vfy_callback)(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data);
71 
72 /* used by callers to wrap a proof with surrounding context */
73 typedef struct {
74  const unsigned char *proof;
77  unsigned char commit[32];
82 
83 /* used internally */
84 typedef struct {
92  const unsigned char *serialized_lr;
94 
95 /* used by callers to modify the multiexp */
96 typedef struct {
97  size_t n_proofs;
99  const secp256k1_ge *g;
102  size_t vec_len;
103  size_t lg_vec_len;
104  int shared_g;
108 
110  if (n < IP_AB_SCALARS / 2) {
111  return 32 * (1 + 2 * n);
112  } else {
113  size_t bit_count = secp256k1_popcountl(n);
114  size_t log = secp256k1_floor_lg(2 * n / IP_AB_SCALARS);
115  return 32 * (1 + 2 * (bit_count - 1 + log) + IP_AB_SCALARS) + (2*log + 7) / 8;
116  }
117 }
118 
119 /* Our ecmult_multi function takes `(c - a*b)*x` directly and multiplies this by `G`. For every other
120  * (scalar, point) pair it calls the following callback function, which takes an index and outputs a
121  * pair. The function therefore has three regimes:
122  *
123  * For the first `n` invocations, it returns `(s'_i, G_i)` for `i` from 1 to `n`.
124  * For the next `n` invocations, it returns `(s_i, H_i)` for `i` from 1 to `n`.
125  * For the next `2*log2(n)` invocations it returns `(x_i^-2, L_i)` and `(x_i^2, R_i)`,
126  * alternating between the two choices, for `i` from 1 to `log2(n)`.
127  *
128  * For the remaining invocations it passes through to another callback, `rangeproof_cb_data` which
129  * computes `P`. The reason for this is that in practice `P` is usually defined by another multiexp
130  * rather than being a known point, and it is more efficient to compute one exponentiation.
131  *
132  * Inline we refer to the first `2n` coefficients as `s_i` for `i` from 0 to `2n-1`, since that
133  * is the more convenient indexing. In particular we describe (a) how the indices `j` and `k`,
134  * from the big comment block above, are chosen; and (b) when/how each `x_k^-2` is computed.
135  */
136 static int secp256k1_bulletproof_innerproduct_vfy_ecmult_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
138 
139  /* First 2N points use the standard Gi, Hi generators, and the scalars can be aggregated across proofs.
140  * Inside this if clause, `idx` corresponds to the index `i` in the big comment, and runs from 0 to `2n-1`.
141  * Also `ctx->vec_len` corresponds to `n`. */
142  if (idx < 2 * ctx->vec_len) {
143  /* Number of `a` scalars in the proof (same as number of `b` scalars in the proof). Will
144  * be 2 except for very small proofs that have fewer than 2 scalars as input. */
145  const size_t grouping = ctx->vec_len < IP_AB_SCALARS / 2 ? ctx->vec_len : IP_AB_SCALARS / 2;
146  const size_t lg_grouping = secp256k1_floor_lg(grouping);
147  size_t i;
148  VERIFY_CHECK(lg_grouping == 0 || lg_grouping == 1); /* TODO support higher IP_AB_SCALARS */
149 
150  /* Determine whether we're multiplying by `G_i`s or `H_i`s. */
151  if (idx < ctx->vec_len) {
152  *pt = ctx->geng[idx];
153  } else {
154  *pt = ctx->genh[idx - ctx->vec_len];
155  }
156 
157  secp256k1_scalar_clear(sc);
158  /* Loop over all the different inner product proofs we might be doing at once. Since they
159  * share generators `G_i` and `H_i`, we compute all of their scalars at once and add them.
160  * For each proof we start with the "seed value" `ctx->proof[i].xcache[0]` (see next comment
161  * for its meaning) from which every other scalar derived. We expect the caller to have
162  * randomized this to ensure that this wanton addition cannot enable cancellation attacks.
163  */
164  for (i = 0; i < ctx->n_proofs; i++) {
165  /* To recall from the introductory comment: most `s_i` values are computed by taking an
166  * earlier `s_j` value and multiplying it by some `x_k^2`.
167  *
168  * We now explain the index `j`: it is the largest number with one fewer 1-bits than `i`.
169  * Alternately, the most recently returned `s_j` where `j` has one fewer 1-bits than `i`.
170  *
171  * To ensure that `s_j` is available when we need it, on each iteration we define the
172  * variable `cache_idx` which simply counts the 1-bits in `i`; before returning `s_i`
173  * we store it in `ctx->proof[i].xcache[cache_idx]`. Then later, when we want "most
174  * recently returned `s_j` with one fewer 1-bits than `i`, it'll be sitting right
175  * there in `ctx->proof[i].xcache[cache_idx - 1]`.
176  *
177  * Note that `ctx->proof[i].xcache[0]` will always equal `-a_1 * prod_{i=1}^{n-1} x_i^-2`,
178  * and we expect the caller to have set this.
179  */
180  const size_t cache_idx = secp256k1_popcountl(idx);
181  secp256k1_scalar term;
183  /* For the special case `cache_idx == 0` (which is true iff `idx == 0`) there is nothing to do. */
184  if (cache_idx > 0) {
185  /* Otherwise, check if this is one of the special indices where we transition from `a_1` to `a_2`,
186  * from `a_2` to `b_1`, or from `b_1` to `b_2`. (For small proofs there is only one transition,
187  * from `a` to `b`.) */
188  if (idx % (ctx->vec_len / grouping) == 0) {
189  const size_t abinv_idx = idx / (ctx->vec_len / grouping) - 1;
190  size_t prev_cache_idx;
191  /* Check if it's the even specialer index where we're transitioning from `a`s to `b`s, from
192  * `G`s to `H`s, and from `x_k^2`s to `x_k^-2`s. In rangeproof and circuit applications,
193  * the caller secretly has a variable `y` such that `H_i` is really `y^-i H_i` for `i` ranging
194  * from 0 to `n-1`. Rather than forcing the caller to tweak every `H_i` herself, which would
195  * be very slow and prevent precomputation, we instead multiply our cached `x_k^-2` values
196  * by `y^(-2^k)` respectively, which will ultimately result in every `s_i` we return having
197  * been multiplied by `y^-i`.
198  *
199  * This is an underhanded trick but the result is that all `n` powers of `y^-i` show up
200  * in the right place, and we only need log-many scalar squarings and multiplications.
201  */
202  if (idx == ctx->vec_len) {
203  secp256k1_scalar yinvn = ctx->proof[i].proof->yinv;
204  size_t j;
205  prev_cache_idx = secp256k1_popcountl(idx - 1);
206  for (j = 0; j < (size_t) secp256k1_ctzl(idx) - lg_grouping; j++) {
207  secp256k1_scalar_mul(&ctx->proof[i].xsqinvy[j], &ctx->proof[i].xsqinv[j], &yinvn);
208  secp256k1_scalar_sqr(&yinvn, &yinvn);
209  }
210  if (lg_grouping == 1) {
211  secp256k1_scalar_mul(&ctx->proof[i].abinv[2], &ctx->proof[i].abinv[2], &yinvn);
212  secp256k1_scalar_sqr(&yinvn, &yinvn);
213  }
214  } else {
215  prev_cache_idx = cache_idx - 1;
216  }
217  /* Regardless of specialness, we multiply by `a_2/a_1` or whatever the appropriate multiplier
218  * is. We expect the caller to have given these to us in the `ctx->proof[i].abinv` array. */
219  secp256k1_scalar_mul(
220  &ctx->proof[i].xcache[cache_idx],
221  &ctx->proof[i].xcache[prev_cache_idx],
222  &ctx->proof[i].abinv[abinv_idx]
223  );
224  /* If it's *not* a special index, just multiply by the appropriate `x_k^2`, or `x_k^-2` in case
225  * we're in the `H_i` half of the multiexp. At this point we can explain the index `k`, which
226  * is computed in the variable `xsq_idx` (`xsqinv_idx` respectively). In light of our discussion
227  * of `j`, we see that this should be "the least significant bit that's 1 in `i` but not `i-1`."
228  * In other words, it is the number of trailing 0 bits in the index `i`. */
229  } else if (idx < ctx->vec_len) {
230  const size_t xsq_idx = secp256k1_ctzl(idx);
231  secp256k1_scalar_mul(&ctx->proof[i].xcache[cache_idx], &ctx->proof[i].xcache[cache_idx - 1], &ctx->proof[i].xsq[xsq_idx]);
232  } else {
233  const size_t xsqinv_idx = secp256k1_ctzl(idx);
234  secp256k1_scalar_mul(&ctx->proof[i].xcache[cache_idx], &ctx->proof[i].xcache[cache_idx - 1], &ctx->proof[i].xsqinvy[xsqinv_idx]);
235  }
236  }
237  term = ctx->proof[i].xcache[cache_idx];
238 
239  /* One last trick: compute `x_k^-2` while computing the `G_i` scalars, so that they'll be
240  * available when we need them for the `H_i` scalars. We can do this for every `i` value
241  * that has exactly one 0-bit, i.e. which is a product of all `x_i`s and one `x_k^-1`. By
242  * multiplying that by the special value `prod_{i=1}^n x_i^-1` we obtain simply `x_k^-2`.
243  * We expect the caller to give us this special value in `ctx->proof[i].xsqinv_mask`. */
244  if (idx < ctx->vec_len / grouping && secp256k1_popcountl(idx) == ctx->lg_vec_len - 1) {
245  const size_t xsqinv_idx = secp256k1_ctzl(~idx);
246  secp256k1_scalar_mul(&ctx->proof[i].xsqinv[xsqinv_idx], &ctx->proof[i].xcache[cache_idx], &ctx->proof[i].xsqinv_mask);
247  }
248 
249  /* Finally, if the caller, in its computation of `P`, wants to multiply `G_i` or `H_i` by some scalar,
250  * we add that to our sum as well. Again, we trust the randomization in `xcache[0]` to prevent any
251  * cancellation attacks here. */
252  if (ctx->proof[i].proof->rangeproof_cb != NULL) {
253  secp256k1_scalar rangeproof_offset;
254  if ((ctx->proof[i].proof->rangeproof_cb)(&rangeproof_offset, NULL, &ctx->randomizer[i], idx, ctx->proof[i].proof->rangeproof_cb_data) != 1) {
255  return 0;
256  }
257  secp256k1_scalar_add(&term, &term, &rangeproof_offset);
258  }
259 
260  secp256k1_scalar_add(sc, sc, &term);
261  }
262  /* Next 2lgN points are the L and R vectors */
263  } else if (idx < 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs)) {
264  size_t real_idx = idx - 2 * ctx->vec_len;
265  const size_t proof_idx = real_idx / (2 * ctx->lg_vec_len);
266  real_idx = real_idx % (2 * ctx->lg_vec_len);
267  if (!secp256k1_bulletproof_deserialize_point(
268  pt,
269  ctx->proof[proof_idx].serialized_lr,
270  real_idx,
271  2 * ctx->lg_vec_len
272  )) {
273  return 0;
274  }
275  if (idx % 2 == 0) {
276  *sc = ctx->proof[proof_idx].xsq[real_idx / 2];
277  } else {
278  *sc = ctx->proof[proof_idx].xsqinv[real_idx / 2];
279  }
280  secp256k1_scalar_mul(sc, sc, &ctx->randomizer[proof_idx]);
281  /* After the G's, H's, L's and R's, do the blinding_gen */
282  } else if (idx == 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs)) {
283  *sc = ctx->p_offs;
284  *pt = *ctx->g;
285  /* Remaining points are whatever the rangeproof wants */
286  } else if (ctx->shared_g && idx == 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs) + 1) {
287  /* Special case: the first extra point is independent of the proof, for both rangeproof and circuit */
288  size_t i;
289  secp256k1_scalar_clear(sc);
290  for (i = 0; i < ctx->n_proofs; i++) {
291  secp256k1_scalar term;
292  if ((ctx->proof[i].proof->rangeproof_cb)(&term, pt, &ctx->randomizer[i], 2 * (ctx->vec_len + ctx->lg_vec_len), ctx->proof[i].proof->rangeproof_cb_data) != 1) {
293  return 0;
294  }
295  secp256k1_scalar_add(sc, sc, &term);
296  }
297  } else {
298  size_t proof_idx = 0;
299  size_t real_idx = idx - 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs) - 1 - !!ctx->shared_g;
300  while (real_idx >= ctx->proof[proof_idx].proof->n_extra_rangeproof_points - !!ctx->shared_g) {
301  real_idx -= ctx->proof[proof_idx].proof->n_extra_rangeproof_points - !!ctx->shared_g;
302  proof_idx++;
303  VERIFY_CHECK(proof_idx < ctx->n_proofs);
304  }
305  if ((ctx->proof[proof_idx].proof->rangeproof_cb)(sc, pt, &ctx->randomizer[proof_idx], 2 * (ctx->vec_len + ctx->lg_vec_len), ctx->proof[proof_idx].proof->rangeproof_cb_data) != 1) {
306  return 0;
307  }
308  }
309 
310  return 1;
311 }
312 
313 /* nb For security it is essential that `commit_inp` already commit to all data
314  * needed to compute `P`. We do not hash it in during verification since `P`
315  * may be specified indirectly as a bunch of scalar offsets.
316  */
317 static int secp256k1_bulletproof_inner_product_verify_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, const secp256k1_bulletproof_generators *gens, size_t vec_len, const secp256k1_bulletproof_innerproduct_context *proof, size_t n_proofs, size_t plen, int shared_g) {
320  unsigned char commit[32];
321  size_t total_n_points = 2 * vec_len + !!shared_g + 1; /* +1 for shared G (value_gen), +1 for H (blinding_gen) */
323  secp256k1_scalar zero;
324  size_t i;
325 
327  return 0;
328  }
329 
330  if (n_proofs == 0) {
331  return 1;
332  }
333 
334  if (!secp256k1_scratch_allocate_frame(scratch, n_proofs * (sizeof(*ecmult_data.randomizer) + sizeof(*ecmult_data.proof)), 2)) {
335  return 0;
336  }
337 
338  secp256k1_scalar_clear(&zero);
339  ecmult_data.n_proofs = n_proofs;
340  ecmult_data.g = gens->blinding_gen;
341  ecmult_data.geng = gens->gens;
342  ecmult_data.genh = gens->gens + gens->n / 2;
343  ecmult_data.vec_len = vec_len;
344  ecmult_data.lg_vec_len = secp256k1_floor_lg(2 * vec_len / IP_AB_SCALARS);
345  ecmult_data.shared_g = shared_g;
346  ecmult_data.randomizer = (secp256k1_scalar *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*ecmult_data.randomizer));
347  ecmult_data.proof = (secp256k1_bulletproof_innerproduct_vfy_data *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*ecmult_data.proof));
348  /* Seed RNG for per-proof randomizers */
349  secp256k1_sha256_initialize(&sha256);
350  for (i = 0; i < n_proofs; i++) {
351  secp256k1_sha256_write(&sha256, proof[i].proof, plen);
352  secp256k1_sha256_write(&sha256, proof[i].commit, 32);
353  secp256k1_scalar_get_b32(commit, &proof[i].p_offs);
354  secp256k1_sha256_write(&sha256, commit, 32);
355  }
356  secp256k1_sha256_finalize(&sha256, commit);
357 
358  secp256k1_scalar_clear(&ecmult_data.p_offs);
359  for (i = 0; i < n_proofs; i++) {
360  const unsigned char *serproof = proof[i].proof;
361  unsigned char proof_commit[32];
362  secp256k1_scalar dot;
364  secp256k1_scalar negprod;
366  int overflow;
367  size_t j;
368  const size_t n_ab = 2 * vec_len < IP_AB_SCALARS ? 2 * vec_len : IP_AB_SCALARS;
369 
370  total_n_points += 2 * ecmult_data.lg_vec_len + proof[i].n_extra_rangeproof_points - !!shared_g; /* -1 for shared G */
371 
372  /* Extract dot product, will always be the first 32 bytes */
373  secp256k1_scalar_set_b32(&dot, serproof, &overflow);
374  if (overflow) {
375  secp256k1_scratch_deallocate_frame(scratch);
376  return 0;
377  }
378  /* Commit to dot product */
379  secp256k1_sha256_initialize(&sha256);
380  secp256k1_sha256_write(&sha256, proof[i].commit, 32);
381  secp256k1_sha256_write(&sha256, serproof, 32);
382  secp256k1_sha256_finalize(&sha256, proof_commit);
383  serproof += 32;
384 
385  /* Extract a, b */
386  for (j = 0; j < n_ab; j++) {
387  secp256k1_scalar_set_b32(&ab[j], serproof, &overflow);
388  if (overflow) {
389  secp256k1_scratch_deallocate_frame(scratch);
390  return 0;
391  }
392  /* TODO our verifier currently bombs out with zeros because it uses
393  * scalar inverses gratuitously. Fix that. */
394  if (secp256k1_scalar_is_zero(&ab[j])) {
395  secp256k1_scratch_deallocate_frame(scratch);
396  return 0;
397  }
398  serproof += 32;
399  }
400  secp256k1_scalar_dot_product(&negprod, &ab[0], &ab[n_ab / 2], n_ab / 2);
401 
402  ecmult_data.proof[i].proof = &proof[i];
403  /* set per-proof randomizer */
404  secp256k1_sha256_initialize(&sha256);
405  secp256k1_sha256_write(&sha256, commit, 32);
406  secp256k1_sha256_finalize(&sha256, commit);
407  secp256k1_scalar_set_b32(&ecmult_data.randomizer[i], commit, &overflow);
408  if (overflow || secp256k1_scalar_is_zero(&ecmult_data.randomizer[i])) {
409  /* cryptographically unreachable */
410  secp256k1_scratch_deallocate_frame(scratch);
411  return 0;
412  }
413 
414  /* Compute x*(dot - a*b) for each proof; add it and p_offs to the p_offs accumulator */
415  secp256k1_scalar_set_b32(&x, proof_commit, &overflow);
416  if (overflow || secp256k1_scalar_is_zero(&x)) {
417  secp256k1_scratch_deallocate_frame(scratch);
418  return 0;
419  }
420  secp256k1_scalar_negate(&negprod, &negprod);
421  secp256k1_scalar_add(&negprod, &negprod, &dot);
422  secp256k1_scalar_mul(&x, &x, &negprod);
423  secp256k1_scalar_add(&x, &x, &proof[i].p_offs);
424 
425  secp256k1_scalar_mul(&x, &x, &ecmult_data.randomizer[i]);
426  secp256k1_scalar_add(&ecmult_data.p_offs, &ecmult_data.p_offs, &x);
427 
428  /* Special-case: trivial proofs are valid iff the explicitly revealed scalars
429  * dot to the explicitly revealed dot product. */
430  if (2 * vec_len <= IP_AB_SCALARS) {
431  if (!secp256k1_scalar_is_zero(&negprod)) {
432  secp256k1_scratch_deallocate_frame(scratch);
433  return 0;
434  }
435  /* remaining data does not (and cannot) be computed for proofs with no a's or b's. */
436  if (vec_len == 0) {
437  continue;
438  }
439  }
440 
441  /* Compute the inverse product and the array of squares; the rest will be filled
442  * in by the callback during the multiexp. */
443  ecmult_data.proof[i].serialized_lr = serproof; /* bookmark L/R location in proof */
444  negprod = ab[n_ab - 1];
445  ab[n_ab - 1] = ecmult_data.randomizer[i]; /* build r * x1 * x2 * ... * xn in last slot of `ab` array */
446  for (j = 0; j < ecmult_data.lg_vec_len; j++) {
447  secp256k1_scalar xi;
448  const size_t lidx = 2 * j;
449  const size_t ridx = 2 * j + 1;
450  const size_t bitveclen = (2 * ecmult_data.lg_vec_len + 7) / 8;
451  const unsigned char lrparity = 2 * !!(serproof[lidx / 8] & (1 << (lidx % 8))) + !!(serproof[ridx / 8] & (1 << (ridx % 8)));
452  /* Map commit -> H(commit || LR parity || Lx || Rx), compute xi from it */
453  secp256k1_sha256_initialize(&sha256);
454  secp256k1_sha256_write(&sha256, proof_commit, 32);
455  secp256k1_sha256_write(&sha256, &lrparity, 1);
456  secp256k1_sha256_write(&sha256, &serproof[32 * lidx + bitveclen], 32);
457  secp256k1_sha256_write(&sha256, &serproof[32 * ridx + bitveclen], 32);
458  secp256k1_sha256_finalize(&sha256, proof_commit);
459 
460  secp256k1_scalar_set_b32(&xi, proof_commit, &overflow);
461  if (overflow || secp256k1_scalar_is_zero(&xi)) {
462  secp256k1_scratch_deallocate_frame(scratch);
463  return 0;
464  }
465  secp256k1_scalar_mul(&ab[n_ab - 1], &ab[n_ab - 1], &xi);
466  secp256k1_scalar_sqr(&ecmult_data.proof[i].xsq[j], &xi);
467  }
468  /* Compute inverse of all a's and b's, except the last b whose inverse is not needed.
469  * Also compute the inverse of (-r * x1 * ... * xn) which will be needed */
470  secp256k1_scalar_inverse_all_var(ecmult_data.proof[i].abinv, ab, n_ab);
471  ab[n_ab - 1] = negprod;
472 
473  /* Compute (-a0 * r * x1 * ... * xn)^-1 which will be used to mask out individual x_i^-2's */
474  secp256k1_scalar_negate(&ecmult_data.proof[i].xsqinv_mask, &ecmult_data.proof[i].abinv[0]);
475  secp256k1_scalar_mul(&ecmult_data.proof[i].xsqinv_mask, &ecmult_data.proof[i].xsqinv_mask, &ecmult_data.proof[i].abinv[n_ab - 1]);
476 
477  /* Compute each scalar times the previous' inverse, which is used to switch between a's and b's */
478  for (j = n_ab - 1; j > 0; j--) {
479  size_t prev_idx;
480  if (j == n_ab / 2) {
481  prev_idx = j - 1; /* we go from a_{n-1} to b_0 */
482  } else {
483  prev_idx = j & (j - 1); /* but from a_i' to a_i, where i' is i with its lowest set bit unset */
484  }
485  secp256k1_scalar_mul(
486  &ecmult_data.proof[i].abinv[j - 1],
487  &ecmult_data.proof[i].abinv[prev_idx],
488  &ab[j]
489  );
490  }
491 
492  /* Extract -a0 * r * (x1 * ... * xn)^-1 which is our first coefficient. Use negprod as a dummy */
493  secp256k1_scalar_mul(&negprod, &ecmult_data.randomizer[i], &ab[0]); /* r*a */
494  secp256k1_scalar_sqr(&negprod, &negprod); /* (r*a)^2 */
495  secp256k1_scalar_mul(&ecmult_data.proof[i].xcache[0], &ecmult_data.proof[i].xsqinv_mask, &negprod); /* -a * r * (x1 * x2 * ... * xn)^-1 */
496  }
497 
498  /* Do the multiexp */
499  if (secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &r, NULL, secp256k1_bulletproof_innerproduct_vfy_ecmult_callback, (void *) &ecmult_data, total_n_points) != 1) {
500  secp256k1_scratch_deallocate_frame(scratch);
501  return 0;
502  }
503  secp256k1_scratch_deallocate_frame(scratch);
504  return secp256k1_gej_is_infinity(&r);
505 }
506 
507 typedef struct {
514  const secp256k1_ge *g;
518  size_t grouping;
519  size_t n;
521 
522 /* At each level i of recursion (i from 0 upto lg(vector size) - 1)
523  * L = a_even . G_odd + b_odd . H_even (18)
524  * which, by expanding the generators into the original G's and H's
525  * and setting n = (1 << i), can be computed as follows:
526  *
527  * For j from 1 to [vector size],
528  * 1. Use H[j] or G[j] as generator, starting with H and switching
529  * every n.
530  * 2. Start with b1 with H and a0 with G, and increment by 2 each switch.
531  * 3. For k = 1, 2, 4, ..., n/2, use the same algorithm to choose
532  * between a and b to choose between x and x^-1, except using
533  * k in place of n. With H's choose x then x^-1, with G's choose
534  * x^-1 then x.
535  *
536  * For R everything is the same except swap G/H and a/b and x/x^-1.
537  */
538 static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_l(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
540  const size_t ab_idx = (idx / ctx->grouping) ^ 1;
541  size_t i;
542 
543  /* Special-case the primary generator */
544  if (idx == ctx->n) {
545  *pt = *ctx->g;
546  *sc = ctx->g_sc;
547  return 1;
548  }
549 
550  /* steps 1/2 */
551  if ((idx / ctx->grouping) % 2 == 0) {
552  *pt = ctx->genh[idx];
553  *sc = ctx->b[ab_idx];
554  /* Map h -> h' (eqn 59) */
555  secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
556  } else {
557  *pt = ctx->geng[idx];
558  *sc = ctx->a[ab_idx];
559  }
560 
561  /* step 3 */
562  for (i = 0; (1u << i) < ctx->grouping; i++) {
563  size_t grouping = (1u << i);
564  if ((((idx / grouping) % 2) ^ ((idx / ctx->grouping) % 2)) == 0) {
565  secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
566  } else {
567  secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
568  }
569  }
570 
571  secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv);
572  return 1;
573 }
574 
575 /* Identical code except `== 0` changed to `== 1` twice, and the
576  * `+ 1` from Step 1/2 was moved to the other if branch. */
577 static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_r(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
579  const size_t ab_idx = (idx / ctx->grouping) ^ 1;
580  size_t i;
581 
582  /* Special-case the primary generator */
583  if (idx == ctx->n) {
584  *pt = *ctx->g;
585  *sc = ctx->g_sc;
586  return 1;
587  }
588 
589  /* steps 1/2 */
590  if ((idx / ctx->grouping) % 2 == 1) {
591  *pt = ctx->genh[idx];
592  *sc = ctx->b[ab_idx];
593  /* Map h -> h' (eqn 59) */
594  secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
595  } else {
596  *pt = ctx->geng[idx];
597  *sc = ctx->a[ab_idx];
598  }
599 
600  /* step 3 */
601  for (i = 0; (1u << i) < ctx->grouping; i++) {
602  size_t grouping = (1u << i);
603  if ((((idx / grouping) % 2) ^ ((idx / ctx->grouping) % 2)) == 1) {
604  secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
605  } else {
606  secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
607  }
608  }
609 
610  secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv);
611  return 1;
612 }
613 
614 static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_g(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
616  size_t i;
617 
618  *pt = ctx->geng[idx];
619  secp256k1_scalar_set_int(sc, 1);
620  for (i = 0; (1u << i) <= ctx->grouping; i++) {
621  if (idx & (1u << i)) {
622  secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
623  } else {
624  secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
625  }
626  }
627  return 1;
628 }
629 
630 static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_h(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
632  size_t i;
633 
634  *pt = ctx->genh[idx];
635  secp256k1_scalar_set_int(sc, 1);
636  for (i = 0; (1u << i) <= ctx->grouping; i++) {
637  if (idx & (1u << i)) {
638  secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
639  } else {
640  secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
641  }
642  }
643  secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
644  secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv);
645  return 1;
646 }
647 
648 /* These proofs are not zero-knowledge. There is no need to worry about constant timeness.
649  * `commit_inp` must contain 256 bits of randomness, it is used immediately as a randomizer.
650  */
651 static int secp256k1_bulletproof_inner_product_real_prove_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, secp256k1_ge *out_pt, size_t *pt_idx, const secp256k1_ge *g, secp256k1_ge *geng, secp256k1_ge *genh, secp256k1_scalar *a_arr, secp256k1_scalar *b_arr, const secp256k1_scalar *yinv, const secp256k1_scalar *ux, const size_t n, unsigned char *commit) {
652  size_t i;
653  size_t halfwidth;
654 
656  pfdata.yinv = *yinv;
657  pfdata.g = g;
658  pfdata.geng = geng;
659  pfdata.genh = genh;
660  pfdata.a = a_arr;
661  pfdata.b = b_arr;
662  pfdata.n = n;
663 
664  /* Protocol 1: Iterate, halving vector size until it is 1 */
665  for (halfwidth = n / 2, i = 0; halfwidth > IP_AB_SCALARS / 4; halfwidth /= 2, i++) {
666  secp256k1_gej tmplj, tmprj;
667  size_t j;
668  int overflow;
669 
670  pfdata.grouping = 1u << i;
671 
672  /* L */
673  secp256k1_scalar_clear(&pfdata.g_sc);
674  for (j = 0; j < halfwidth; j++) {
675  secp256k1_scalar prod;
676  secp256k1_scalar_mul(&prod, &a_arr[2*j], &b_arr[2*j + 1]);
677  secp256k1_scalar_add(&pfdata.g_sc, &pfdata.g_sc, &prod);
678  }
679  secp256k1_scalar_mul(&pfdata.g_sc, &pfdata.g_sc, ux);
680 
681  secp256k1_scalar_set_int(&pfdata.yinvn, 1);
682  secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &tmplj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_l, (void *) &pfdata, n + 1);
683  secp256k1_ge_set_gej(&out_pt[(*pt_idx)++], &tmplj);
684 
685  /* R */
686  secp256k1_scalar_clear(&pfdata.g_sc);
687  for (j = 0; j < halfwidth; j++) {
688  secp256k1_scalar prod;
689  secp256k1_scalar_mul(&prod, &a_arr[2*j + 1], &b_arr[2*j]);
690  secp256k1_scalar_add(&pfdata.g_sc, &pfdata.g_sc, &prod);
691  }
692  secp256k1_scalar_mul(&pfdata.g_sc, &pfdata.g_sc, ux);
693 
694  secp256k1_scalar_set_int(&pfdata.yinvn, 1);
695  secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &tmprj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_r, (void *) &pfdata, n + 1);
696  secp256k1_ge_set_gej(&out_pt[(*pt_idx)++], &tmprj);
697 
698  /* x, x^2, x^-1, x^-2 */
699  secp256k1_bulletproof_update_commit(commit, &out_pt[*pt_idx - 2], &out_pt[*pt_idx] - 1);
700  secp256k1_scalar_set_b32(&pfdata.x[i], commit, &overflow);
701  if (overflow || secp256k1_scalar_is_zero(&pfdata.x[i])) {
702  return 0;
703  }
704  secp256k1_scalar_inverse_var(&pfdata.xinv[i], &pfdata.x[i]);
705 
706  /* update scalar array */
707  for (j = 0; j < halfwidth; j++) {
708  secp256k1_scalar tmps;
709  secp256k1_scalar_mul(&a_arr[2*j], &a_arr[2*j], &pfdata.x[i]);
710  secp256k1_scalar_mul(&tmps, &a_arr[2*j + 1], &pfdata.xinv[i]);
711  secp256k1_scalar_add(&a_arr[j], &a_arr[2*j], &tmps);
712 
713  secp256k1_scalar_mul(&b_arr[2*j], &b_arr[2*j], &pfdata.xinv[i]);
714  secp256k1_scalar_mul(&tmps, &b_arr[2*j + 1], &pfdata.x[i]);
715  secp256k1_scalar_add(&b_arr[j], &b_arr[2*j], &tmps);
716 
717  }
718 
719  /* Combine G generators and recurse, if that would be more optimal */
720  if ((n > 2048 && i == 3) || (n > 128 && i == 2) || (n > 32 && i == 1)) {
721  secp256k1_scalar yinv2;
722 
723  for (j = 0; j < halfwidth; j++) {
724  secp256k1_gej rj;
725  secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &rj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_g, (void *) &pfdata, 2u << i);
726  pfdata.geng += 2u << i;
727  secp256k1_ge_set_gej(&geng[j], &rj);
728  secp256k1_scalar_set_int(&pfdata.yinvn, 1);
729  secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &rj, NULL, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_h, (void *) &pfdata, 2u << i);
730  pfdata.genh += 2u << i;
731  secp256k1_ge_set_gej(&genh[j], &rj);
732  }
733 
734  secp256k1_scalar_sqr(&yinv2, yinv);
735  for (j = 0; j < i; j++) {
736  secp256k1_scalar_sqr(&yinv2, &yinv2);
737  }
738  if (!secp256k1_bulletproof_inner_product_real_prove_impl(ecmult_ctx, scratch, out_pt, pt_idx, g, geng, genh, a_arr, b_arr, &yinv2, ux, halfwidth, commit)) {
739  return 0;
740  }
741  break;
742  }
743  }
744  return 1;
745 }
746 
747 static int secp256k1_bulletproof_inner_product_prove_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, unsigned char *proof, size_t *proof_len, const secp256k1_bulletproof_generators *gens, const secp256k1_scalar *yinv, const size_t n, secp256k1_ecmult_multi_callback *cb, void *cb_data, const unsigned char *commit_inp) {
749  size_t i;
750  unsigned char commit[32];
751  secp256k1_scalar *a_arr;
752  secp256k1_scalar *b_arr;
753  secp256k1_ge *out_pt;
754  secp256k1_ge *geng;
755  secp256k1_ge *genh;
756  secp256k1_scalar ux;
757  int overflow;
758  size_t pt_idx = 0;
759  secp256k1_scalar dot;
760  size_t half_n_ab = n < IP_AB_SCALARS / 2 ? n : IP_AB_SCALARS / 2;
761 
763  return 0;
764  }
766 
767  /* Special-case lengths 0 and 1 whose proofs are just explicit lists of scalars */
768  if (n <= IP_AB_SCALARS / 2) {
771 
772  for (i = 0; i < n; i++) {
773  cb(&a[i], NULL, 2*i, cb_data);
774  cb(&b[i], NULL, 2*i+1, cb_data);
775  }
776 
777  secp256k1_scalar_dot_product(&dot, a, b, n);
778  secp256k1_scalar_get_b32(proof, &dot);
779 
780  for (i = 0; i < n; i++) {
781  secp256k1_scalar_get_b32(&proof[32 * (i + 1)], &a[i]);
782  secp256k1_scalar_get_b32(&proof[32 * (i + n + 1)], &b[i]);
783  }
784  VERIFY_CHECK(*proof_len == 32 * (2 * n + 1));
785  return 1;
786  }
787 
788  /* setup for nontrivial proofs */
789  if (!secp256k1_scratch_allocate_frame(scratch, 2 * n * (sizeof(secp256k1_scalar) + sizeof(secp256k1_ge)) + 2 * secp256k1_floor_lg(n) * sizeof(secp256k1_ge), 5)) {
790  return 0;
791  }
792 
793  a_arr = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_scalar));
794  b_arr = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_scalar));
795  geng = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_ge));
796  genh = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_ge));
797  out_pt = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, 2 * secp256k1_floor_lg(n) * sizeof(secp256k1_ge));
798  VERIFY_CHECK(a_arr != NULL);
799  VERIFY_CHECK(b_arr != NULL);
800  VERIFY_CHECK(gens != NULL);
801 
802  for (i = 0; i < n; i++) {
803  cb(&a_arr[i], NULL, 2*i, cb_data);
804  cb(&b_arr[i], NULL, 2*i+1, cb_data);
805  geng[i] = gens->gens[i];
806  genh[i] = gens->gens[i + gens->n/2];
807  }
808 
809  /* Record final dot product */
810  secp256k1_scalar_dot_product(&dot, a_arr, b_arr, n);
811  secp256k1_scalar_get_b32(proof, &dot);
812 
813  /* Protocol 2: hash dot product to obtain G-randomizer */
814  secp256k1_sha256_initialize(&sha256);
815  secp256k1_sha256_write(&sha256, commit_inp, 32);
816  secp256k1_sha256_write(&sha256, proof, 32);
817  secp256k1_sha256_finalize(&sha256, commit);
818 
819  proof += 32;
820 
821  secp256k1_scalar_set_b32(&ux, commit, &overflow);
822  if (overflow || secp256k1_scalar_is_zero(&ux)) {
823  /* cryptographically unreachable */
824  secp256k1_scratch_deallocate_frame(scratch);
825  return 0;
826  }
827 
828  if (!secp256k1_bulletproof_inner_product_real_prove_impl(ecmult_ctx, scratch, out_pt, &pt_idx, gens->blinding_gen, geng, genh, a_arr, b_arr, yinv, &ux, n, commit)) {
829  secp256k1_scratch_deallocate_frame(scratch);
830  return 0;
831  }
832 
833  /* Final a/b values */
834  for (i = 0; i < half_n_ab; i++) {
835  secp256k1_scalar_get_b32(&proof[32 * i], &a_arr[i]);
836  secp256k1_scalar_get_b32(&proof[32 * (i + half_n_ab)], &b_arr[i]);
837  }
838  proof += 64 * half_n_ab;
839  secp256k1_bulletproof_serialize_points(proof, out_pt, pt_idx);
840 
841  secp256k1_scratch_deallocate_frame(scratch);
842  return 1;
843 }
844 
845 #undef IP_AB_SCALARS
846 
847 #endif
secp256k1_scratch_space_struct2
Definition: scratch.h:14
secp256k1_ecmult_multi_callback
int() secp256k1_ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data)
Definition: ecmult.h:33
secp256k1_bulletproof_innerproduct_vfy_ecmult_context::shared_g
int shared_g
Definition: inner_product_impl.h:104
secp256k1_bulletproof_innerproduct_vfy_data::xsq
secp256k1_scalar xsq[SECP256K1_BULLETPROOF_MAX_DEPTH+1]
Definition: inner_product_impl.h:87
secp256k1_bulletproof_innerproduct_vfy_ecmult_context::p_offs
secp256k1_scalar p_offs
Definition: inner_product_impl.h:98
secp256k1_bulletproof_innerproduct_pf_ecmult_context::genh
const secp256k1_ge * genh
Definition: inner_product_impl.h:513
VERIFY_CHECK
#define VERIFY_CHECK(cond)
Definition: util.h:61
secp256k1_bulletproof_innerproduct_pf_ecmult_context::n
size_t n
Definition: inner_product_impl.h:519
secp256k1_bulletproof_innerproduct_vfy_data
Definition: inner_product_impl.h:84
secp256k1_bulletproof_innerproduct_vfy_ecmult_context
Definition: inner_product_impl.h:96
secp256k1_bulletproof_innerproduct_pf_ecmult_context::x
secp256k1_scalar x[SECP256K1_BULLETPROOF_MAX_DEPTH]
Definition: inner_product_impl.h:508
b
void const uint64_t * b
Definition: field_5x52_asm_impl.h:10
secp256k1_bulletproof_generators::n
size_t n
Definition: main_impl.h:55
secp256k1_bulletproof_innerproduct_context::p_offs
secp256k1_scalar p_offs
Definition: inner_product_impl.h:75
sha256
Internal SHA-256 implementation.
Definition: sha256.cpp:15
secp256k1_bulletproof_innerproduct_context
Definition: inner_product_impl.h:73
secp256k1_bulletproof_innerproduct_context::proof
const unsigned char * proof
Definition: inner_product_impl.h:74
secp256k1_sha256
Definition: hash.h:13
secp256k1_bulletproof_innerproduct_vfy_data::xsqinv_mask
secp256k1_scalar xsqinv_mask
Definition: inner_product_impl.h:91
secp256k1_bulletproof_innerproduct_vfy_ecmult_context::geng
const secp256k1_ge * geng
Definition: inner_product_impl.h:100
secp256k1_bulletproof_innerproduct_vfy_data::proof
const secp256k1_bulletproof_innerproduct_context * proof
Definition: inner_product_impl.h:85
secp256k1_bulletproof_innerproduct_vfy_ecmult_context::randomizer
secp256k1_scalar * randomizer
Definition: inner_product_impl.h:105
secp256k1_bulletproof_generators::gens
secp256k1_ge * gens
Definition: main_impl.h:57
r
void const uint64_t uint64_t * r
Definition: field_5x52_asm_impl.h:10
secp256k1_bulletproof_innerproduct_context::rangeproof_cb
secp256k1_bulletproof_vfy_callback * rangeproof_cb
Definition: inner_product_impl.h:78
secp256k1_scalar
A scalar modulo the group order of the secp256k1 curve.
Definition: scalar_4x64.h:13
secp256k1_bulletproof_innerproduct_pf_ecmult_context::yinvn
secp256k1_scalar yinvn
Definition: inner_product_impl.h:511
secp256k1_bulletproof_innerproduct_vfy_ecmult_context::lg_vec_len
size_t lg_vec_len
Definition: inner_product_impl.h:103
secp256k1_gej
A group element of the secp256k1 curve, in jacobian coordinates.
Definition: group.h:24
secp256k1_bulletproof_innerproduct_proof_length
size_t secp256k1_bulletproof_innerproduct_proof_length(size_t n)
Definition: inner_product_impl.h:109
secp256k1_bulletproof_innerproduct_vfy_ecmult_context::vec_len
size_t vec_len
Definition: inner_product_impl.h:102
secp256k1_bulletproof_innerproduct_pf_ecmult_context::a
const secp256k1_scalar * a
Definition: inner_product_impl.h:515
secp256k1_bulletproof_innerproduct_vfy_data::abinv
secp256k1_scalar abinv[IP_AB_SCALARS]
Definition: inner_product_impl.h:86
secp256k1_bulletproof_innerproduct_context::rangeproof_cb_data
void * rangeproof_cb_data
Definition: inner_product_impl.h:79
IP_AB_SCALARS
#define IP_AB_SCALARS
Definition: inner_product_impl.h:23
secp256k1_bulletproof_innerproduct_vfy_ecmult_context::proof
secp256k1_bulletproof_innerproduct_vfy_data * proof
Definition: inner_product_impl.h:106
util.h
secp256k1_bulletproof_innerproduct_pf_ecmult_context::yinv
secp256k1_scalar yinv
Definition: inner_product_impl.h:510
secp256k1_bulletproof_generators::blinding_gen
secp256k1_ge * blinding_gen
Definition: main_impl.h:62
secp256k1_bulletproof_innerproduct_vfy_data::serialized_lr
const unsigned char * serialized_lr
Definition: inner_product_impl.h:92
secp256k1_bulletproof_innerproduct_pf_ecmult_context::geng
const secp256k1_ge * geng
Definition: inner_product_impl.h:512
secp256k1_bulletproof_innerproduct_vfy_ecmult_context::genh
const secp256k1_ge * genh
Definition: inner_product_impl.h:101
secp256k1_bulletproof_innerproduct_vfy_data::xcache
secp256k1_scalar xcache[SECP256K1_BULLETPROOF_MAX_DEPTH+1]
Definition: inner_product_impl.h:90
secp256k1_bulletproof_innerproduct_context::n_extra_rangeproof_points
size_t n_extra_rangeproof_points
Definition: inner_product_impl.h:80
secp256k1_bulletproof_innerproduct_pf_ecmult_context::grouping
size_t grouping
Definition: inner_product_impl.h:518
secp256k1_bulletproof_innerproduct_pf_ecmult_context::g
const secp256k1_ge * g
Definition: inner_product_impl.h:514
secp256k1_bulletproof_innerproduct_pf_ecmult_context::g_sc
secp256k1_scalar g_sc
Definition: inner_product_impl.h:517
main_impl.h
secp256k1_bulletproof_innerproduct_vfy_data::xsqinv
secp256k1_scalar xsqinv[SECP256K1_BULLETPROOF_MAX_DEPTH+1]
Definition: inner_product_impl.h:88
secp256k1_bulletproof_innerproduct_vfy_ecmult_context::n_proofs
size_t n_proofs
Definition: inner_product_impl.h:97
secp256k1_bulletproof_innerproduct_pf_ecmult_context::xinv
secp256k1_scalar xinv[SECP256K1_BULLETPROOF_MAX_DEPTH]
Definition: inner_product_impl.h:509
secp256k1_bulletproof_innerproduct_pf_ecmult_context::b
const secp256k1_scalar * b
Definition: inner_product_impl.h:516
secp256k1_bulletproof_vfy_callback
int() secp256k1_bulletproof_vfy_callback(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data)
Definition: inner_product_impl.h:70
secp256k1_ecmult_context
Definition: ecmult.h:15
secp256k1_bulletproof_innerproduct_vfy_data::xsqinvy
secp256k1_scalar xsqinvy[SECP256K1_BULLETPROOF_MAX_DEPTH+1]
Definition: inner_product_impl.h:89
secp256k1_bulletproof_innerproduct_pf_ecmult_context
Definition: inner_product_impl.h:507
secp256k1_bulletproof_innerproduct_context::yinv
secp256k1_scalar yinv
Definition: inner_product_impl.h:76
secp256k1_ge
A group element of the secp256k1 curve, in affine coordinates.
Definition: group.h:14
SECP256K1_BULLETPROOF_MAX_DEPTH
#define SECP256K1_BULLETPROOF_MAX_DEPTH
Definition: secp256k1_bulletproofs.h:25
secp256k1_bulletproof_generators
Definition: main_impl.h:54
secp256k1_bulletproof_innerproduct_vfy_ecmult_context::g
const secp256k1_ge * g
Definition: inner_product_impl.h:99