barretenberg
Loading...
Searching...
No Matches
biggroup_bn254.hpp
1#pragma once
10namespace proof_system::plonk {
11namespace stdlib {
12
// NOTE(review): this text was captured from generated (Doxygen-style) documentation. Every code
// line carries a fused original line number, and some original lines are missing from the capture:
// line 26 (the function-name line of this definition — presumably
// `bn254_endo_batch_mul_with_generator`, given the extra `generator_scalar` parameter and the
// sibling `bn254_endo_batch_mul` overload called below; confirm against the upstream source) and
// line 169 (which presumably declared `beta_val`, used further down). Code tokens are preserved
// byte-for-byte; only comments have been added. Recover the missing lines from the upstream
// barretenberg repository before attempting to compile this file.
//
// Purpose (as evidenced by the body): compute a batch scalar multiplication
//   sum_i big_scalars[i] * big_points[i] + sum_j small_scalars[j] * small_points[j]
//                                        + generator_scalar * [generator]
// in-circuit, where "big" scalars are split via the BN254 endomorphism (GLV) into two half-width
// scalars and "small" scalars are at most `max_num_small_bits` bits.
23template <class C, class Fq, class Fr, class G>
24template <typename, typename>
25 requires(IsNotGoblinBuilder<C>)
27 const std::vector<element>& big_points,
28 const std::vector<Fr>& big_scalars,
29 const std::vector<element>& small_points,
30 const std::vector<Fr>& small_scalars,
31 const Fr& generator_scalar,
32 const size_t max_num_small_bits)
33{
 // Grab the first non-null builder context attached to any of the big points.
34 C* ctx = nullptr;
35 for (auto element : big_points) {
36 if (element.get_context()) {
37 ctx = element.get_context();
38 break;
39 }
40 }
 // Non-plookup builders have no specialized generator handling here: fold the generator term
 // into the "big" inputs as one extra (point, scalar) pair and delegate to the plain batch mul.
41 if constexpr (!HasPlookup<C>) {
42 // MERGENOTE: these four lines don't have an equivalent in d-b-p
43 std::vector<element> modified_big_points = big_points;
44 std::vector<Fr> modified_big_scalars = big_scalars;
45 modified_big_points.emplace_back(element::one(ctx));
46 modified_big_scalars.emplace_back(generator_scalar);
47 return bn254_endo_batch_mul(
48 modified_big_points, modified_big_scalars, small_points, small_scalars, max_num_small_bits);
49 } else {
 // Plookup path: the quad lookup table below is hard-wired to exactly 4 big points.
50 constexpr size_t NUM_BIG_POINTS = 4;
51 // TODO: handle situation where big points size is not 4 :/
52
 // Build paired lookup tables for the 4 big points: `big_table` for the k1 halves and
 // `endo_table` for the endomorphism (k2) halves of the GLV-split scalars.
53 auto big_table_pair =
54 create_endo_pair_quad_lookup_table({ big_points[0], big_points[1], big_points[2], big_points[3] });
55 auto& big_table = big_table_pair.first;
56 auto& endo_table = big_table_pair.second;
57 batch_lookup_table small_table(small_points);
 // Per-point NAF digit vectors, filled below; index [i][round] selects the digit for round `round`.
58 std::vector<std::vector<bool_t<C>>> big_naf_entries;
59 std::vector<std::vector<bool_t<C>>> endo_naf_entries;
60 std::vector<std::vector<bool_t<C>>> small_naf_entries;
61
 // Split a circuit scalar k into (k1, k2) with k = k1 - beta * k2 via the BN254 endomorphism,
 // computing the split on the native witness value and then constraining the in-circuit
 // relation with `assert_equal` so the prover cannot cheat.
62 const auto split_into_endomorphism_scalars = [ctx](const Fr& scalar) {
63 barretenberg::fr k = scalar.get_value();
64 barretenberg::fr k1(0);
65 barretenberg::fr k2(0);
 // Native split operates on the non-Montgomery representation.
66 barretenberg::fr::split_into_endomorphism_scalars(k.from_montgomery_form(), k1, k2);
67 Fr scalar_k1 = Fr::from_witness(ctx, k1.to_montgomery_form());
68 Fr scalar_k2 = Fr::from_witness(ctx, k2.to_montgomery_form());
69 barretenberg::fr beta = barretenberg::fr::cube_root_of_unity();
 // In-circuit constraint: k == k1 - k2 * beta.
70 scalar.assert_equal(scalar_k1 - scalar_k2 * beta);
71 return std::make_pair<Fr, Fr>((Fr)scalar_k1, (Fr)scalar_k2);
72 };
73
 // GLV-split every big scalar and precompute NAF digits for both halves.
74 for (size_t i = 0; i < NUM_BIG_POINTS; ++i) {
75 const auto [scalar_k1, scalar_k2] = split_into_endomorphism_scalars(big_scalars[i]);
76 big_naf_entries.emplace_back(compute_naf(scalar_k1, max_num_small_bits));
77 endo_naf_entries.emplace_back(compute_naf(scalar_k2, max_num_small_bits));
78 }
79
 // The generator term is also GLV-split, but uses 8-bit windowed NAF over 128 bits against
 // fixed-base tables (one per split half; the `true` flag presumably selects the
 // endomorphism-shifted table — confirm against `eight_bit_fixed_base_table`).
80 const auto [generator_k1, generator_k2] = split_into_endomorphism_scalars(generator_scalar);
81 const std::vector<field_t<C>> generator_wnaf = compute_wnaf<128, 8>(generator_k1);
82 const std::vector<field_t<C>> generator_endo_wnaf = compute_wnaf<128, 8>(generator_k2);
83 const auto generator_table =
84 element::eight_bit_fixed_base_table<>(element::eight_bit_fixed_base_table<>::CurveType::BN254, false);
85 const auto generator_endo_table =
86 element::eight_bit_fixed_base_table<>(element::eight_bit_fixed_base_table<>::CurveType::BN254, true);
87
88 for (size_t i = 0; i < small_points.size(); ++i) {
89 small_naf_entries.emplace_back(compute_naf(small_scalars[i], max_num_small_bits));
90 }
91
92 const size_t num_rounds = max_num_small_bits;
93
 // Offset generators: `.first` is folded into the initial accumulator and `.second` is
 // subtracted at the end — presumably to keep intermediate sums away from incomplete-addition
 // edge cases (point at infinity / doubling); confirm against `compute_offset_generators`.
94 const auto offset_generators = compute_offset_generators(num_rounds);
95
 // Seed the accumulator with the round-0 table entries for all three point groups.
96 auto init_point = element::chain_add(offset_generators.first, small_table.get_chain_initial_entry());
97 init_point = element::chain_add(endo_table[0], init_point);
98 init_point = element::chain_add(big_table[0], init_point);
99
100 element accumulator = element::chain_add_end(init_point);
101
 // For a given NAF round index, gather each point's digit and combine the three table lookups
 // into a single chain-add accumulator to be fed to the Montgomery ladder.
102 const auto get_point_to_add = [&](size_t naf_index) {
103 std::vector<bool_t<C>> small_nafs;
104 std::vector<bool_t<C>> big_nafs;
105 std::vector<bool_t<C>> endo_nafs;
106 for (size_t i = 0; i < small_points.size(); ++i) {
107 small_nafs.emplace_back(small_naf_entries[i][naf_index]);
108 }
109 for (size_t i = 0; i < NUM_BIG_POINTS; ++i) {
110 big_nafs.emplace_back(big_naf_entries[i][naf_index]);
111 endo_nafs.emplace_back(endo_naf_entries[i][naf_index]);
112 }
113
114 auto to_add = small_table.get_chain_add_accumulator(small_nafs);
115 to_add = element::chain_add(big_table.get({ big_nafs[0], big_nafs[1], big_nafs[2], big_nafs[3] }), to_add);
116 to_add =
117 element::chain_add(endo_table.get({ endo_nafs[0], endo_nafs[1], endo_nafs[2], endo_nafs[3] }), to_add);
118 return to_add;
119 };
120
121 // Perform multiple rounds of the montgomery ladder algorithm per "iteration" of our main loop.
122 // This is in order to reduce the number of field reductions required when calling `multiple_montgomery_ladder`
123 constexpr size_t num_rounds_per_iteration = 4;
124
125 // we require that at most one generator wNAF digit is consumed per iteration (digits occur every 8 rounds)
126 static_assert(num_rounds_per_iteration < 8);
127
 // Ceil-divide the rounds into iterations; the final iteration handles the remainder
 // (note it processes one fewer round — the round-0 digit was consumed by the initial entry).
128 size_t num_iterations = num_rounds / num_rounds_per_iteration;
129 num_iterations += ((num_iterations * num_rounds_per_iteration) == num_rounds) ? 0 : 1;
130 const size_t num_rounds_per_final_iteration =
131 (num_rounds - 1) - ((num_iterations - 1) * num_rounds_per_iteration);
132
133 size_t generator_idx = 0;
134 for (size_t i = 0; i < num_iterations; ++i) {
135
136 const size_t inner_num_rounds =
137 (i != num_iterations - 1) ? num_rounds_per_iteration : num_rounds_per_final_iteration;
138 std::vector<element::chain_add_accumulator> to_add;
139
 // `+ 1` skips round 0, already folded into the initial accumulator above.
140 for (size_t j = 0; j < inner_num_rounds; ++j) {
141 to_add.emplace_back(get_point_to_add(i * num_rounds_per_iteration + j + 1));
142 }
143
 // The generator's 8-bit wNAF contributes one digit every 8 rounds; find whether one of this
 // iteration's rounds hits the (round % 8 == 6) slot and, if so, which inner round.
144 bool add_generator_this_round = false;
145 size_t add_idx = 0;
146 for (size_t j = 0; j < inner_num_rounds; ++j) {
147 add_generator_this_round = ((i * num_rounds_per_iteration + j) % 8) == 6;
148 if (add_generator_this_round) {
149 add_idx = j;
150 break;
151 }
152 }
153 if (add_generator_this_round) {
154 to_add[add_idx] = element::chain_add(generator_table[generator_wnaf[generator_idx]], to_add[add_idx]);
155 to_add[add_idx] =
156 element::chain_add(generator_endo_table[generator_endo_wnaf[generator_idx]], to_add[add_idx]);
157 generator_idx++;
158 }
 // One batched double-and-add pass covering `inner_num_rounds` NAF rounds.
159 accumulator = accumulator.multiple_montgomery_ladder(to_add);
160 }
161
 // Skew corrections: index `num_rounds` of each NAF holds the skew bit; conditionally subtract
 // (or add, for the negated endo points) the base point when the scalar was odd-adjusted.
162 for (size_t i = 0; i < small_points.size(); ++i) {
163 element skew = accumulator - small_points[i];
164 Fq out_x = accumulator.x.conditional_select(skew.x, small_naf_entries[i][num_rounds]);
165 Fq out_y = accumulator.y.conditional_select(skew.y, small_naf_entries[i][num_rounds]);
166 accumulator = element(out_x, out_y);
167 }
168
 // NOTE(review): original line 169 (missing from this capture) presumably declared `beta_val`,
 // the base-field cube root of unity as a uint256_t, sliced into two limbs below — confirm
 // against the upstream source.
170 Fq beta(barretenberg::fr(beta_val.slice(0, 136)), barretenberg::fr(beta_val.slice(136, 256)), false);
171
 // Endo skew: the endomorphism maps (x, y) -> (beta * x, y); the `+` (vs `-` above) reflects the
 // sign convention of the k2 half of the GLV split.
172 for (size_t i = 0; i < NUM_BIG_POINTS; ++i) {
173 element skew_point = big_points[i];
174 skew_point.x *= beta;
175 element skew = accumulator + skew_point;
176 Fq out_x = accumulator.x.conditional_select(skew.x, endo_naf_entries[i][num_rounds]);
177 Fq out_y = accumulator.y.conditional_select(skew.y, endo_naf_entries[i][num_rounds]);
178 accumulator = element(out_x, out_y);
179 }
 // Generator skew: the final wNAF entry is the skew flag; table index 128 presumably holds the
 // corresponding correction point — confirm against `eight_bit_fixed_base_table`.
180 {
181 element skew = accumulator - generator_table[128];
182 Fq out_x = accumulator.x.conditional_select(skew.x, bool_t<C>(generator_wnaf[generator_wnaf.size() - 1]));
183 Fq out_y = accumulator.y.conditional_select(skew.y, bool_t<C>(generator_wnaf[generator_wnaf.size() - 1]));
184 accumulator = element(out_x, out_y);
185 }
186 {
187 element skew = accumulator - generator_endo_table[128];
 // NOTE(review): the skew flag is indexed with `generator_wnaf.size() - 1` rather than
 // `generator_endo_wnaf.size() - 1`; both wnafs are computed with identical <128, 8>
 // parameters so the sizes should match, but verify upstream.
188 Fq out_x =
189 accumulator.x.conditional_select(skew.x, bool_t<C>(generator_endo_wnaf[generator_wnaf.size() - 1]));
190 Fq out_y =
191 accumulator.y.conditional_select(skew.y, bool_t<C>(generator_endo_wnaf[generator_wnaf.size() - 1]));
192 accumulator = element(out_x, out_y);
193 }
194
195 for (size_t i = 0; i < NUM_BIG_POINTS; ++i) {
196 element skew = accumulator - big_points[i];
197 Fq out_x = accumulator.x.conditional_select(skew.x, big_naf_entries[i][num_rounds]);
198 Fq out_y = accumulator.y.conditional_select(skew.y, big_naf_entries[i][num_rounds]);
199 accumulator = element(out_x, out_y);
200 }
 // Remove the offset-generator contribution folded in at initialization.
201 accumulator = accumulator - offset_generators.second;
202
203 return accumulator;
204 }
205}
206
// NOTE(review): original line 221 — the function-name line of this definition, presumably
// `bn254_endo_batch_mul(const std::vector<element>& big_points,` given the call site in the
// `_with_generator` variant above — is missing from this documentation capture; the code below
// references `big_points`, so that parameter must appear on the missing line. Code tokens are
// preserved byte-for-byte; only comments have been added.
//
// Purpose (as evidenced by the body): compute
//   sum_i big_scalars[i] * big_points[i] + sum_j small_scalars[j] * small_points[j]
// in-circuit. Each big scalar is GLV-split into two half-width scalars; the k2 half is paired
// with an endomorphism-mapped, negated copy of its point so that only additions are needed.
218template <typename C, class Fq, class Fr, class G>
219template <typename, typename>
220 requires(IsNotGoblinBuilder<C>)
222 const std::vector<Fr>& big_scalars,
223 const std::vector<element>& small_points,
224 const std::vector<Fr>& small_scalars,
225 const size_t max_num_small_bits)
226{
 // The GLV split produces ~128-bit scalar halves, so the shared round count must cover them.
227 ASSERT(max_num_small_bits >= 128);
228 const size_t num_big_points = big_points.size();
229 const size_t num_small_points = small_points.size();
 // Grab the first non-null builder context attached to any of the big points.
230 C* ctx = nullptr;
231 for (auto element : big_points) {
232 if (element.get_context()) {
233 ctx = element.get_context();
234 break;
235 }
236 }
237
 // Flattened worklists: `points`/`scalars` accumulate the k1 halves plus all small pairs;
 // the endo (k2) pairs are appended afterwards so everything runs through one NAF loop.
238 std::vector<element> points;
239 std::vector<Fr> scalars;
240 std::vector<element> endo_points;
241 std::vector<Fr> endo_scalars;
242
 // lambda: cube root of unity in the scalar field Fr; beta: cube root of unity in the base
 // field Fq. The BN254 endomorphism satisfies lambda * (x, y) == (beta * x, y).
254 barretenberg::fr lambda = barretenberg::fr::cube_root_of_unity();
255 barretenberg::fq beta = barretenberg::fq::cube_root_of_unity();
256 for (size_t i = 0; i < num_big_points; ++i) {
257 Fr scalar = big_scalars[i];
258 // Q: is it a problem if wraps? get_value is 512 bits
259 // A: it can't wrap, this method only compiles if the Fr type is a field_t<barretenberg::fr> type
260
261 // Split k into short scalars (scalar_k1, scalar_k2) using bn254 endomorphism.
262 barretenberg::fr k = uint256_t(scalar.get_value());
263 barretenberg::fr k1(0);
264 barretenberg::fr k2(0);
 // Native split operates on the non-Montgomery representation.
265 barretenberg::fr::split_into_endomorphism_scalars(k.from_montgomery_form(), k1, k2);
266 Fr scalar_k1 = Fr::from_witness(ctx, k1.to_montgomery_form());
267 Fr scalar_k2 = Fr::from_witness(ctx, k2.to_montgomery_form());
268
269 // Add copy constraint that validates k1 = scalar_k1 - scalar_k2 * \lambda
270 scalar.assert_equal(scalar_k1 - scalar_k2 * lambda);
271 scalars.push_back(scalar_k1);
272 endo_scalars.push_back(scalar_k2);
273 element point = big_points[i];
274 points.push_back(point);
275
276 // negate the point that maps to the endo scalar `scalar_k2`
277 // instead of computing scalar_k1 * [P] - scalar_k2 * [P], we compute scalar_k1 * [P] + scalar_k2 * [-P]
278 point.y = -point.y;
 // Apply the endomorphism: x -> beta * x (y already negated above).
279 point.x = point.x * Fq(ctx, uint256_t(beta));
280 point.y.self_reduce();
281 endo_points.push_back(point);
282 }
283 for (size_t i = 0; i < num_small_points; ++i) {
284 points.push_back(small_points[i]);
285 scalars.push_back(small_scalars[i]);
286 }
 // Append the endo halves last; ordering only needs to be consistent between the two vectors.
287 std::copy(endo_points.begin(), endo_points.end(), std::back_inserter(points));
288 std::copy(endo_scalars.begin(), endo_scalars.end(), std::back_inserter(scalars));
289
290 ASSERT(big_scalars.size() == num_big_points);
291 ASSERT(small_scalars.size() == num_small_points);
292
 // Single lookup table over the combined point set.
308 batch_lookup_table point_table(points);
309
 // Precompute NAF digits for every (flattened) scalar; index [i][round] is point i's digit.
322 const size_t num_rounds = max_num_small_bits;
323 const size_t num_points = points.size();
324 std::vector<std::vector<bool_t<C>>> naf_entries;
325 for (size_t i = 0; i < num_points; ++i) {
326 naf_entries.emplace_back(compute_naf(scalars[i], max_num_small_bits));
327 }
328
 // Offset generators: `.first` seeds the accumulator, `.second` is subtracted at the end —
 // presumably to keep intermediate sums away from incomplete-addition edge cases; confirm
 // against `compute_offset_generators`.
332 const auto offset_generators = compute_offset_generators(num_rounds);
333
 // Round-0 digits are consumed here via the table's initial entry.
339 element accumulator = offset_generators.first + point_table.get_initial_entry();
340
 // Main loop: consume two NAF rounds per iteration with one double montgomery ladder call
 // (rounds 2i-1 and 2i), reducing the number of field reductions.
356 for (size_t i = 1; i < num_rounds / 2; ++i) {
357 // `nafs` tracks the naf value for each point for the current round
358 std::vector<bool_t<C>> nafs;
359 for (size_t j = 0; j < points.size(); ++j) {
360 nafs.emplace_back(naf_entries[j][i * 2 - 1]);
361 }
362
375 element::chain_add_accumulator add_1 = point_table.get_chain_add_accumulator(nafs);
 // Reuse the buffer for the second round of this iteration.
376 for (size_t j = 0; j < points.size(); ++j) {
377 nafs[j] = (naf_entries[j][i * 2]);
378 }
379 element::chain_add_accumulator add_2 = point_table.get_chain_add_accumulator(nafs);
380
381 // Perform the double montgomery ladder.
382 accumulator = accumulator.multiple_montgomery_ladder({ add_1, add_2 });
383 }
384
385 // we need to iterate 1 more time if the number of rounds is even
386 if ((num_rounds & 0x01ULL) == 0x00ULL) {
387 std::vector<bool_t<C>> nafs;
388 for (size_t j = 0; j < points.size(); ++j) {
389 nafs.emplace_back(naf_entries[j][num_rounds - 1]);
390 }
391 element::chain_add_accumulator add_1 = point_table.get_chain_add_accumulator(nafs);
392 accumulator = accumulator.multiple_montgomery_ladder({ add_1 });
393 }
394
 // Skew correction: index `num_rounds` of each NAF holds the skew bit; conditionally subtract
 // the base point where the NAF odd-adjustment was applied.
413 for (size_t i = 0; i < num_points; ++i) {
414 element skew = accumulator - points[i];
415 Fq out_x = accumulator.x.conditional_select(skew.x, naf_entries[i][num_rounds]);
416 Fq out_y = accumulator.y.conditional_select(skew.y, naf_entries[i][num_rounds]);
417 accumulator = element(out_x, out_y);
418 }
419
420 // Remove the offset generator point!
421 accumulator = accumulator - offset_generators.second;
422
423 // Return our scalar mul output
424 return accumulator;
425}
426} // namespace stdlib
427} // namespace proof_system::plonk
Definition: uint256.hpp:25
constexpr uint256_t slice(uint64_t start, uint64_t end) const
Definition: uint256_impl.hpp:157
Definition: biggroup.hpp:22
element multiple_montgomery_ladder(const std::vector< chain_add_accumulator > &to_add) const
Perform repeated iterations of the montgomery ladder algorithm.
Definition: biggroup_impl.hpp:458
static chain_add_accumulator chain_add(const element &p1, const chain_add_accumulator &accumulator)
Definition: biggroup_impl.hpp:183
static element chain_add_end(const chain_add_accumulator &accumulator)
Definition: biggroup_impl.hpp:228
Contains all the headers required to adequately compile the types defined in circuit_builders_fwd....
Definition: circuit_builders.hpp:11
Definition: circuit_builders.hpp:17
Definition: widget.bench.cpp:13
static void split_into_endomorphism_scalars(const field &k, field &k1, field &k2)
Definition: field_declarations.hpp:320