barretenberg
Loading...
Searching...
No Matches
uint.fuzzer.hpp
1#include "barretenberg/numeric/random/engine.hpp"
2#include "barretenberg/stdlib/primitives/bool/bool.hpp"
3#include "barretenberg/stdlib/primitives/byte_array/byte_array.hpp"
4#include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders_fwd.hpp"
5#include "barretenberg/stdlib/primitives/field/field.hpp"
6#include "barretenberg/stdlib/primitives/uint/uint.hpp"
// The instruction initializers below use nested designators (e.g. `.arguments.element = ...`),
// a C99 extension in C++ — silence clang's warning for this file.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc99-designator"

// This is a global variable, so that the execution handling class could alter it and signal to the input tester that
// the input should fail
bool circuit_should_fail = false;

// Must be defined before including fuzzer.hpp so the framework enables havoc-style mutation support.
#define HAVOC_TESTING

#include "barretenberg/common/fuzzer.hpp"
// RNG used to randomly pick between behaviorally-equivalent code paths (e.g. `+` vs `+=`)
// so the same instruction stream exercises different circuit-construction variants.
FastRandom VarianceRNG(0);

// Enable this definition, when you want to find out the instructions that caused a failure
// #define SHOW_INFORMATION 1

// Size in bytes of the serialized opcode prefix of an instruction.
#define OPERATION_TYPE_SIZE 1

// NOTE(review): the following constants appear unused in this translation unit — presumably kept
// for symmetry with the other primitive fuzzers; confirm before removing.
#define ELEMENT_SIZE (sizeof(fr) + 1)
#define TWO_IN_ONE_OUT 3
#define THREE_IN_ONE_OUT 4
#define SLICE_ARGS_SIZE 6
33template <typename Builder> class UintFuzzBase {
34 private:
42
43 template <class From, class To> static To from_to(const From& in, const std::optional<size_t> size = std::nullopt)
44 {
45 return To(in.data(), in.data() + (size ? *size : in.size()));
46 }
47
48 public:
54 public:
        // Opcodes of the fuzzer's miniature instruction set. `_LAST` is a sentinel used as
        // the modulus when sampling a random opcode; it must remain the final entry.
        enum OPCODE {
            CONSTANT,   // push a constant value onto the stack
            ADD,
            SUBTRACT,
            MULTIPLY,
            DIVIDE,
            MODULO,
            AND,
            OR,
            XOR,
            GET_BIT,    // extract a single bit of a stack element
            SHL,
            SHR,
            ROL,
            ROR,
            NOT,
            SET,        // re-instantiate a value through one of the uint constructors
            RANDOMSEED, // carries a 32-bit seed (stored in arguments.randomseed)
            _LAST
        };
75
        // Arguments for one-input/one-output instructions (NOT, SET): stack indices only.
        struct TwoArgs {
            uint8_t in;
            uint8_t out;
        };
        // Arguments for two-input/one-output instructions (ADD .. XOR).
        struct ThreeArgs {
            uint8_t in1;
            uint8_t in2;
            uint8_t out;
        };
        // Arguments for bit/shift/rotate instructions (GET_BIT, SHL, SHR, ROL, ROR):
        // `bit` is the bit index / shift amount.
        struct BitArgs {
            uint8_t in;
            uint8_t out;
            uint64_t bit;
        };
91 uint32_t randomseed;
92 uint64_t element;
93 TwoArgs twoArgs;
94 ThreeArgs threeArgs;
95 BitArgs bitArgs;
96 };
97 // The type of instruction
98 OPCODE id;
99 // Instruction arguments
100 ArgumentContents arguments;
        /**
         * @brief Generate a random instruction with uniformly sampled opcode and arguments.
         *
         * @tparam T a simple PRNG conforming to SimpleRng
         * @param rng the PRNG used for all sampling
         * @return a fully-populated Instruction
         */
        template <typename T>
        inline static Instruction generateRandom(T& rng)
            requires SimpleRng<T>
        {
            // Choose which instruction we are going to generate
            OPCODE instruction_opcode = static_cast<OPCODE>(rng.next() % (OPCODE::_LAST));
            uint8_t in1, in2, out;
            // NOTE(review): sampled as 32 bits although BitArgs::bit is 64-bit (mutation and
            // parsing do use all 8 bytes) — presumably intentional; confirm.
            uint32_t bit;
            // Depending on instruction
            switch (instruction_opcode) {
            case OPCODE::CONSTANT:
                return { .id = instruction_opcode, .arguments.element = rng.next() };
                break;
            case OPCODE::ADD:
            case OPCODE::SUBTRACT:
            case OPCODE::MULTIPLY:
            case OPCODE::DIVIDE:
            case OPCODE::MODULO:
            case OPCODE::AND:
            case OPCODE::OR:
            case OPCODE::XOR:
                // For two-input-one-output instructions we just randomly pick each argument and generate an instruction
                // accordingly
                in1 = static_cast<uint8_t>(rng.next() & 0xff);
                in2 = static_cast<uint8_t>(rng.next() & 0xff);
                out = static_cast<uint8_t>(rng.next() & 0xff);
                return { .id = instruction_opcode, .arguments.threeArgs = { .in1 = in1, .in2 = in2, .out = out } };
                break;
            case OPCODE::GET_BIT:
            case OPCODE::SHL:
            case OPCODE::SHR:
            case OPCODE::ROL:
            case OPCODE::ROR:
                // Bit/shift instructions take one input index, one output index and a bit count
                in1 = static_cast<uint8_t>(rng.next() & 0xff);
                out = static_cast<uint8_t>(rng.next() & 0xff);
                bit = static_cast<uint32_t>(rng.next() & 0xffffffff);
                return { .id = instruction_opcode, .arguments.bitArgs = { .in = in1, .out = out, .bit = bit } };
            case OPCODE::NOT:
            case OPCODE::SET:
                // Unary instructions need just an input and an output index
                in1 = static_cast<uint8_t>(rng.next() & 0xff);
                out = static_cast<uint8_t>(rng.next() & 0xff);
                return { .id = instruction_opcode, .arguments.twoArgs = { .in = in1, .out = out } };
                break;
            case OPCODE::RANDOMSEED:
                return { .id = instruction_opcode, .arguments.randomseed = rng.next() };
                break;
            default:
                abort(); // We have missed some instructions, it seems
                break;
            }
        }
159
169 template <typename T>
170 inline static Instruction mutateInstruction(Instruction instruction, T& rng, HavocSettings& havoc_config)
171 requires SimpleRng<T>
172 {
173 (void)rng;
174 (void)havoc_config;
175#define PUT_RANDOM_BYTE_IF_LUCKY(variable) \
176 if (rng.next() & 1) { \
177 variable = rng.next() & 0xff; \
178 }
179#define PUT_RANDOM_TWO_BYTES_IF_LUCKY(variable) \
180 if (rng.next() & 1) { \
181 variable = rng.next() & 0xffff; \
182 }
183#define PUT_RANDOM_EIGHT_BYTES_IF_LUCKY(variable) \
184 if (rng.next() & 1) { \
185 variable = rng.next() & 0xffffffff; \
186 variable <<= 32; \
187 variable += rng.next() & 0xffffffff; \
188 }
189 // Depending on instruction type...
190 switch (instruction.id) {
191 case OPCODE::CONSTANT:
192 break;
193 case OPCODE::ADD:
194 case OPCODE::SUBTRACT:
195 case OPCODE::MULTIPLY:
196 case OPCODE::DIVIDE:
197 case OPCODE::MODULO:
198 case OPCODE::AND:
199 case OPCODE::OR:
200 case OPCODE::XOR:
201 // Randomly sample each of the arguments with 50% probability
202 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.threeArgs.in1)
203 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.threeArgs.in2)
204 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.threeArgs.out)
205 break;
206 case OPCODE::GET_BIT:
207 case OPCODE::SHL:
208 case OPCODE::SHR:
209 case OPCODE::ROL:
210 case OPCODE::ROR:
211 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.bitArgs.in)
212 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.bitArgs.out)
213 PUT_RANDOM_EIGHT_BYTES_IF_LUCKY(instruction.arguments.bitArgs.bit)
214 case OPCODE::NOT:
215 case OPCODE::SET:
216 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.twoArgs.in)
217 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.twoArgs.out)
218 break;
219 case OPCODE::RANDOMSEED:
220 instruction.arguments.randomseed = rng.next();
221 break;
222 default:
223 abort(); // New instruction encountered
224 break;
225 }
226 // Return mutated instruction
227 return instruction;
228 }
229 };
230 // We use argsizes to both specify the size of data needed to parse the instruction and to signal that the
231 // instruction is enabled (if it is -1,it's disabled )
    // Number of payload bytes each instruction's arguments occupy in serialized form,
    // excluding the 1-byte opcode prefix (e.g. GET_BIT = 2 index bytes + 8 bit bytes).
    // These sizes tell the framework how much data to consume per instruction.
    class ArgSizes {
      public:
        static constexpr size_t CONSTANT = sizeof(uint64_t);
        static constexpr size_t ADD = 3;
        static constexpr size_t SUBTRACT = 3;
        static constexpr size_t MULTIPLY = 3;
        static constexpr size_t DIVIDE = 3;
        static constexpr size_t MODULO = 3;
        static constexpr size_t AND = 3;
        static constexpr size_t OR = 3;
        static constexpr size_t XOR = 3;
        static constexpr size_t GET_BIT = 10;
        static constexpr size_t SHL = 10;
        static constexpr size_t SHR = 10;
        static constexpr size_t ROL = 10;
        static constexpr size_t ROR = 10;
        static constexpr size_t NOT = 2;
        static constexpr size_t SET = 2;
        static constexpr size_t RANDOMSEED = sizeof(uint32_t);
    };
256 class Parser {
257 public:
265 template <typename Instruction::OPCODE opcode> inline static Instruction parseInstructionArgs(uint8_t* Data)
266 {
267 if constexpr (opcode == Instruction::OPCODE::CONSTANT) {
268 return Instruction{ .id = static_cast<typename Instruction::OPCODE>(opcode),
269 .arguments.element = *((uint64_t*)Data) };
270 }
271 if constexpr (opcode == Instruction::OPCODE::ADD || opcode == Instruction::OPCODE::SUBTRACT ||
272 opcode == Instruction::OPCODE::MULTIPLY || opcode == Instruction::OPCODE::DIVIDE ||
273 opcode == Instruction::OPCODE::MODULO || opcode == Instruction::OPCODE::AND ||
274 opcode == Instruction::OPCODE::OR || opcode == Instruction::OPCODE::XOR) {
275 return { .id = static_cast<typename Instruction::OPCODE>(opcode),
276 .arguments.threeArgs = { .in1 = *Data, .in2 = *(Data + 1), .out = *(Data + 2) } };
277 }
278 if constexpr (opcode == Instruction::OPCODE::GET_BIT || opcode == Instruction::OPCODE::SHL ||
279 opcode == Instruction::OPCODE::SHR || opcode == Instruction::OPCODE::ROL ||
280 opcode == Instruction::OPCODE::ROR) {
281 return Instruction{ .id = static_cast<typename Instruction::OPCODE>(opcode),
282 .arguments.bitArgs = {
283 .in = *Data, .out = *(Data + 1), .bit = *((uint64_t*)(Data + 2)) } };
284 }
285 if constexpr (opcode == Instruction::OPCODE::NOT || opcode == Instruction::OPCODE::SET) {
286 return { .id = static_cast<typename Instruction::OPCODE>(opcode),
287 .arguments.twoArgs = { .in = *Data, .out = *(Data + 1) } };
288 }
289 if constexpr (opcode == Instruction::OPCODE::RANDOMSEED) {
290 uint32_t randomseed;
291 memcpy(&randomseed, Data, sizeof(uint32_t));
292 return Instruction{ .id = static_cast<typename Instruction::OPCODE>(opcode),
293 .arguments.randomseed = randomseed };
294 };
295 }
303 template <typename Instruction::OPCODE instruction_opcode>
304 inline static void writeInstruction(Instruction& instruction, uint8_t* Data)
305 {
306 if constexpr (instruction_opcode == Instruction::OPCODE::CONSTANT) {
307 *Data = instruction.id;
308 memcpy(Data + 1, &instruction.arguments.element, sizeof(uint64_t));
309 }
310 if constexpr (instruction_opcode == Instruction::OPCODE::ADD ||
311 instruction_opcode == Instruction::OPCODE::SUBTRACT ||
312 instruction_opcode == Instruction::OPCODE::MULTIPLY ||
313 instruction_opcode == Instruction::OPCODE::DIVIDE ||
314 instruction_opcode == Instruction::OPCODE::MODULO ||
315 instruction_opcode == Instruction::OPCODE::AND ||
316 instruction_opcode == Instruction::OPCODE::OR ||
317 instruction_opcode == Instruction::OPCODE::XOR) {
318 *Data = instruction.id;
319 *(Data + 1) = instruction.arguments.threeArgs.in1;
320 *(Data + 2) = instruction.arguments.threeArgs.in2;
321 *(Data + 3) = instruction.arguments.threeArgs.out;
322 }
323 if constexpr (instruction_opcode == Instruction::OPCODE::GET_BIT ||
324 instruction_opcode == Instruction::OPCODE::SHL ||
325 instruction_opcode == Instruction::OPCODE::SHR ||
326 instruction_opcode == Instruction::OPCODE::ROL ||
327 instruction_opcode == Instruction::OPCODE::ROR) {
328 *Data = instruction.id;
329 *(Data + 1) = instruction.arguments.bitArgs.in;
330 *(Data + 2) = instruction.arguments.bitArgs.out;
331 *((uint64_t*)(Data + 3)) = instruction.arguments.bitArgs.bit;
332 }
333 if constexpr (instruction_opcode == Instruction::OPCODE::NOT ||
334 instruction_opcode == Instruction::OPCODE::SET) {
335 *Data = instruction.id;
336 *(Data + 1) = instruction.arguments.twoArgs.in;
337 *(Data + 2) = instruction.arguments.twoArgs.out;
338 }
339 if constexpr (instruction_opcode == Instruction::OPCODE::RANDOMSEED) {
340
341 *Data = instruction.id;
342 memcpy(Data + 1, &instruction.arguments.randomseed, sizeof(uint32_t));
343 }
344 }
345 };
351 private:
352 template <class T> static T shl(const T v, const size_t bits)
353 {
354 if (bits >= sizeof(T) * 8) {
355 return 0;
356 } else {
357 return static_cast<T>(v << bits);
358 }
359 }
360 template <class T> static T shr(const T v, const size_t bits)
361 {
362 if (bits >= sizeof(T) * 8) {
363 return 0;
364 } else {
365 return static_cast<T>(v >> bits);
366 }
367 }
368 template <class T> static T get_bit(const T v, const size_t bit)
369 {
370 if (bit >= sizeof(T) * 8) {
371 return 0;
372 } else {
373 return (v & (uint64_t(1) << bit)) ? 1 : 0;
374 }
375 }
376 /* wrapper for uint::at which ensures the context of
377 * the return value has been set
378 */
379 template <class T> static bool_t at(const T& v, const size_t bit_index)
380 {
381 const auto ret = v.at(bit_index);
382
383 if (ret.get_context() != v.get_context()) {
384 std::cerr << "Context of return bool_t not set" << std::endl;
385 abort();
386 }
387
388 return ret;
389 }
        // Extract one bit of a circuit uint and widen it back into a T built from a
        // single-element bool vector (so the result is 0 or 1 of the original width).
        template <class T> static T get_bit(Builder* builder, const T& v, const size_t bit)
        {
            return T(builder, std::vector<bool_t>{ at<>(v, bit) });
        }
394 template <class T> static std::vector<bool_t> to_bit_vector(const T& v)
395 {
396 std::vector<bool_t> bits;
397 for (size_t i = 0; i < v.get_width(); i++) {
398 bits.push_back(at<>(v, i));
399 }
400 return bits;
401 }
402 template <class T> static std::array<bool_t, T::width> to_bit_array(const T& v)
403 {
404 std::array<bool_t, T::width> bits;
405 for (size_t i = 0; i < T::width; i++) {
406 bits[i] = at<>(v, i);
407 }
408 return bits;
409 }
410 template <class T> static uint256_t get_value(const T& v)
411 {
412 const auto ret = v.get_value();
413
414 if (ret.get_msb() >= T::width) {
415 std::cerr << "uint256_t returned by get_value() exceeds type width" << std::endl;
416 abort();
417 }
418
419 return std::move(ret);
420 }
421 template <class T> static byte_array_t to_byte_array(const T& v)
422 {
423 const auto ret = static_cast<byte_array_t>(v);
424
425 static_assert(T::width % 8 == 0);
426 if (ret.size() > T::width / 8) {
427 std::cerr << "byte_array version of uint exceeds type width" << std::endl;
428 abort();
429 }
430
431 return ret;
432 }
433 template <class T> static field_t to_field_t(const T& v)
434 {
435 auto ret = static_cast<field_t>(v);
436
437 if (static_cast<uint256_t>(ret.get_value()) != v.get_value()) {
438 std::cerr << "field_t version of uint differs from its value" << std::endl;
439 abort();
440 }
441
442 return ret;
443 }
444
445 public:
        // Bundle of the four circuit uint widths, always kept in sync: every operation
        // in this fuzzer is applied to all four widths at once.
        class Uint {
          public:
            uint_8_t v8;
            uint_16_t v16;
            uint_32_t v32;
            uint_64_t v64;

            Uint() = default;
            // Wrap four already-built circuit values.
            Uint(uint_8_t v8, uint_16_t v16, uint_32_t v32, uint_64_t v64)
                : v8(v8)
                , v16(v16)
                , v32(v32)
                , v64(v64)
            {}
            // Build all four widths from one native value, truncating to each width.
            Uint(Builder* builder, const uint64_t v)
                : v8(builder, static_cast<uint8_t>(v & 0xFF))
                , v16(builder, static_cast<uint16_t>(v & 0xFFFF))
                , v32(builder, static_cast<uint32_t>(v & 0xFFFFFFFF))
                , v64(builder, v)
            {}
        };
        // Native-integer mirror of Uint: the "reference model" the circuit results are
        // checked against.
        class Reference {
          public:
            uint8_t v8;
            uint16_t v16;
            uint32_t v32;
            uint64_t v64;

            Reference() = default;
            Reference(uint8_t v8, uint16_t v16, uint32_t v32, uint64_t v64)
                : v8(v8)
                , v16(v16)
                , v32(v32)
                , v64(v64)
            {}
            // Read the witness values back out of a circuit Uint (via the checked
            // get_value<> wrapper). Intentionally implicit: ExecutionHandler(Uint)
            // relies on this conversion.
            Reference(const Uint& u)
                : v8(get_value<>(u.v8))
                , v16(get_value<>(u.v16))
                , v32(get_value<>(u.v32))
                , v64(get_value<>(u.v64))
            {}
        };
488 Reference ref;
489 Uint uint;
490
491 ExecutionHandler() = default;
493 : ref(r)
494 , uint(u)
495 {}
        // Pair an already-computed reference model with its circuit counterpart.
        ExecutionHandler(Reference r, Uint u)
            : ref(r)
            , uint(u)
        {}
        // Construct from the circuit value alone; the reference model is derived from
        // it through Reference(const Uint&), which reads back the witness values.
        ExecutionHandler(Uint u)
            : ref(u)
            , uint(u)
        {}
504 ExecutionHandler operator+(const ExecutionHandler& other) const
505 {
506 const Reference ref_result(this->ref.v8 + other.ref.v8,
507 this->ref.v16 + other.ref.v16,
508 this->ref.v32 + other.ref.v32,
509 this->ref.v64 + other.ref.v64);
510
511 switch (VarianceRNG.next() % 2) {
512 case 0:
513 /* + operator */
514 return ExecutionHandler(ref_result,
515 Uint(this->uint.v8 + other.uint.v8,
516 this->uint.v16 + other.uint.v16,
517 this->uint.v32 + other.uint.v32,
518 this->uint.v64 + other.uint.v64));
519 case 1:
520 /* += operator */
521 {
522 Uint u = uint;
523
524 u.v8 += other.uint.v8;
525 u.v16 += other.uint.v16;
526 u.v32 += other.uint.v32;
527 u.v64 += other.uint.v64;
528
529 return ExecutionHandler(ref_result, u);
530 }
531 default:
532 abort();
533 }
534 }
535 ExecutionHandler operator-(const ExecutionHandler& other) const
536 {
537 const Reference ref_result(this->ref.v8 - other.ref.v8,
538 this->ref.v16 - other.ref.v16,
539 this->ref.v32 - other.ref.v32,
540 this->ref.v64 - other.ref.v64);
541
542 switch (VarianceRNG.next() % 2) {
543 case 0:
544 /* - operator */
545 return ExecutionHandler(ref_result,
546 Uint(this->uint.v8 - other.uint.v8,
547 this->uint.v16 - other.uint.v16,
548 this->uint.v32 - other.uint.v32,
549 this->uint.v64 - other.uint.v64));
550 case 1:
551 /* -= operator */
552 {
553 Uint u = uint;
554
555 u.v8 -= other.uint.v8;
556 u.v16 -= other.uint.v16;
557 u.v32 -= other.uint.v32;
558 u.v64 -= other.uint.v64;
559
560 return ExecutionHandler(ref_result, u);
561 }
562 default:
563 abort();
564 }
565 }
566 ExecutionHandler operator*(const ExecutionHandler& other) const
567 {
568 const Reference ref_result(this->ref.v8 * other.ref.v8,
569 this->ref.v16 * other.ref.v16,
570 this->ref.v32 * other.ref.v32,
571 this->ref.v64 * other.ref.v64);
572
573 switch (VarianceRNG.next() % 2) {
574 case 0:
575 /* * operator */
576 return ExecutionHandler(ref_result,
577 Uint(this->uint.v8 * other.uint.v8,
578 this->uint.v16 * other.uint.v16,
579 this->uint.v32 * other.uint.v32,
580 this->uint.v64 * other.uint.v64));
581 case 1:
582 /* *= operator */
583 {
584 Uint u = uint;
585
586 u.v8 *= other.uint.v8;
587 u.v16 *= other.uint.v16;
588 u.v32 *= other.uint.v32;
589 u.v64 *= other.uint.v64;
590
591 return ExecutionHandler(ref_result, u);
592 }
593 default:
594 abort();
595 }
596 }
        // Division on all four widths. A zero divisor in ANY width marks the whole
        // input as one that must fail (circuit_should_fail) — the circuit-side division
        // below is still built unguarded, and the harness expects it to fail.
        ExecutionHandler operator/(const ExecutionHandler& other) const
        {
            const bool divisor_zero =
                other.ref.v8 == 0 || other.ref.v16 == 0 || other.ref.v32 == 0 || other.ref.v64 == 0;
            // Reference model guards each width separately; 0 stands in for the
            // undefined quotient so the reference still holds a value.
            const Reference ref_result(other.ref.v8 == 0 ? 0 : this->ref.v8 / other.ref.v8,
                                       other.ref.v16 == 0 ? 0 : this->ref.v16 / other.ref.v16,
                                       other.ref.v32 == 0 ? 0 : this->ref.v32 / other.ref.v32,
                                       other.ref.v64 == 0 ? 0 : this->ref.v64 / other.ref.v64);

            if (divisor_zero) {
                circuit_should_fail = true;
            }

            switch (VarianceRNG.next() % 2) {
            case 0:
                /* / operator */
                return ExecutionHandler(ref_result,
                                        Uint(this->uint.v8 / other.uint.v8,
                                             this->uint.v16 / other.uint.v16,
                                             this->uint.v32 / other.uint.v32,
                                             this->uint.v64 / other.uint.v64));
            case 1:
                /* /= operator */
                {
                    Uint u = uint;

                    u.v8 /= other.uint.v8;
                    u.v16 /= other.uint.v16;
                    u.v32 /= other.uint.v32;
                    u.v64 /= other.uint.v64;

                    return ExecutionHandler(ref_result, u);
                }
            default:
                abort();
            }
        }
        // Modulo on all four widths. Mirrors operator/: a zero divisor in ANY width
        // flags the input as expected-to-fail while the circuit-side modulo is still
        // built unguarded.
        ExecutionHandler operator%(const ExecutionHandler& other) const
        {
            const bool divisor_zero =
                other.ref.v8 == 0 || other.ref.v16 == 0 || other.ref.v32 == 0 || other.ref.v64 == 0;
            // Reference model guards each width separately (0 stands in for the
            // undefined remainder).
            const Reference ref_result(other.ref.v8 == 0 ? 0 : this->ref.v8 % other.ref.v8,
                                       other.ref.v16 == 0 ? 0 : this->ref.v16 % other.ref.v16,
                                       other.ref.v32 == 0 ? 0 : this->ref.v32 % other.ref.v32,
                                       other.ref.v64 == 0 ? 0 : this->ref.v64 % other.ref.v64);

            if (divisor_zero) {
                circuit_should_fail = true;
            }

            switch (VarianceRNG.next() % 2) {
            case 0:
                /* % operator */
                return ExecutionHandler(ref_result,
                                        Uint(this->uint.v8 % other.uint.v8,
                                             this->uint.v16 % other.uint.v16,
                                             this->uint.v32 % other.uint.v32,
                                             this->uint.v64 % other.uint.v64));
            case 1:
                /* %= operator */
                {
                    Uint u = uint;

                    u.v8 %= other.uint.v8;
                    u.v16 %= other.uint.v16;
                    u.v32 %= other.uint.v32;
                    u.v64 %= other.uint.v64;

                    return ExecutionHandler(ref_result, u);
                }
            default:
                abort();
            }
        }
671 ExecutionHandler operator&(const ExecutionHandler& other) const
672 {
673 const Reference ref_result(this->ref.v8 & other.ref.v8,
674 this->ref.v16 & other.ref.v16,
675 this->ref.v32 & other.ref.v32,
676 this->ref.v64 & other.ref.v64);
677
678 switch (VarianceRNG.next() % 2) {
679 case 0:
680 /* & operator */
681 return ExecutionHandler(ref_result,
682 Uint(this->uint.v8 & other.uint.v8,
683 this->uint.v16 & other.uint.v16,
684 this->uint.v32 & other.uint.v32,
685 this->uint.v64 & other.uint.v64));
686 case 1:
687 /* &= operator */
688 {
689 Uint u = uint;
690
691 u.v8 &= other.uint.v8;
692 u.v16 &= other.uint.v16;
693 u.v32 &= other.uint.v32;
694 u.v64 &= other.uint.v64;
695
696 return ExecutionHandler(ref_result, u);
697 }
698 default:
699 abort();
700 }
701 }
702 ExecutionHandler operator|(const ExecutionHandler& other) const
703 {
704 const Reference ref_result(this->ref.v8 | other.ref.v8,
705 this->ref.v16 | other.ref.v16,
706 this->ref.v32 | other.ref.v32,
707 this->ref.v64 | other.ref.v64);
708
709 switch (VarianceRNG.next() % 2) {
710 case 0:
711 /* | operator */
712 return ExecutionHandler(ref_result,
713 Uint(this->uint.v8 | other.uint.v8,
714 this->uint.v16 | other.uint.v16,
715 this->uint.v32 | other.uint.v32,
716 this->uint.v64 | other.uint.v64));
717 case 1:
718 /* |= operator */
719 {
720 Uint u = uint;
721
722 u.v8 |= other.uint.v8;
723 u.v16 |= other.uint.v16;
724 u.v32 |= other.uint.v32;
725 u.v64 |= other.uint.v64;
726
727 return ExecutionHandler(ref_result, u);
728 }
729 default:
730 abort();
731 }
732 }
733 ExecutionHandler operator^(const ExecutionHandler& other) const
734 {
735 const Reference ref_result(this->ref.v8 ^ other.ref.v8,
736 this->ref.v16 ^ other.ref.v16,
737 this->ref.v32 ^ other.ref.v32,
738 this->ref.v64 ^ other.ref.v64);
739
740 switch (VarianceRNG.next() % 2) {
741 case 0:
742 /* ^ operator */
743 return ExecutionHandler(ref_result,
744 Uint(this->uint.v8 ^ other.uint.v8,
745 this->uint.v16 ^ other.uint.v16,
746 this->uint.v32 ^ other.uint.v32,
747 this->uint.v64 ^ other.uint.v64));
748 case 1:
749 /* ^= operator */
750 {
751 Uint u = uint;
752
753 u.v8 ^= other.uint.v8;
754 u.v16 ^= other.uint.v16;
755 u.v32 ^= other.uint.v32;
756 u.v64 ^= other.uint.v64;
757
758 return ExecutionHandler(ref_result, u);
759 }
760 default:
761 abort();
762 }
763 }
        // Extract one bit of every width: the reference side uses the range-checked
        // static get_bit<T>(v, bit), the circuit side the vector-of-bools overload.
        ExecutionHandler get_bit(Builder* builder, const size_t bit) const
        {
            return ExecutionHandler(Reference(this->get_bit<uint8_t>(this->ref.v8, bit),
                                              this->get_bit<uint16_t>(this->ref.v16, bit),
                                              this->get_bit<uint32_t>(this->ref.v32, bit),
                                              this->get_bit<uint64_t>(this->ref.v64, bit)),
                                    Uint(this->get_bit<uint_8_t>(builder, this->uint.v8, bit),
                                         this->get_bit<uint_16_t>(builder, this->uint.v16, bit),
                                         this->get_bit<uint_32_t>(builder, this->uint.v32, bit),
                                         this->get_bit<uint_64_t>(builder, this->uint.v64, bit)));
        }
775 ExecutionHandler shl(const size_t bits) const
776 {
777 const Reference ref_result(shl<uint8_t>(this->ref.v8, bits),
778 shl<uint16_t>(this->ref.v16, bits),
779 shl<uint32_t>(this->ref.v32, bits),
780 shl<uint64_t>(this->ref.v64, bits));
781
782 switch (VarianceRNG.next() % 2) {
783 case 0:
784 /* << operator */
785 return ExecutionHandler(
786 ref_result,
787 Uint(
788 this->uint.v8 << bits, this->uint.v16 << bits, this->uint.v32 << bits, this->uint.v64 << bits));
789 case 1:
790 /* <<= operator */
791 {
792 Uint u = uint;
793
794 u.v8 <<= bits;
795 u.v16 <<= bits;
796 u.v32 <<= bits;
797 u.v64 <<= bits;
798
799 return ExecutionHandler(ref_result, u);
800 }
801 default:
802 abort();
803 }
804 }
805 ExecutionHandler shr(const size_t bits) const
806 {
807 const Reference ref_result(shr<uint8_t>(this->ref.v8, bits),
808 shr<uint16_t>(this->ref.v16, bits),
809 shr<uint32_t>(this->ref.v32, bits),
810 shr<uint64_t>(this->ref.v64, bits));
811
812 switch (VarianceRNG.next() % 2) {
813 case 0:
814 /* >> operator */
815 return ExecutionHandler(
816 ref_result,
817 Uint(
818 this->uint.v8 >> bits, this->uint.v16 >> bits, this->uint.v32 >> bits, this->uint.v64 >> bits));
819 case 1:
820 /* >>= operator */
821 {
822 Uint u = uint;
823
824 u.v8 >>= bits;
825 u.v16 >>= bits;
826 u.v32 >>= bits;
827 u.v64 >>= bits;
828
829 return ExecutionHandler(ref_result, u);
830 }
831 default:
832 abort();
833 }
834 }
        // Rotate left. The reference side reduces the rotation mod the type width
        // before calling std::rotl; the circuit side forwards `bits` unreduced to
        // uint::rol — presumably it normalizes internally (TODO(review): confirm).
        ExecutionHandler rol(const size_t bits) const
        {
            return ExecutionHandler(Reference(std::rotl(this->ref.v8, static_cast<int>(bits % 8)),
                                              std::rotl(this->ref.v16, static_cast<int>(bits % 16)),
                                              std::rotl(this->ref.v32, static_cast<int>(bits % 32)),
                                              std::rotl(this->ref.v64, static_cast<int>(bits % 64))),
                                    Uint(this->uint.v8.rol(bits),
                                         this->uint.v16.rol(bits),
                                         this->uint.v32.rol(bits),
                                         this->uint.v64.rol(bits)));
        }
        // Rotate right; mirrors rol() (reference reduces mod width, circuit side
        // forwards `bits` unreduced to uint::ror).
        ExecutionHandler ror(const size_t bits) const
        {
            return ExecutionHandler(Reference(std::rotr(this->ref.v8, static_cast<int>(bits % 8)),
                                              std::rotr(this->ref.v16, static_cast<int>(bits % 16)),
                                              std::rotr(this->ref.v32, static_cast<int>(bits % 32)),
                                              std::rotr(this->ref.v64, static_cast<int>(bits % 64))),
                                    Uint(this->uint.v8.ror(bits),
                                         this->uint.v16.ror(bits),
                                         this->uint.v32.ror(bits),
                                         this->uint.v64.ror(bits)));
        }
857 ExecutionHandler not_(void) const
858 {
859 return ExecutionHandler(Reference(~this->ref.v8, ~this->ref.v16, ~this->ref.v32, ~this->ref.v64),
860 Uint(~this->uint.v8, ~this->uint.v16, ~this->uint.v32, ~this->uint.v64));
861 }
        /* Explicit re-instantiation using the various constructors */
        // The reference model is kept unchanged; only the circuit representation is
        // rebuilt, so each path checks a different constructor round-trip.
        ExecutionHandler set(Builder* builder) const
        {
            switch (VarianceRNG.next() % 7) {
            case 0:
                /* Copy-construct from the existing circuit values */
                return ExecutionHandler(this->ref,
                                        Uint(uint_8_t(this->uint.v8),
                                             uint_16_t(this->uint.v16),
                                             uint_32_t(this->uint.v32),
                                             uint_64_t(this->uint.v64)));
            case 1:
                /* Rebuild from the native witness values */
                return ExecutionHandler(this->ref,
                                        Uint(uint_8_t(builder, get_value<>(this->uint.v8)),
                                             uint_16_t(builder, get_value<>(this->uint.v16)),
                                             uint_32_t(builder, get_value<>(this->uint.v32)),
                                             uint_64_t(builder, get_value<>(this->uint.v64))));
            case 2:
                /* Round-trip through field_t (checked by to_field_t) */
                return ExecutionHandler(this->ref,
                                        Uint(uint_8_t(this->to_field_t(this->uint.v8)),
                                             uint_16_t(this->to_field_t(this->uint.v16)),
                                             uint_32_t(this->to_field_t(this->uint.v32)),
                                             uint_64_t(this->to_field_t(this->uint.v64))));
            case 3:
                /* Round-trip through byte_array (checked by to_byte_array) */
                return ExecutionHandler(this->ref,
                                        Uint(uint_8_t(this->to_byte_array(this->uint.v8)),
                                             uint_16_t(this->to_byte_array(this->uint.v16)),
                                             uint_32_t(this->to_byte_array(this->uint.v32)),
                                             uint_64_t(this->to_byte_array(this->uint.v64))));
            case 4:
                /* Round-trip through a vector of bits */
                return ExecutionHandler(this->ref,
                                        Uint(uint_8_t(builder, this->to_bit_vector(this->uint.v8)),
                                             uint_16_t(builder, this->to_bit_vector(this->uint.v16)),
                                             uint_32_t(builder, this->to_bit_vector(this->uint.v32)),
                                             uint_64_t(builder, this->to_bit_vector(this->uint.v64))));
            case 5:
                /* Round-trip through a fixed-size array of bits */
                return ExecutionHandler(this->ref,
                                        Uint(uint_8_t(builder, this->to_bit_array(this->uint.v8)),
                                             uint_16_t(builder, this->to_bit_array(this->uint.v16)),
                                             uint_32_t(builder, this->to_bit_array(this->uint.v32)),
                                             uint_64_t(builder, this->to_bit_array(this->uint.v64))));
            case 6:
                /* Rebuild from the reference-model (native) values */
                return ExecutionHandler(this->ref,
                                        Uint(uint_8_t(builder, this->ref.v8),
                                             uint_16_t(builder, this->ref.v16),
                                             uint_32_t(builder, this->ref.v32),
                                             uint_64_t(builder, this->ref.v64)));
            default:
                abort();
            }
        }
920 static inline size_t execute_CONSTANT(Builder* builder,
921 std::vector<ExecutionHandler>& stack,
922 Instruction& instruction)
923 {
924 stack.push_back(Uint(builder, instruction.arguments.element));
925 return 0;
926 }
935 static inline size_t execute_ADD(Builder* builder,
936 std::vector<ExecutionHandler>& stack,
937 Instruction& instruction)
938 {
939 (void)builder;
940 if (stack.size() == 0) {
941 return 1;
942 }
943 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
944 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
945 size_t output_index = instruction.arguments.threeArgs.out;
946
947 ExecutionHandler result;
948 result = stack[first_index] + stack[second_index];
949 // If the output index is larger than the number of elements in stack, append
950 if (output_index >= stack.size()) {
951 stack.push_back(result);
952 } else {
953 stack[output_index] = result;
954 }
955 return 0;
956 };
965 static inline size_t execute_SUBTRACT(Builder* builder,
966 std::vector<ExecutionHandler>& stack,
967 Instruction& instruction)
968 {
969 (void)builder;
970 if (stack.size() == 0) {
971 return 1;
972 }
973 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
974 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
975 size_t output_index = instruction.arguments.threeArgs.out;
976
977 ExecutionHandler result;
978 result = stack[first_index] - stack[second_index];
979 // If the output index is larger than the number of elements in stack, append
980 if (output_index >= stack.size()) {
981 stack.push_back(result);
982 } else {
983 stack[output_index] = result;
984 }
985 return 0;
986 };
995 static inline size_t execute_MULTIPLY(Builder* builder,
996 std::vector<ExecutionHandler>& stack,
997 Instruction& instruction)
998 {
999 (void)builder;
1000 if (stack.size() == 0) {
1001 return 1;
1002 }
1003 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
1004 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
1005 size_t output_index = instruction.arguments.threeArgs.out;
1006
1007 ExecutionHandler result;
1008 result = stack[first_index] * stack[second_index];
1009 // If the output index is larger than the number of elements in stack, append
1010 if (output_index >= stack.size()) {
1011 stack.push_back(result);
1012 } else {
1013 stack[output_index] = result;
1014 }
1015 return 0;
1016 };
1025 static inline size_t execute_DIVIDE(Builder* builder,
1026 std::vector<ExecutionHandler>& stack,
1027 Instruction& instruction)
1028 {
1029 (void)builder;
1030 if (stack.size() == 0) {
1031 return 1;
1032 }
1033 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
1034 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
1035 size_t output_index = instruction.arguments.threeArgs.out;
1036
1037 ExecutionHandler result;
1038 result = stack[first_index] / stack[second_index];
1039 // If the output index is larger than the number of elements in stack, append
1040 if (output_index >= stack.size()) {
1041 stack.push_back(result);
1042 } else {
1043 stack[output_index] = result;
1044 }
1045 return 0;
1046 };
1055 static inline size_t execute_MODULO(Builder* builder,
1056 std::vector<ExecutionHandler>& stack,
1057 Instruction& instruction)
1058 {
1059 (void)builder;
1060 if (stack.size() == 0) {
1061 return 1;
1062 }
1063 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
1064 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
1065 size_t output_index = instruction.arguments.threeArgs.out;
1066
1067 ExecutionHandler result;
1068 result = stack[first_index] % stack[second_index];
1069 // If the output index is larger than the number of elements in stack, append
1070 if (output_index >= stack.size()) {
1071 stack.push_back(result);
1072 } else {
1073 stack[output_index] = result;
1074 }
1075 return 0;
1076 };
1085 static inline size_t execute_AND(Builder* builder,
1086 std::vector<ExecutionHandler>& stack,
1087 Instruction& instruction)
1088 {
1089 (void)builder;
1090 if (stack.size() == 0) {
1091 return 1;
1092 }
1093 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
1094 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
1095 size_t output_index = instruction.arguments.threeArgs.out;
1096
1097 ExecutionHandler result;
1098 result = stack[first_index] & stack[second_index];
1099 // If the output index is larger than the number of elements in stack, append
1100 if (output_index >= stack.size()) {
1101 stack.push_back(result);
1102 } else {
1103 stack[output_index] = result;
1104 }
1105 return 0;
1106 };
1115 static inline size_t execute_OR(Builder* builder,
1116 std::vector<ExecutionHandler>& stack,
1117 Instruction& instruction)
1118 {
1119 (void)builder;
1120 if (stack.size() == 0) {
1121 return 1;
1122 }
1123 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
1124 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
1125 size_t output_index = instruction.arguments.threeArgs.out;
1126
1127 ExecutionHandler result;
1128 result = stack[first_index] | stack[second_index];
1129 // If the output index is larger than the number of elements in stack, append
1130 if (output_index >= stack.size()) {
1131 stack.push_back(result);
1132 } else {
1133 stack[output_index] = result;
1134 }
1135 return 0;
1136 };
1145 static inline size_t execute_XOR(Builder* builder,
1146 std::vector<ExecutionHandler>& stack,
1147 Instruction& instruction)
1148 {
1149 (void)builder;
1150 if (stack.size() == 0) {
1151 return 1;
1152 }
1153 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
1154 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
1155 size_t output_index = instruction.arguments.threeArgs.out;
1156
1157 ExecutionHandler result;
1158 result = stack[first_index] ^ stack[second_index];
1159 // If the output index is larger than the number of elements in stack, append
1160 if (output_index >= stack.size()) {
1161 stack.push_back(result);
1162 } else {
1163 stack[output_index] = result;
1164 }
1165 return 0;
1166 };
1175 static inline size_t execute_GET_BIT(Builder* builder,
1176 std::vector<ExecutionHandler>& stack,
1177 Instruction& instruction)
1178 {
1179 if (stack.size() == 0) {
1180 return 1;
1181 }
1182 size_t first_index = instruction.arguments.bitArgs.in % stack.size();
1183 size_t output_index = instruction.arguments.bitArgs.out;
1184 const uint64_t bit = instruction.arguments.bitArgs.bit;
1185 ExecutionHandler result;
1186 result = stack[first_index].get_bit(builder, bit);
1187 // If the output index is larger than the number of elements in stack, append
1188 if (output_index >= stack.size()) {
1189 stack.push_back(result);
1190 } else {
1191 stack[output_index] = result;
1192 }
1193 return 0;
1194 };
1203 static inline size_t execute_SHL(Builder* builder,
1204 std::vector<ExecutionHandler>& stack,
1205 Instruction& instruction)
1206 {
1207 (void)builder;
1208 if (stack.size() == 0) {
1209 return 1;
1210 }
1211 size_t first_index = instruction.arguments.bitArgs.in % stack.size();
1212 size_t output_index = instruction.arguments.bitArgs.out;
1213 const uint64_t bit = instruction.arguments.bitArgs.bit;
1214 ExecutionHandler result;
1215 result = stack[first_index].shl(bit);
1216 // If the output index is larger than the number of elements in stack, append
1217 if (output_index >= stack.size()) {
1218 stack.push_back(result);
1219 } else {
1220 stack[output_index] = result;
1221 }
1222 return 0;
1223 };
1232 static inline size_t execute_SHR(Builder* builder,
1233 std::vector<ExecutionHandler>& stack,
1234 Instruction& instruction)
1235 {
1236 (void)builder;
1237 if (stack.size() == 0) {
1238 return 1;
1239 }
1240 size_t first_index = instruction.arguments.bitArgs.in % stack.size();
1241 size_t output_index = instruction.arguments.bitArgs.out;
1242 const uint64_t bit = instruction.arguments.bitArgs.bit;
1243 ExecutionHandler result;
1244 result = stack[first_index].shr(bit);
1245 // If the output index is larger than the number of elements in stack, append
1246 if (output_index >= stack.size()) {
1247 stack.push_back(result);
1248 } else {
1249 stack[output_index] = result;
1250 }
1251 return 0;
1252 };
1261 static inline size_t execute_ROL(Builder* builder,
1262 std::vector<ExecutionHandler>& stack,
1263 Instruction& instruction)
1264 {
1265 (void)builder;
1266 if (stack.size() == 0) {
1267 return 1;
1268 }
1269 size_t first_index = instruction.arguments.bitArgs.in % stack.size();
1270 size_t output_index = instruction.arguments.bitArgs.out;
1271 const uint64_t bit = instruction.arguments.bitArgs.bit;
1272 ExecutionHandler result;
1273 result = stack[first_index].rol(bit);
1274 // If the output index is larger than the number of elements in stack, append
1275 if (output_index >= stack.size()) {
1276 stack.push_back(result);
1277 } else {
1278 stack[output_index] = result;
1279 }
1280 return 0;
1281 };
1290 static inline size_t execute_ROR(Builder* builder,
1291 std::vector<ExecutionHandler>& stack,
1292 Instruction& instruction)
1293 {
1294 (void)builder;
1295 if (stack.size() == 0) {
1296 return 1;
1297 }
1298 size_t first_index = instruction.arguments.bitArgs.in % stack.size();
1299 size_t output_index = instruction.arguments.bitArgs.out;
1300 const uint64_t bit = instruction.arguments.bitArgs.bit;
1301 ExecutionHandler result;
1302 result = stack[first_index].ror(bit);
1303 // If the output index is larger than the number of elements in stack, append
1304 if (output_index >= stack.size()) {
1305 stack.push_back(result);
1306 } else {
1307 stack[output_index] = result;
1308 }
1309 return 0;
1310 };
1319 static inline size_t execute_NOT(Builder* builder,
1320 std::vector<ExecutionHandler>& stack,
1321 Instruction& instruction)
1322 {
1323 (void)builder;
1324 if (stack.size() == 0) {
1325 return 1;
1326 }
1327 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1328 size_t output_index = instruction.arguments.twoArgs.out;
1329
1330 ExecutionHandler result;
1331 result = stack[first_index].not_();
1332 // If the output index is larger than the number of elements in stack, append
1333 if (output_index >= stack.size()) {
1334 stack.push_back(result);
1335 } else {
1336 stack[output_index] = result;
1337 }
1338 return 0;
1339 };
1348 static inline size_t execute_SET(Builder* builder,
1349 std::vector<ExecutionHandler>& stack,
1350 Instruction& instruction)
1351 {
1352 if (stack.size() == 0) {
1353 return 1;
1354 }
1355 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1356 size_t output_index = instruction.arguments.twoArgs.out;
1357
1358 ExecutionHandler result;
1359 result = stack[first_index].set(builder);
1360 // If the output index is larger than the number of elements in stack, append
1361 if (output_index >= stack.size()) {
1362 stack.push_back(result);
1363 } else {
1364 stack[output_index] = result;
1365 }
1366 return 0;
1367 };
    /**
     * @brief Execute the RANDOMSEED instruction
     *
     * Reseeds the global variance RNG so subsequent instruction execution draws a
     * deterministic, input-controlled stream of random choices.
     *
     * @param builder Unused by this instruction
     * @param stack Unused by this instruction
     * @param instruction The instruction carrying the new 32-bit seed
     * @return 0 (always succeeds)
     */
    static inline size_t execute_RANDOMSEED(Builder* builder,
                                            std::vector<ExecutionHandler>& stack,
                                            Instruction& instruction)
    {
        (void)builder;
        (void)stack;

        VarianceRNG.reseed(instruction.arguments.randomseed);
        return 0;
    };
1386 };
1387
1388 typedef std::vector<ExecutionHandler> ExecutionState;
1398 inline static bool postProcess(Builder* builder, std::vector<UintFuzzBase::ExecutionHandler>& stack)
1399 {
1400 (void)builder;
1401 for (size_t i = 0; i < stack.size(); i++) {
1402 auto element = stack[i];
1403 if (element.uint.v8.get_value() != element.ref.v8) {
1404 std::cerr << "Failed at " << i << " with actual u8 value " << static_cast<size_t>(element.ref.v8)
1405 << " and value in uint " << element.uint.v8.get_value() << std::endl;
1406 return false;
1407 }
1408 if (element.uint.v16.get_value() != element.ref.v16) {
1409 std::cerr << "Failed at " << i << " with actual u16 value " << static_cast<size_t>(element.ref.v16)
1410 << " and value in uint " << element.uint.v16.get_value() << std::endl;
1411 return false;
1412 }
1413 if (element.uint.v32.get_value() != element.ref.v32) {
1414 std::cerr << "Failed at " << i << " with actual u32 value " << static_cast<size_t>(element.ref.v32)
1415 << " and value in uint " << element.uint.v32.get_value() << std::endl;
1416 return false;
1417 }
1418 if (element.uint.v64.get_value() != element.ref.v64) {
1419 std::cerr << "Failed at " << i << " with actual u64 value " << static_cast<size_t>(element.ref.v64)
1420 << " and value in uint " << element.uint.v64.get_value() << std::endl;
1421 return false;
1422 }
1423 }
1424 return true;
1425 }
1426};
1427
1428#ifdef HAVOC_TESTING
1429
/**
 * @brief libFuzzer initialization hook: configure the havoc mutator's probability tables.
 *
 * Fills the global `fuzzer_havoc_settings` with hand-tuned constants and then converts the
 * individual structural/value mutation probabilities into cumulative (prefix-sum) distributions,
 * which is the form the custom mutator samples from.
 */
extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv)
{
    (void)argc;
    (void)argv;
    // These are the settings, optimized for the safeuint class (under them, fuzzer reaches maximum expected coverage in
    // 40 seconds)
    fuzzer_havoc_settings = HavocSettings{
        .GEN_LLVM_POST_MUTATION_PROB = 30,          // Out of 200
        .GEN_MUTATION_COUNT_LOG = 5,                // Fully checked
        .GEN_STRUCTURAL_MUTATION_PROBABILITY = 300, // Fully checked
        .GEN_VALUE_MUTATION_PROBABILITY = 700,      // Fully checked
        .ST_MUT_DELETION_PROBABILITY = 100,         // Fully checked
        .ST_MUT_DUPLICATION_PROBABILITY = 80,       // Fully checked
        .ST_MUT_INSERTION_PROBABILITY = 120,        // Fully checked
        .ST_MUT_MAXIMUM_DELETION_LOG = 6,           // Fully checked
        .ST_MUT_MAXIMUM_DUPLICATION_LOG = 2,        // Fully checked
        .ST_MUT_SWAP_PROBABILITY = 50,              // Fully checked
        .VAL_MUT_LLVM_MUTATE_PROBABILITY = 250,     // Fully checked
        .VAL_MUT_MONTGOMERY_PROBABILITY = 130,      // Fully checked
        .VAL_MUT_NON_MONTGOMERY_PROBABILITY = 50,   // Fully checked
        .VAL_MUT_SMALL_ADDITION_PROBABILITY = 110,  // Fully checked
        .VAL_MUT_SPECIAL_VALUE_PROBABILITY = 130    // Fully checked

    };
    // Alternative, kept for experimentation: randomize the havoc settings on every run.
    /*
    std::random_device rd;
    std::uniform_int_distribution<uint64_t> dist(0, ~(uint64_t)(0));
    srandom(static_cast<unsigned int>(dist(rd)));

    fuzzer_havoc_settings =
        HavocSettings{ .GEN_MUTATION_COUNT_LOG = static_cast<size_t>((random() % 8) + 1),
                       .GEN_STRUCTURAL_MUTATION_PROBABILITY = static_cast<size_t>(random() % 100),
                       .GEN_VALUE_MUTATION_PROBABILITY = static_cast<size_t>(random() % 100),
                       .ST_MUT_DELETION_PROBABILITY = static_cast<size_t>(random() % 100),
                       .ST_MUT_DUPLICATION_PROBABILITY = static_cast<size_t>(random() % 100),
                       .ST_MUT_INSERTION_PROBABILITY = static_cast<size_t>((random() % 99) + 1),
                       .ST_MUT_MAXIMUM_DELETION_LOG = static_cast<size_t>((random() % 8) + 1),
                       .ST_MUT_MAXIMUM_DUPLICATION_LOG = static_cast<size_t>((random() % 8) + 1),
                       .ST_MUT_SWAP_PROBABILITY = static_cast<size_t>(random() % 100),
                       .VAL_MUT_LLVM_MUTATE_PROBABILITY = static_cast<size_t>(random() % 100),
                       .VAL_MUT_MONTGOMERY_PROBABILITY = static_cast<size_t>(random() % 100),
                       .VAL_MUT_NON_MONTGOMERY_PROBABILITY = static_cast<size_t>(random() % 100),
                       .VAL_MUT_SMALL_ADDITION_PROBABILITY = static_cast<size_t>(random() % 100),
                       .VAL_MUT_SPECIAL_VALUE_PROBABILITY = static_cast<size_t>(random() % 100)

        };
    while (fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY == 0 &&
           fuzzer_havoc_settings.GEN_VALUE_MUTATION_PROBABILITY == 0) {
        fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY = static_cast<size_t>(random() % 8);
        fuzzer_havoc_settings.GEN_VALUE_MUTATION_PROBABILITY = static_cast<size_t>(random() % 8);
    }
    */

    // fuzzer_havoc_settings.GEN_LLVM_POST_MUTATION_PROB = static_cast<size_t>(((random() % (20 - 1)) + 1) * 10);
    // Debug dump of the chosen settings, kept for experimentation.
    /*
    std::cerr << "CUSTOM MUTATOR SETTINGS:" << std::endl
              << "################################################################" << std::endl
              << "GEN_LLVM_POST_MUTATION_PROB: " << fuzzer_havoc_settings.GEN_LLVM_POST_MUTATION_PROB << std::endl
              << "GEN_MUTATION_COUNT_LOG: " << fuzzer_havoc_settings.GEN_MUTATION_COUNT_LOG << std::endl
              << "GEN_STRUCTURAL_MUTATION_PROBABILITY: " << fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY
              << std::endl
              << "GEN_VALUE_MUTATION_PROBABILITY: " << fuzzer_havoc_settings.GEN_VALUE_MUTATION_PROBABILITY << std::endl
              << "ST_MUT_DELETION_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_DELETION_PROBABILITY << std::endl
              << "ST_MUT_DUPLICATION_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_DUPLICATION_PROBABILITY << std::endl
              << "ST_MUT_INSERTION_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_INSERTION_PROBABILITY << std::endl
              << "ST_MUT_MAXIMUM_DELETION_LOG: " << fuzzer_havoc_settings.ST_MUT_MAXIMUM_DELETION_LOG << std::endl
              << "ST_MUT_MAXIMUM_DUPLICATION_LOG: " << fuzzer_havoc_settings.ST_MUT_MAXIMUM_DUPLICATION_LOG << std::endl
              << "ST_MUT_SWAP_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_SWAP_PROBABILITY << std::endl
              << "VAL_MUT_LLVM_MUTATE_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_LLVM_MUTATE_PROBABILITY
              << std::endl
              << "VAL_MUT_MONTGOMERY_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_MONTGOMERY_PROBABILITY << std::endl
              << "VAL_MUT_NON_MONTGOMERY_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_NON_MONTGOMERY_PROBABILITY
              << std::endl
              << "VAL_MUT_SMALL_ADDITION_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_SMALL_ADDITION_PROBABILITY
              << std::endl
              << "VAL_MUT_SMALL_MULTIPLICATION_PROBABILITY: "
              << fuzzer_havoc_settings.VAL_MUT_SMALL_MULTIPLICATION_PROBABILITY << std::endl
              << "VAL_MUT_SPECIAL_VALUE_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_SPECIAL_VALUE_PROBABILITY
              << std::endl;
    */
    // Build the cumulative distribution for structural mutations (deletion, duplication, insertion, swap).
    std::vector<size_t> structural_mutation_distribution;
    std::vector<size_t> value_mutation_distribution;
    size_t temp = 0;
    temp += fuzzer_havoc_settings.ST_MUT_DELETION_PROBABILITY;
    structural_mutation_distribution.push_back(temp);
    temp += fuzzer_havoc_settings.ST_MUT_DUPLICATION_PROBABILITY;
    structural_mutation_distribution.push_back(temp);
    temp += fuzzer_havoc_settings.ST_MUT_INSERTION_PROBABILITY;
    structural_mutation_distribution.push_back(temp);
    temp += fuzzer_havoc_settings.ST_MUT_SWAP_PROBABILITY;
    structural_mutation_distribution.push_back(temp);
    fuzzer_havoc_settings.structural_mutation_distribution = structural_mutation_distribution;

    // Build the cumulative distribution for value mutations.
    // NOTE(review): only LLVM_MUTATE, SMALL_ADDITION and SPECIAL_VALUE enter the distribution even though
    // MONTGOMERY/NON_MONTGOMERY probabilities are set above — presumably intentional for the uint fuzzer
    // (field-representation mutations don't apply); confirm against the mutator's sampling code.
    temp = 0;
    temp += fuzzer_havoc_settings.VAL_MUT_LLVM_MUTATE_PROBABILITY;
    value_mutation_distribution.push_back(temp);
    temp += fuzzer_havoc_settings.VAL_MUT_SMALL_ADDITION_PROBABILITY;
    value_mutation_distribution.push_back(temp);

    temp += fuzzer_havoc_settings.VAL_MUT_SPECIAL_VALUE_PROBABILITY;
    value_mutation_distribution.push_back(temp);
    fuzzer_havoc_settings.value_mutation_distribution = value_mutation_distribution;
    return 0;
}
1542#endif
1543#ifndef DISABLE_CUSTOM_MUTATORS
1548extern "C" size_t LLVMFuzzerCustomMutator(uint8_t* Data, size_t Size, size_t MaxSize, unsigned int Seed)
1549{
1551 auto fast_random = FastRandom(Seed);
1552 auto size_occupied = ArithmeticFuzzHelper<FuzzerClass>::MutateInstructionBuffer(Data, Size, MaxSize, fast_random);
1553 if ((fast_random.next() % 200) < fuzzer_havoc_settings.GEN_LLVM_POST_MUTATION_PROB) {
1554 size_occupied = LLVMFuzzerMutate(Data, size_occupied, MaxSize);
1555 }
1556 return size_occupied;
1557}
1558
1563extern "C" size_t LLVMFuzzerCustomCrossOver(const uint8_t* Data1,
1564 size_t Size1,
1565 const uint8_t* Data2,
1566 size_t Size2,
1567 uint8_t* Out,
1568 size_t MaxOutSize,
1569 unsigned int Seed)
1570{
1572 auto fast_random = FastRandom(Seed);
1575 auto vecC = ArithmeticFuzzHelper<FuzzerClass>::crossoverInstructionVector(vecA, vecB, fast_random);
1577}
1578
1579#endif
1580
/**
 * @brief libFuzzer entry point: run one fuzz input.
 *
 * Dispatches the raw buffer to UintFuzzBase instantiated over every builder in
 * FuzzerCircuitTypes, using the global variance RNG for execution-time randomness.
 */
extern "C" size_t LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size)
{
    RunWithBuilders<UintFuzzBase, FuzzerCircuitTypes>(Data, Size, VarianceRNG);
    return 0;
}
1590
1591#pragma clang diagnostic pop
static size_t writeInstructionsToBuffer(std::vector< typename T::Instruction > &instructions, uint8_t *Data, size_t MaxSize)
Write instructions into the buffer until there are no instructions left or there is no more space.
Definition: fuzzer.hpp:561
static std::vector< typename T::Instruction > parseDataIntoInstructions(const uint8_t *Data, size_t Size)
Parses a given data buffer into a vector of instructions for testing the arithmetic.
Definition: fuzzer.hpp:520
static size_t MutateInstructionBuffer(uint8_t *Data, size_t Size, size_t MaxSize, FastRandom &rng)
Interpret the data buffer as a series of arithmetic instructions and mutate it accordingly.
Definition: fuzzer.hpp:670
static std::vector< typename T::Instruction > crossoverInstructionVector(const std::vector< typename T::Instruction > &vecA, const std::vector< typename T::Instruction > &vecB, FastRandom &rng)
Splice two instruction vectors into one randomly.
Definition: fuzzer.hpp:458
Class for quickly deterministically creating new random values. We don't care about distribution much...
Definition: fuzzer.hpp:64
Definition: uint.fuzzer.hpp:232
Definition: uint.fuzzer.hpp:467
Definition: uint.fuzzer.hpp:446
This class implements the execution of safeuint with an oracle to detect discrepancies.
Definition: uint.fuzzer.hpp:350
static size_t execute_SHR(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the right-shift operator instruction.
Definition: uint.fuzzer.hpp:1232
static size_t execute_ADD(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the addition operator instruction.
Definition: uint.fuzzer.hpp:935
static size_t execute_GET_BIT(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the GET_BIT instruction.
Definition: uint.fuzzer.hpp:1175
static size_t execute_XOR(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the xor operator instruction.
Definition: uint.fuzzer.hpp:1145
static size_t execute_MULTIPLY(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the multiply instruction.
Definition: uint.fuzzer.hpp:995
static size_t execute_NOT(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the NOT instruction.
Definition: uint.fuzzer.hpp:1319
static size_t execute_SHL(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the left-shift operator instruction.
Definition: uint.fuzzer.hpp:1203
static size_t execute_DIVIDE(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the division operator instruction.
Definition: uint.fuzzer.hpp:1025
static size_t execute_RANDOMSEED(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the RANDOMSEED instruction.
Definition: uint.fuzzer.hpp:1376
static size_t execute_CONSTANT(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the constant instruction (push constant safeuint to the stack)
Definition: uint.fuzzer.hpp:920
static size_t execute_ROR(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the right-rotate operator instruction.
Definition: uint.fuzzer.hpp:1290
static size_t execute_ROL(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the left-rotate operator instruction.
Definition: uint.fuzzer.hpp:1261
static size_t execute_MODULO(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the modulo operator instruction.
Definition: uint.fuzzer.hpp:1055
static size_t execute_OR(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the or operator instruction.
Definition: uint.fuzzer.hpp:1115
static size_t execute_SUBTRACT(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the subtraction operator instruction.
Definition: uint.fuzzer.hpp:965
static size_t execute_AND(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the and operator instruction.
Definition: uint.fuzzer.hpp:1085
static size_t execute_SET(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the SET instruction.
Definition: uint.fuzzer.hpp:1348
A class representing a single fuzzing instruction.
Definition: uint.fuzzer.hpp:53
static Instruction generateRandom(T &rng)
Generate a random instruction.
Definition: uint.fuzzer.hpp:109
static Instruction mutateInstruction(Instruction instruction, T &rng, HavocSettings &havoc_config)
Mutate a single instruction.
Definition: uint.fuzzer.hpp:170
Parser class handles the parsing and writing the instructions back to data buffer.
Definition: uint.fuzzer.hpp:256
static void writeInstruction(Instruction &instruction, uint8_t *Data)
Write a single instruction to buffer.
Definition: uint.fuzzer.hpp:304
static Instruction parseInstructionArgs(uint8_t *Data)
Parse a single instruction from data.
Definition: uint.fuzzer.hpp:265
The class parametrizing Uint fuzzing instructions, execution, etc.
Definition: uint.fuzzer.hpp:33
static bool postProcess(Builder *builder, std::vector< UintFuzzBase::ExecutionHandler > &stack)
Check that the resulting values are equal to expected.
Definition: uint.fuzzer.hpp:1398
Definition: uint256.hpp:25
Definition: standard_circuit_builder.hpp:12
Definition: byte_array.hpp:9
Definition: field.hpp:10
A standard library fixed-width unsigned integer type. Useful, e.g., for hashing. Use safe_uint instea...
Definition: uint.hpp:24
uint ror(const size_t target_rotation) const
Definition: logic.cpp:295
Concept for a simple PRNG which returns a uint32_t when next is called.
Definition: fuzzer.hpp:91
Definition: fuzzer.hpp:27
Definition: uint.fuzzer.hpp:85
Definition: uint.fuzzer.hpp:80
Definition: uint.fuzzer.hpp:76
Definition: uint.fuzzer.hpp:90