def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  if FLAGS.simulate and not FLAGS.codegen:
    raise app.UsageError('Must specify --codegen when --simulate is given.')

  # Test that we can write to the crash and summary path.
  for path in (FLAGS.crash_path, FLAGS.summary_path):
    if path:
      gfile.make_dirs(path)
      with gfile.open(os.path.join(path, 'test'), 'w') as f:
        print('test', file=f)

  start = datetime.datetime.now()

  physical_core_count = psutil.cpu_count(logical=False)
  worker_count = FLAGS.worker_count or physical_core_count
  worker_count = max(worker_count, 1)  # Need at least one worker.
  queues = (multiprocess.get_user_data() or
            [mp.Queue() for _ in range(worker_count)])
  queues = queues[:worker_count]

  print('-- Creating pool of {} workers; physical core count {}'.format(
      worker_count, physical_core_count))
  workers = []
  for i in range(worker_count):
    queue = None if multiprocess.has_user_data_support() else queues[i]
    target = run_fuzz_multiprocess.do_worker_task
    args = (i, queue, FLAGS.crash_path, FLAGS.summary_path,
            FLAGS.save_temps_path, FLAGS.minimize_ir)
    worker = multiprocess.Process(target=target, args=args)
    worker.start()
    workers.append(worker)

  duration_str = FLAGS.duration
  duration = None if duration_str is None else cli_helpers.parse_duration(
      duration_str)

  seed = FLAGS.seed
  if not seed:
    seed = random.randrange(0, 1 << 31)
    print('-- Using randomly generated seed:', seed)
    sys.stdout.flush()

  generator_options = ast_generator.AstGeneratorOptions(
      disallow_divide=FLAGS.disallow_divide,
      emit_loops=FLAGS.emit_loops,
      short_samples=FLAGS.short_samples,
      max_width_bits_types=FLAGS.max_width_bits_types,
      max_width_aggregate_types=FLAGS.max_width_aggregate_types)

  default_sample_options = sample.SampleOptions(
      convert_to_ir=True,
      optimize_ir=True,
      use_jit=FLAGS.use_llvm_jit,
      codegen=FLAGS.codegen,
      simulate=FLAGS.simulate,
      simulator=FLAGS.simulator,
      use_system_verilog=FLAGS.use_system_verilog)

  sample_count = run_fuzz_multiprocess.do_generator_task(
      queues,
      seed,
      generator_options,
      FLAGS.sample_count,
      FLAGS.calls_per_sample,
      default_sample_options=default_sample_options,
      duration=duration,
      print_samples=FLAGS.print_samples)

  for i, worker in enumerate(workers):
    print('-- Joining on worker {}'.format(i))
    worker.join()

  delta = datetime.datetime.now() - start
  elapsed = delta.total_seconds()
  print(
      '-- Elapsed end-to-end: {} = {:.2f} seconds; {:,} samples; {:.2f} samples/s'
      .format(delta, elapsed, sample_count, sample_count / elapsed))
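

# Hypothetical entry point, not shown in the excerpt above: since main() reads
# absl FLAGS and raises app.UsageError, it is presumably invoked via absl's
# app.run(). A minimal sketch of that guard, under that assumption:
if __name__ == '__main__':
  app.run(main)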


def do_worker_task(workerno: int,
                   queue: Optional[mp.Queue],
                   crash_path: Text,
                   summary_path: Optional[Text] = None,
                   save_temps_path: Optional[Text] = None,
                   minimize_ir: bool = True) -> None:
  """Runs worker task, receiving commands from generator and executing them."""
  queue = queue or multiprocess.get_user_data()[workerno]
  crashers = 0
  calls = 0
  print('---- Started worker {}'.format(workerno))
  sys.stdout.flush()
  start = datetime.datetime.now()

  # Local file to write the summary information to before writing out to the
  # potentially remote (i.e. CNS) summary file. Avoids a potential CNS write
  # with every sample. Instead data is written out in batches.
  summary_file = os.path.join(
      summary_path, 'summary_%d.binarypb' % workerno) if summary_path else None
  summary_temp_file = tempfile.mkstemp(
      prefix='temp_summary_')[1] if summary_path else None

  i = 0  # Silence pylint warning.
  for i in itertools.count():
    message = queue.get()
    if message.command == Command.STOP:
      break
    assert message.command == Command.RUN, message.command
    calls += len(message.sample.args_batch)

    run_dir = None
    if save_temps_path:
      run_dir = os.path.join(save_temps_path, str(message.sampleno))
      os.makedirs(run_dir)
    else:
      run_dir = tempfile.mkdtemp(prefix='run_fuzz_')

    try:
      run_fuzz.run_sample(
          message.sample,
          run_dir,
          summary_file=summary_temp_file,
          generate_sample_ns=message.generate_sample_ns)
    except sample_runner.SampleError as e:
      crashers += 1
      record_crasher(workerno, message.sampleno, minimize_ir, message.sample,
                     run_dir, crash_path, crashers, str(e))

    if summary_file and i % 25 == 0:
      # Append the local temporary summary file to the actual, potentially
      # remote one, and delete the temporary file.
      with gfile.open(summary_temp_file, 'rb') as f:
        summaries = f.read()
      with gfile.open(summary_file, 'ab+') as f:
        f.write(summaries)
      gfile.remove(summary_temp_file)

    if not save_temps_path:
      shutil.rmtree(run_dir)

    # TODO(leary): 2020-08-28 Turn this into an option.
    if i != 0 and i % 16 == 0:
      elapsed = (datetime.datetime.now() - start).total_seconds()
      print('---- Worker {:3}: {:8.2f} samples/s {:8.2f} calls/s'.format(
          workerno, i / elapsed, calls / elapsed))
      sys.stdout.flush()

  elapsed = (datetime.datetime.now() - start).total_seconds()
  print(
      '---- Worker {:3} finished! {:3} crashers; {:8.2f} samples/s; {:8.2f} calls/s'
      .format(workerno, crashers, i / elapsed, calls / elapsed))
  sys.stdout.flush()
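

def _worker_stop_smoke_test():
  """Hypothetical smoke test; not part of the original module.

  A minimal sketch assuming that a queue message only needs a `.command`
  attribute and that Command.STOP is the shutdown sentinel handled by
  do_worker_task above.
  """
  import collections  # Local import; only needed for this illustrative test.

  stop_message = collections.namedtuple('Message', ['command'])(Command.STOP)
  queue = mp.Queue()
  queue.put(stop_message)
  # The worker should consume the single STOP message, print its final stats
  # line, and return without producing any crashers.
  do_worker_task(workerno=0, queue=queue, crash_path=tempfile.mkdtemp())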