import sys

# Project helpers such as list_analysers, get_analyser, run_benchmark_suite
# and AnalyserError are defined elsewhere in the repository.
def main(suite, analyser, verbose, timeout, files, bench):
    debug = verbose == 2  # two -v flags enable debug output
    exit_code = 0

    analysers = list_analysers() if analyser == 'all' else [analyser]

    for tool in analysers:
        # Reset per tool so that a failed constructor cannot trigger a
        # second cleanup of the previous iteration's instance in `finally`.
        analyser_instance = None
        try:
            analyser_cls = get_analyser(tool)
            analyser_instance = analyser_cls(debug, timeout)
            exit_code = run_benchmark_suite(analyser_instance, suite, verbose,
                                            debug, timeout, files, bench)
        except AnalyserError as e:
            print("Analyser run failed: program '{}' exited with "
                  "error code {}".format(e.cmd, e.returncode))
            exit_code = e.returncode
        finally:
            if analyser_instance:
                print("{} analyser finished. Cleaning up".format(
                    analyser_instance.get_name()))
                analyser_instance.cleanup()

    # "click" setting exit code this way...
    sys.exit(exit_code)
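
main() leans on a handful of project helpers (list_analysers, get_analyser and AnalyserError) whose definitions are not shown in this example. As a rough orientation, a minimal registry along these lines would satisfy the calls above; the names and fields are assumptions, not the project's actual code:

# Hypothetical sketch of the helpers main() relies on; the real
# definitions live elsewhere in the repository and may differ.
class AnalyserError(Exception):
    """Raised when an analyser's external program fails."""
    def __init__(self, cmd, returncode):
        super().__init__("'{}' exited with code {}".format(cmd, returncode))
        self.cmd = cmd
        self.returncode = returncode


_ANALYSERS = {}  # registry: tool name -> analyser class


def register_analyser(name, cls):
    _ANALYSERS[name] = cls


def get_analyser(name):
    return _ANALYSERS[name]


def list_analysers():
    return sorted(_ANALYSERS)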
Example #2
def generate_benchmark_report(suite):
    """Merge the per-analyser YAML reports for `suite` into one HTML report."""
    # code_dir is a pathlib.Path defined elsewhere in the module.
    project_root_dir = code_dir.parent
    suite_dir = project_root_dir / 'benchdata' / suite

    # Dictionary of analyser reports
    reports = {}
    # Common benchmark data
    data = {'benchmarks': {}, 'analyzers': {}}
    for analyser in list_analysers():
        yaml_file = suite_dir / (analyser + ".yaml")

        # Skip when yaml does not exist
        if not yaml_file.exists():
            print("Can not find report from '{}' analyser. Skipping...".format(
                analyser))
            continue

        with open(yaml_file, 'r') as fp:
            try:
                report = yaml.safe_load(fp)
                reports[report['analyzer']] = report

                # Verify that this report was generated by the same
                # benchmark as the reports already loaded.
                if 'suite' in data:
                    if data['benchmark_url_dir'] != report['benchmark_url_dir']:
                        raise Exception(
                            'Report for {} generated by different benchmark {}'
                            .format(analyser, report['benchmark_url_dir']))
                else:
                    data['suite'] = report['suite']
                    data['benchmark_url_dir'] = report['benchmark_url_dir']
                    data['benchmark_link'] = report['benchmark_link']
                    data['benchmark_subdir'] = report['benchmark_subdir']

                # Store general benchmark information
                data['analyzers'][analyser] = {
                    'version': report['version'],
                    'total_time_str': report['total_time_str'],
                    'expected': report['expected'],
                    'error_execution': report['error_execution']
                }
                # Merge benchmark results with already loaded
                for name, result in report['benchmarks'].items():
                    if name not in data['benchmarks']:
                        data['benchmarks'][name] = {}
                    data['benchmarks'][name][analyser] = result
                    data['benchmarks'][name]['bug_type'] = result.get(
                        'bug_type', 'Unconfigured')
                    if 'link' in result.get('expected_data', {}):
                        data['benchmarks'][name]['link'] = result[
                            'expected_data']['link']

                data['benchmark_count'] = len(data['benchmarks'])

            except yaml.YAMLError as exc:
                print("Failed to parse '{}': {}".format(yaml_file, exc))

    print_html_report(data, project_root_dir, suite)
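
generate_benchmark_report() assumes each <analyser>.yaml carries a fixed set of keys. Purely for illustration, a minimal report that the reader above would accept could be produced like this; every value below is made up:

import yaml

# Illustrative report with the keys generate_benchmark_report() reads;
# all values here are invented for demonstration purposes.
report = {
    'analyzer': 'example-analyser',
    'suite': 'Suhabe',
    'benchmark_url_dir': 'https://example.com/benchmarks',
    'benchmark_link': 'https://example.com/benchmarks/index.html',
    'benchmark_subdir': 'Suhabe',
    'version': '0.1.0',
    'total_time_str': '0:01:23',
    'expected': 10,
    'error_execution': 0,
    'benchmarks': {
        'integer_overflow_add': {
            'bug_type': 'Integer Overflow',
            'expected_data': {'link': 'https://example.com/bug'},
        },
    },
}

with open('example-analyser.yaml', 'w') as fp:
    yaml.safe_dump(report, fp)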
Example #3
    # This example is cut off above; the fragment persists the collected
    # results as <analyser>.yaml in the suite's benchdata directory, where
    # generate_benchmark_report() later picks them up.
    os.makedirs(benchdir, exist_ok=True)
    with open(benchdir / (analyser.get_name() + '.yaml'), 'w') as fp:
        yaml.dump(out_data, fp)


# TODO add json config lint function?
@click.command()
@click.option('--suite',
              '-s',
              type=click.Choice(['Suhabe', 'nssc']),
              default='Suhabe',
              help="Benchmark suite to run; "
              "nscc is an abbreviation for not-so-smart-contracts.")
@click.option('--analyser',
              '-a',
              type=click.Choice(list_analysers() + ['all']),
              default='all',
              help="Analyser tool to benchmark")
@click.option(
    '--verbose',
    '-v',
    count=True,
    help="More verbose output; use twice for the most verbose output.")
@click.option('--timeout',
              '-t',
              type=float,
              default=DEFAULT_TIMEOUT,
              help="Maximum time allowed on any single benchmark.")
@click.option('--files/--no-files',
              default=False,
              help="List files in benchmark and exit.")