Code example #1
def _filter_benchmarks_from_file_imp(results, f, mode):
    assert mode == 'keep' or mode == 'out'
    rri_for_filter = ResultInfo.loadRawResultInfos(f)
    # Collect the keys present in the file
    keys = set()
    for ri in rri_for_filter['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        keys.add(key)
    report_initial(results)
    new_results = []
    if mode == 'keep':
        should_keep = lambda ri: ResultInfoUtil.get_result_info_key(ri) in keys
    elif mode == 'out':
        should_keep = lambda ri: ResultInfoUtil.get_result_info_key(ri) not in keys
    else:
        assert False

    for ri in results:
        if should_keep(ri):
            new_results.append(ri)
            _logger.debug('Keeping benchmark {}'.format(
                ResultInfoUtil.get_result_info_key(ri)))
        else:
            _logger.debug('Removing benchmark {}'.format(
                ResultInfoUtil.get_result_info_key(ri)))

    report_after(new_results, results)
    return new_results
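A minimal usage sketch of the keep/out semantics, assuming (hypothetically) that ResultInfoUtil.get_result_info_key returns the benchmark path stored under a 'benchmark' field:

# Hypothetical stand-ins for the result infos and their keys.
results = [{'benchmark': 'a.smt2'}, {'benchmark': 'b.smt2'}]
keys = {'a.smt2'}  # keys collected from the filter file

keep = [ri for ri in results if ri['benchmark'] in keys]     # mode == 'keep'
out = [ri for ri in results if ri['benchmark'] not in keys]  # mode == 'out'
assert keep == [{'benchmark': 'a.smt2'}]
assert out == [{'benchmark': 'b.smt2'}]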
Code example #2
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('--benchmark-base', type=str, default=os.getcwd())
    parser.add_argument('dest_dir', type=str)

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    # Make destination if it doesn't exist
    try:
        os.mkdir(pargs.dest_dir)
    except FileExistsError:
        pass

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    destination_root = os.path.abspath(pargs.dest_dir)
    assert os.path.exists(destination_root)

    for ri in result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        # Construct source path
        if key.startswith(os.path.sep):
            key = key[1:]
        src_path = os.path.join(pargs.benchmark_base, key)
        if not os.path.exists(src_path):
            _logger.error('{} does not exist'.format(src_path))
            return 1
        # Construct destination path
        dirs = os.path.dirname(key)
        filename = os.path.basename(key)

        dest_dir = os.path.join(destination_root, dirs)
        _logger.debug('Destination dir is {}'.format(dest_dir))
        os.makedirs(dest_dir, exist_ok=True)
        dest_path = os.path.join(dest_dir, filename)
        _logger.info('Copying {} => {}'.format(src_path, dest_path))
        shutil.copy2(src_path, dest_path)
    return 0
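A sketch of the path handling above, with hypothetical paths. The leading separator must be stripped because os.path.join discards everything before an absolute second argument:

import os

key = '/QF_BV/foo/bar.smt2'  # hypothetical result info key
if key.startswith(os.path.sep):
    key = key[1:]
src_path = os.path.join('/benchmarks', key)
dest_dir = os.path.join('/tmp/out', os.path.dirname(key))
assert src_path == '/benchmarks/QF_BV/foo/bar.smt2'
assert dest_dir == '/tmp/out/QF_BV/foo'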
Code example #3
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('ii_template', type=argparse.FileType('r'))
    parser.add_argument('--benchmark-base',
                        dest="benchmark_base",
                        default="",
                        type=str)
    parser.add_argument('--wd-base', dest="wd_base", default="", type=str)
    parser.add_argument(
        '--dump-tags',
        dest="dump_tags",
        nargs='+',
        default=[],
    )
    parser.add_argument(
        '--timeout',
        type=float,
        default=None,
    )
    parser.add_argument(
        '--use-dsoes-wallclock-time',
        action='store_true',
        default=False,
    )
    parser.add_argument(
        '--bool-args',
        dest='bool_args',
        nargs='+',
        default=[],
    )
    parser.add_argument(
        '--output',
        default=sys.stdout,
        type=argparse.FileType('w'),
    )
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)
    _logger.info('Using benchmark base of "{}"'.format(pargs.benchmark_base))
    _logger.info('Using working directory base of "{}"'.format(pargs.wd_base))

    extra_kwargs = {}
    bool_arg_re = re.compile(r'^([a-zA-Z.]+)=(true|false)$')
    for b in pargs.bool_args:
        m = bool_arg_re.match(b)
        if m is None:
            _logger.error('"{}" is not a valid bool assignment'.format(b))
            return 1
        var_name = m.group(1)
        assignment = m.group(2)
        _logger.info('Adding extra param "{}" = {}'.format(
            var_name, assignment))
        if assignment == 'true':
            assignment_as_bool = True
        else:
            assert assignment == 'false'
            assignment_as_bool = False
        extra_kwargs[var_name] = assignment_as_bool

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
        _logger.info('Loading "{}"'.format(pargs.ii_template.name))
        ii_template = ResultInfo.loadRawResultInfos(pargs.ii_template)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    # Do grouping
    _logger.info('Performing merge')
    result_info_list = [result_infos, ii_template]
    key_to_result_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        result_info_list)
    list_was_empty = True
    for reject_list in rejected_result_infos:
        for reject in reject_list:
            list_was_empty = False
            key = ResultInfoUtil.get_result_info_key(reject)
            _logger.info('{} was rejected'.format(key))
    if not list_was_empty:
        return 1
    _logger.info('Merge complete')

    runner = result_infos['misc']['runner']
    _logger.info('Found runner "{}"'.format(runner))
    backend = None
    if 'backend' in result_infos['misc']:
        backend = result_infos['misc']['backend']
    _logger.info('Backend was "{}"'.format(backend))

    output_ri = {
        'results': [],
        'schema_version': result_info_list[0]['schema_version'],
    }

    event_analyser = event_analysis.get_event_analyser_from_runner_name(
        runner,
        soft_timeout=pargs.timeout,
        use_dsoes_wallclock_time=pargs.use_dsoes_wallclock_time,
        **extra_kwargs)
    non_trivial_known_tags = {
        'jfs_generic_unknown',
        'timeout',
        'soft_timeout',
        'jfs_dropped_stdout_bug_unknown',
        'unsupported_bv_sort',
        'unsupported_fp_sort',
        'unsupported_sorts',
    }
    trivial_known_tags = {
        'sat',
        'jfs_dropped_stdout_bug_sat',
        'jfs_dropped_stdout_bug_unsat',
        'unsat',
    }
    trivial_keys = set()
    non_trivial_keys = set()
    for ri in result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        # Construct the info needed to get the event tag
        geti = event_analysis.GETInfo(ri=ri,
                                      wd_base=pargs.wd_base,
                                      benchmark_base=pargs.benchmark_base,
                                      backend=backend)
        tag = event_analyser.get_event_tag(geti)
        if tag is None:
            _logger.error('Unhandled event for "{}"'.format(key))
            _logger.error(pprint.pformat(ri))
            return 1
        # The assumption here is that we are using JFS in dummy solving
        # mode. Benchmarks that aren't sat are non-trivial, so we should
        # annotate them as such.
        is_trivial = False
        if tag in trivial_known_tags:
            is_trivial = True
            trivial_keys.add(key)
        else:
            if tag not in non_trivial_known_tags:
                _logger.error('Unsupported tag {} for {}'.format(tag, key))
                return 1
            non_trivial_keys.add(key)
        corresponding_ri = key_to_result_infos[key][1].copy()
        corresponding_ri['is_trivial'] = is_trivial
        output_ri['results'].append(corresponding_ri)

    _logger.info('# of trivial benchmarks: {}'.format(len(trivial_keys)))
    _logger.info('# of non-trivial benchmarks: {}'.format(
        len(non_trivial_keys)))

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(output_ri)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')

    smtrunner.util.writeYaml(pargs.output, output_ri)

    return 0
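For reference, --bool-args takes name=true / name=false pairs that are forwarded to the event analyser as keyword arguments; a minimal sketch of the parsing, with an illustrative parameter name:

import re

bool_arg_re = re.compile(r'^([a-zA-Z.]+)=(true|false)$')
m = bool_arg_re.match('sort.check=true')  # 'sort.check' is made up
assert m is not None
extra_kwargs = {m.group(1): m.group(2) == 'true'}
assert extra_kwargs == {'sort.check': True}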
Code example #4
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('--benchmark-base',
                        dest="benchmark_base",
                        default="",
                        type=str)
    parser.add_argument('--wd-base', dest="wd_base", default="", type=str)
    parser.add_argument(
        '--dump-tags',
        dest="dump_tags",
        nargs='+',
        default=[],
    )
    parser.add_argument(
        '--timeout',
        type=float,
        default=None,
    )
    parser.add_argument(
        '--use-dsoes-wallclock-time',
        action='store_true',
        default=False,
    )
    parser.add_argument(
        '--bool-args',
        dest='bool_args',
        nargs='+',
        default=[],
    )
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)
    _logger.info('Using benchmark base of "{}"'.format(pargs.benchmark_base))
    _logger.info('Using working directory base of "{}"'.format(pargs.wd_base))

    extra_kwargs = {}
    bool_arg_re = re.compile(r'^([a-zA-Z.]+)=(true|false)$')
    for b in pargs.bool_args:
        m = bool_arg_re.match(b)
        if m is None:
            _logger.error('"{}" is not a valid bool assignment'.format(b))
            return 1
        var_name = m.group(1)
        assignment = m.group(2)
        _logger.info('Adding extra param "{}" = {}'.format(
            var_name, assignment))
        if assignment == 'true':
            assignment_as_bool = True
        else:
            assert assignment == 'false'
            assignment_as_bool = False
        extra_kwargs[var_name] = assignment_as_bool

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    runner = result_infos['misc']['runner']
    _logger.info('Found runner "{}"'.format(runner))
    backend = None
    if 'backend' in result_infos['misc']:
        backend = result_infos['misc']['backend']
    _logger.info('Backend was "{}"'.format(backend))

    event_analyser = event_analysis.get_event_analyser_from_runner_name(
        runner,
        soft_timeout=pargs.timeout,
        use_dsoes_wallclock_time=pargs.use_dsoes_wallclock_time,
        **extra_kwargs)
    tag_to_keys = dict()
    for ri in result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        # Construct the info needed to get the event tag
        geti = event_analysis.GETInfo(ri=ri,
                                      wd_base=pargs.wd_base,
                                      benchmark_base=pargs.benchmark_base,
                                      backend=backend)
        tag = event_analyser.get_event_tag(geti)
        if tag is None:
            _logger.error('Unhandled event for "{}"'.format(key))
            _logger.error(pprint.pformat(ri))
            return 1
        # Record tag
        try:
            cur_set = tag_to_keys[tag]
            cur_set.add(key)
        except KeyError:
            tag_to_keys[tag] = {key}

    # Dump tags
    print("")
    print("TAG COUNTS")
    for tag_name, keys in sorted(tag_to_keys.items(), key=lambda k: k[0]):
        print("{}: {}".format(tag_name, len(keys)))
    # Dump requested tags
    for tag_name in pargs.dump_tags:
        if tag_name in tag_to_keys:
            print("{}: \n{}".format(tag_name,
                                    pprint.pformat(tag_to_keys[tag_name])))
        else:
            _logger.error('Tag "{}" not present'.format(tag_name))
    return 0
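The try/except grouping of keys by tag above is equivalent to using collections.defaultdict; a behaviour-preserving sketch with made-up tags and keys:

from collections import defaultdict

tag_to_keys = defaultdict(set)
for tag, key in [('sat', 'a.smt2'), ('timeout', 'b.smt2'), ('sat', 'c.smt2')]:
    tag_to_keys[tag].add(key)
assert tag_to_keys['sat'] == {'a.smt2', 'c.smt2'}
assert tag_to_keys['timeout'] == {'b.smt2'}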
Code example #5
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('original_result_infos',
                        type=argparse.FileType('r'))
    parser.add_argument('original_result_infos_wd',
                        type=str)
    parser.add_argument('patch_result_infos',
                        type=argparse.FileType('r'))
    parser.add_argument('patch_result_infos_wd',
                        type=str)
    parser.add_argument('output_result_info',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location for result info YAML file')
    parser.add_argument('output_result_infos_wd',
                        type=str)
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    # Validate paths
    if not check_dir_exists(pargs.original_result_infos_wd):
        return 1
    original_result_infos_wd = pargs.original_result_infos_wd
    if not check_dir_exists(pargs.patch_result_infos_wd):
        return 1
    patch_result_infos_wd = pargs.patch_result_infos_wd
    if os.path.exists(pargs.output_result_infos_wd):
        _logger.error('"{}" already exists'.format(pargs.output_result_infos_wd))
        return 1
    # Load YAML files
    original_raw_results_info = load_yaml(pargs.original_result_infos)
    if original_raw_results_info is None:
        return 1
    patch_raw_result_infos = load_yaml(pargs.patch_result_infos)
    if patch_raw_result_infos is None:
        return 1
    _logger.info('Loading done')

    # Group patch results by key for look-up
    key_to_patch_result = dict()
    for ri in patch_raw_result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        assert key not in key_to_patch_result
        key_to_patch_result[key] = ri

    # Construct new results info
    new_rri = original_raw_results_info.copy() # shallow copy
    new_results = []
    new_rri['results'] = new_results
    # Absolute paths to copy into the new working directory,
    # mapped to their destination names
    workdirs_to_copy = dict()

    used_keys = set()
    used_dest_names = set()
    patch_count = 0
    _logger.info('Constructing new results')
    for ri in original_raw_results_info['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        assert key not in used_keys
        ri_to_use = None
        wd_path_prefix = None
        wd_dest_name = None
        if key in key_to_patch_result:
            ri_to_use = key_to_patch_result[key]
            wd_path_prefix = patch_result_infos_wd
            # HACK: Good enough to avoid name collision
            wd_dest_name = os.path.basename(ri_to_use['working_directory']) + "_patched"
            patch_count += 1
        else:
            ri_to_use = ri
            wd_path_prefix = original_result_infos_wd
            wd_dest_name = os.path.basename(ri_to_use['working_directory'])
        wd_path = join_path(wd_path_prefix, ri_to_use['working_directory'])
        if not check_dir_exists(wd_path):
            return 1
        assert wd_path not in workdirs_to_copy
        # Patch paths if necessary
        ri_to_use = patch_ri_paths(ri_to_use, wd_dest_name)
        assert wd_dest_name not in used_dest_names
        workdirs_to_copy[wd_path] = wd_dest_name
        new_results.append(ri_to_use)
        used_keys.add(key)
        used_dest_names.add(wd_dest_name)

    # Compute new results to add
    _logger.info('Adding new results')
    add_count = 0
    new_keys = set(key_to_patch_result.keys()).difference(used_keys)
    for key in new_keys:
        add_count += 1
        new_results.append(key_to_patch_result[key])

    print("# of patched results: {}".format(patch_count))
    print("# of new results: {}".format(add_count))

    # Output the new results as YAML
    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(new_rri)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    _logger.info('Writing to "{}"'.format(pargs.output_result_info.name))
    smtrunner.util.writeYaml(pargs.output_result_info, new_rri)
    _logger.info('Writing done')

    # Now create the new working directory by copying from other directories
    create_new_working_directories(pargs.output_result_infos_wd, workdirs_to_copy)
    return 0
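This excerpt relies on helpers defined elsewhere in the script (check_dir_exists, join_path, load_yaml, patch_ri_paths, create_new_working_directories). Hypothetical minimal versions of the first three, shown only to make the control flow readable:

import logging
import os
import yaml  # PyYAML

_logger = logging.getLogger(__name__)

def check_dir_exists(path):
    # Log and return False instead of raising, matching the callers above.
    if not os.path.isdir(path):
        _logger.error('"{}" is not an existing directory'.format(path))
        return False
    return True

def join_path(prefix, path):
    # Assumption: recorded working directories may be absolute, so strip
    # the leading separator before re-rooting them under the prefix.
    return os.path.join(prefix, path.lstrip(os.path.sep))

def load_yaml(open_file):
    try:
        return yaml.safe_load(open_file)
    except yaml.YAMLError as e:
        _logger.error('Failed to parse "{}":\n{}'.format(open_file.name, e))
        return None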
Code example #6
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos',
                        type=argparse.FileType('r'))
    parser.add_argument('--benchmark-base',
        dest="benchmark_base",
        default="",
        type=str)
    parser.add_argument('--wd-base',
        dest="wd_base",
        default="",
        type=str)
    parser.add_argument('--timeout',
        type=float,
        default=None,
        help='Timeout to assume when creating tags',
    )
    parser.add_argument('--use-dsoes-wallclock-time',
        action='store_true',
        default=False,
    )
    parser.add_argument('--output',
        type=argparse.FileType('w'),
        default=sys.stdout,
    )
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)
    _logger.info('Using benchmark base of "{}"'.format(pargs.benchmark_base))
    _logger.info('Using working directory base of "{}"'.format(pargs.wd_base))

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    runner = result_infos['misc']['runner']
    _logger.info('Found runner "{}"'.format(runner))
    backend = None
    if 'backend' in result_infos['misc']:
        backend = result_infos['misc']['backend']
    _logger.info('Backend was "{}"'.format(backend))

    event_analyser = event_analysis.get_event_analyser_from_runner_name(
        runner,
        soft_timeout=pargs.timeout,
        use_dsoes_wallclock_time=pargs.use_dsoes_wallclock_time)
    if not isinstance(event_analyser, event_analysis.JFSRunnerEventAnalyser):
        _logger.error('Must be a JFS run')
        return 1
    new_results = result_infos.copy()
    new_results['results'] = []
    for ri in result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        wd = ResultInfoUtil.get_result_info_wd(ri)
        geti = event_analysis.GETInfo(
            ri=ri,
            wd_base=pargs.wd_base,
            benchmark_base=pargs.benchmark_base,
            backend=backend
        )

        new_ri = ri.copy()

        num_inputs, num_wrong_sized_inputs, fuzzing_wallclock_time = event_analyser.get_fuzzing_throughput_fields(geti)
        assert num_inputs is None or isinstance(num_inputs, int)
        assert num_wrong_sized_inputs is None or isinstance(num_wrong_sized_inputs, int)
        assert fuzzing_wallclock_time is None or isinstance(fuzzing_wallclock_time, float)
        _logger.info('num_inputs = {} for {}'.format(num_inputs, key))
        _logger.info('num_wrong_sized_inputs = {} for {}'.format(num_wrong_sized_inputs, key))
        _logger.info('fuzzing_wallclock_time = {} for {}'.format(
            fuzzing_wallclock_time,
            key))

        # Get LibFuzzer stats
        libfuzzer_avg_exec = event_analyser.get_libfuzzer_stat_average_exec_per_sec(geti)
        new_ri['libfuzzer_average_exec_per_sec'] = libfuzzer_avg_exec
        _logger.info('libfuzzer_average_exec_per_sec = {} for {}'.format(
            libfuzzer_avg_exec,
            key))

        # Get the event tag so we can determine when the throughput
        # information should be available.
        tag = event_analyser.get_event_tag(geti)
        if tag is None:
            _logger.error('Unhandled event for "{}"'.format(key))
            _logger.error(pprint.pformat(ri))
            return 1
        if tag in {'sat', 'unsat', 'sat_but_expected_unsat', 'unsat_but_expected_sat'}:
            if num_inputs is None:
                _logger.error('num_inputs should not be None for {} ({})'.format(key, wd))
                return 1
            if num_wrong_sized_inputs is None:
                _logger.error('num_wrong_sized_inputs should not be None for {} ({})'.format(key, wd))
                return 1
            if fuzzing_wallclock_time is None:
                _logger.error('fuzzing_wallclock_time should not be None for {} ({})'.format(key, wd))
                return 1

        new_ri['jfs_stat_num_inputs'] = num_inputs
        new_ri['jfs_stat_num_wrong_sized_inputs'] = num_wrong_sized_inputs
        new_ri['jfs_stat_fuzzing_wallclock_time'] = fuzzing_wallclock_time
        new_results['results'].append(new_ri)

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(new_results)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')

    smtrunner.util.writeYaml(pargs.output, new_results)

    return 0
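These scripts share the main(args) convention; the usual entry-point wrapper (assumed, not shown in the excerpts) would be:

import sys

if __name__ == '__main__':
    # Pass everything after the program name so main() sees only its own
    # arguments, and propagate its return code to the shell.
    sys.exit(main(sys.argv[1:]))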