def main(args):
    global _logger
    global _fail_count
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('--base', type=str, default="")
    parser.add_argument('-o', '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    # START Apply filters
    new_results = result_infos['results']
    new_results = add_dsoes_wallclock_time(new_results, pargs.base)
    # END Apply filters
    new_result_infos = result_infos
    new_result_infos['results'] = new_results

    if _fail_count > 0:
        _logger.warning('Failed to parse {} files'.format(_fail_count))

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(new_result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    smtrunner.util.writeYaml(pargs.output, new_result_infos)
    return 0
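# A minimal sketch of what `add_dsoes_wallclock_time()` is assumed to do:
# attach a `dsoes_wallclock_time` field to each result, read from a per-run
# log under the result's working directory (resolved against `base`), and
# bump the global `_fail_count` when the value cannot be parsed. The log
# file name and format here are hypothetical; the real helper is defined
# elsewhere in this repository.
def _sketch_add_dsoes_wallclock_time(results, base):
    import os
    global _fail_count
    new_results = []
    for ri in results:
        wd = os.path.join(base, ri['working_directory'])
        log_path = os.path.join(wd, 'dsoes_wallclock.txt')  # hypothetical file
        try:
            with open(log_path, 'r') as f:
                ri['dsoes_wallclock_time'] = float(f.read().strip())
        except (IOError, ValueError):
            _fail_count += 1
            ri['dsoes_wallclock_time'] = None
        new_results.append(ri)
    return new_results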
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('ii_template', type=argparse.FileType('r'))
    parser.add_argument('--benchmark-base', dest="benchmark_base", default="", type=str)
    parser.add_argument('--wd-base', dest="wd_base", default="", type=str)
    parser.add_argument('--dump-tags', dest="dump_tags", nargs='+', default=[])
    parser.add_argument('--timeout', type=float, default=None)
    parser.add_argument('--use-dsoes-wallclock-time', action='store_true', default=False)
    parser.add_argument('--bool-args', dest='bool_args', nargs='+', default=[])
    parser.add_argument('--output', default=sys.stdout, type=argparse.FileType('w'))
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)
    _logger.info('Using benchmark base of "{}"'.format(pargs.benchmark_base))
    _logger.info('Using working directory base of "{}"'.format(pargs.wd_base))

    extra_kwargs = {}
    bool_arg_re = re.compile(r'^([a-zA-Z.]+)=(true|false)$')
    for b in pargs.bool_args:
        m = bool_arg_re.match(b)
        if m is None:
            _logger.error('"{}" is not a valid bool assignment'.format(b))
            return 1
        var_name = m.group(1)
        assignment = m.group(2)
        _logger.info('Adding extra param "{}" = {}'.format(var_name, assignment))
        if assignment == 'true':
            assignment_as_bool = True
        else:
            assert assignment == 'false'
            assignment_as_bool = False
        extra_kwargs[var_name] = assignment_as_bool

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
        _logger.info('Loading "{}"'.format(pargs.ii_template.name))
        ii_template = ResultInfo.loadRawResultInfos(pargs.ii_template)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    # Do grouping
    _logger.info('Performing merge')
    result_info_list = [result_infos, ii_template]
    key_to_result_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        result_info_list)
    list_was_empty = True
    for index, reject_list in enumerate(rejected_result_infos):
        for reject in reject_list:
            list_was_empty = False
            key = ResultInfoUtil.get_result_info_key(reject)
            _logger.info('{} was rejected'.format(key))
    if not list_was_empty:
        return 1
    _logger.info('Merge complete')

    runner = result_infos['misc']['runner']
    _logger.info('Found runner "{}"'.format(runner))
    backend = None
    if 'backend' in result_infos['misc']:
        backend = result_infos['misc']['backend']
        _logger.info('Backend was "{}"'.format(backend))

    output_ri = {
        'results': [],
        'schema_version': result_info_list[0]['schema_version'],
    }
    event_analyser = event_analysis.get_event_analyser_from_runner_name(
        runner,
        soft_timeout=pargs.timeout,
        use_dsoes_wallclock_time=pargs.use_dsoes_wallclock_time,
        **extra_kwargs)
    tag_to_keys = dict()
    non_trivial_known_tags = {
        'jfs_generic_unknown',
        'timeout',
        'soft_timeout',
        'jfs_dropped_stdout_bug_unknown',
        'unsupported_bv_sort',
        'unsupported_fp_sort',
        'unsupported_sorts',
    }
    trivial_known_tags = {
        'sat',
        'jfs_dropped_stdout_bug_sat',
        'jfs_dropped_stdout_bug_unsat',
        'unsat',
    }
    trivial_keys = set()
    non_trivial_keys = set()
    for ri in result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        # Construct get event tag info
        geti = event_analysis.GETInfo(
            ri=ri,
            wd_base=pargs.wd_base,
            benchmark_base=pargs.benchmark_base,
            backend=backend)
        tag = event_analyser.get_event_tag(geti)
        if tag is None:
            _logger.error('Unhandled event for "{}"'.format(key))
            _logger.error(pprint.pformat(ri))
            return 1
        # The assumption here is that we are using JFS in dummy solving
        # mode. Benchmarks that aren't sat are non-trivial and so we should
        # annotate them as such.
        is_trivial = False
        if tag in trivial_known_tags:
            is_trivial = True
            trivial_keys.add(key)
        else:
            if tag not in non_trivial_known_tags:
                _logger.error('Unsupported tag {} for {}'.format(tag, key))
                return 1
            non_trivial_keys.add(key)
        corresponding_ri = key_to_result_infos[key][1].copy()
        corresponding_ri['is_trivial'] = is_trivial
        output_ri['results'].append(corresponding_ri)

    _logger.info('# of trivial benchmarks: {}'.format(len(trivial_keys)))
    _logger.info('# of non-trivial benchmarks: {}'.format(len(non_trivial_keys)))

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(output_ri)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    smtrunner.util.writeYaml(pargs.output, output_ri)
    return 0
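# The trivial/non-trivial split above reduces to membership in two fixed tag
# sets. A standalone restatement of that predicate (illustrative only; the
# script inlines this logic rather than calling a helper):
def _sketch_is_trivial_tag(tag, trivial_tags, non_trivial_tags):
    """Return True/False for known tags, raise for anything unrecognised."""
    if tag in trivial_tags:
        return True
    if tag in non_trivial_tags:
        return False
    raise ValueError('Unsupported tag {}'.format(tag))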
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('original_result_infos', type=argparse.FileType('r'))
    parser.add_argument('original_result_infos_wd', type=str)
    parser.add_argument('patch_result_infos', type=argparse.FileType('r'))
    parser.add_argument('patch_result_infos_wd', type=str)
    parser.add_argument('output_result_info',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location for result info YAML file')
    parser.add_argument('output_result_infos_wd', type=str)
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    # Validate paths
    if not check_dir_exists(pargs.original_result_infos_wd):
        return 1
    original_result_infos_wd = pargs.original_result_infos_wd
    if not check_dir_exists(pargs.patch_result_infos_wd):
        return 1
    patch_result_infos_wd = pargs.patch_result_infos_wd
    if os.path.exists(pargs.output_result_infos_wd):
        _logger.error('"{}" already exists'.format(pargs.output_result_infos_wd))
        return 1

    # Load YAML files
    original_raw_results_info = load_yaml(pargs.original_result_infos)
    if original_raw_results_info is None:
        return 1
    patch_raw_result_infos = load_yaml(pargs.patch_result_infos)
    if patch_raw_result_infos is None:
        return 1
    _logger.info('Loading done')

    # Group patch results by key for look-up
    key_to_patch_result = dict()
    for ri in patch_raw_result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        assert key not in key_to_patch_result
        key_to_patch_result[key] = ri

    # Construct new results info
    new_rri = original_raw_results_info.copy()  # shallow copy
    new_results = []
    new_rri['results'] = new_results
    # Absolute paths to copy into new working directory map to destination name
    workdirs_to_copy = dict()
    used_keys = set()
    used_dest_names = set()
    patch_count = 0
    _logger.info('Constructing new results')
    for ri in original_raw_results_info['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        assert key not in used_keys
        ri_to_use = None
        wd_path_prefix = None
        wd_dest_name = None
        if key in key_to_patch_result:
            ri_to_use = key_to_patch_result[key]
            wd_path_prefix = patch_result_infos_wd
            # HACK: Good enough to avoid name collision
            wd_dest_name = os.path.basename(ri_to_use['working_directory']) + "_patched"
            patch_count += 1
        else:
            ri_to_use = ri
            wd_path_prefix = original_result_infos_wd
            wd_dest_name = os.path.basename(ri_to_use['working_directory'])
        wd_path = join_path(wd_path_prefix, ri_to_use['working_directory'])
        if not check_dir_exists(wd_path):
            return 1
        assert wd_path not in workdirs_to_copy
        # Patch paths if necessary
        ri_to_use = patch_ri_paths(ri_to_use, wd_dest_name)
        assert wd_dest_name not in used_dest_names
        workdirs_to_copy[wd_path] = wd_dest_name
        new_results.append(ri_to_use)
        used_keys.add(key)
        used_dest_names.add(wd_dest_name)

    # Compute new results to add
    _logger.info('Adding new results')
    add_count = 0
    new_keys = set(key_to_patch_result.keys()).difference(used_keys)
    for key in new_keys:
        add_count += 1
        new_results.append(key_to_patch_result[key])
    print("# of patched results: {}".format(patch_count))
    print("# of new results: {}".format(add_count))

    # Output the new results as YAML
    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(new_rri)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    _logger.info('Writing to "{}"'.format(pargs.output_result_info.name))
    smtrunner.util.writeYaml(pargs.output_result_info, new_rri)
    _logger.info('Writing done')
    # Now create the new working directory by copying from other directories
    create_new_working_directories(pargs.output_result_infos_wd, workdirs_to_copy)
    return 0
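# A sketch of what `create_new_working_directories()` is assumed to do:
# create the output directory and copy every source working directory into
# it under its chosen destination name. The copy semantics are an
# assumption; the real helper is defined elsewhere in this repository.
def _sketch_create_new_working_directories(output_wd, workdirs_to_copy):
    import os
    import shutil
    os.makedirs(output_wd)  # intentionally fails if it already exists
    for src_path, dest_name in workdirs_to_copy.items():
        shutil.copytree(src_path, os.path.join(output_wd, dest_name))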
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'), nargs='+')
    parser.add_argument(
        '--bin-width',
        type=int,
        default=5,
        dest='bin_width',
        help='Histogram bin width in seconds (default %(default)s)',
    )
    parser.add_argument(
        '--use-result-with-index',
        type=int,
        dest='use_result_with_index',
        default=-1,
        help='When outputting result info pick the result info for the '
             'benchmark from the specified index. If -1 then pick from the '
             'relevant result info',
    )
    parser.add_argument(
        '--max-time',
        type=int,
        default=120,
        dest='max_time',
        help='Assumed max time in seconds (default %(default)s)',
    )
    parser.add_argument('--random-seed', type=int, default=0, dest='random_seed')
    parser.add_argument(
        '--bound',
        type=int,
        default=100,
        help='Maximum number of benchmarks to gather (default %(default)s)',
    )
    parser.add_argument(
        '--keep-on-pick',
        dest='keep_on_pick',
        help='When selecting a benchmark keep it in the histogram',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--selection-mode',
        dest='selection_mode',
        default='inv_height_probability',
        choices=['inv_height_probability', 'rand_bin'],
    )
    parser.add_argument(
        '--seed-selection-from',
        dest='seed_selection_from',
        default=None,
        help='Seed selected benchmarks from supplied invocation info file')
    parser.add_argument('-o', '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')
    parser.add_argument('--hack-check-bins-included-with-count-lt',
                        type=int,
                        dest='hack_check_bins_included_with_count_less_than',
                        default=None)
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    result_infos_list = []
    for of in pargs.result_infos:
        try:
            _logger.info('Loading "{}"'.format(of.name))
            result_infos = ResultInfo.loadRawResultInfos(of)
            result_infos_list.append(result_infos)
        except ResultInfo.ResultInfoValidationError as e:
            _logger.error('Validation error:\n{}'.format(e))
            return 1
    _logger.info('Loading done. Loaded {} result info files'.format(
        len(result_infos_list)))

    # Set random seed
    random.seed(pargs.random_seed)

    key_fn = ResultInfoUtil.get_result_info_key
    # Benchmarks to keep. This is used for checking if a HSM
    # has given us a benchmark we have already selected.
    btk = set()
    btk_to_result_info_index = dict()
    if pargs.seed_selection_from:
        if not os.path.exists(pargs.seed_selection_from):
            _logger.error('{} does not exist'.format(pargs.seed_selection_from))
            return 1
        with open(pargs.seed_selection_from, 'r') as f:
            _logger.info('Seeding selection from {}'.format(f.name))
            ris_for_seeding = ResultInfo.loadRawResultInfos(f)
            # Now pull keys from the result info file
            for ri in ris_for_seeding['results']:
                if len(btk) >= pargs.bound:
                    _logger.info('Bound reached')
                    break
                key_for_ri = key_fn(ri)
                btk.add(key_for_ri)
                # HACK: Lie about the source
                btk_to_result_info_index[key_for_ri] = 0
            _logger.info('Seeded selection with {} benchmarks'.format(len(btk)))
            assert len(btk) == len(btk_to_result_info_index.keys())

    # Group
    key_to_result_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        result_infos_list, key_fn=key_fn)
    if len(rejected_result_infos):
        for index, l in enumerate(rejected_result_infos):
            if len(l) > 0:
                _logger.error('Found rejected result infos:\n{}'.format(
                    pprint.pformat(rejected_result_infos)))
                return 1

    histogram_sms = []
    for result_infos in result_infos_list:
        histogram_sms.append(
            HistogramStateMachine(result_infos['results'], pargs.bin_width,
                                  pargs.max_time, key_fn))
    original_sms = histogram_sms.copy()  # Shallow copy

    # HACK: Do check
    desirable_keys = set()
    if pargs.hack_check_bins_included_with_count_less_than:
        _logger.info(
            'Doing hack - check bins with count less than {} are included'.format(
                pargs.hack_check_bins_included_with_count_less_than))
        assert pargs.hack_check_bins_included_with_count_less_than > 0
        # Walk through the bins and collect all keys where
        # the bin count is less than the specified value.
        for hsm in histogram_sms:
            for bin in hsm.bins:
                if bin.getSize() < pargs.hack_check_bins_included_with_count_less_than:
                    _logger.info('Adding keys from bin {} to desirable keys'.format(
                        bin.getBounds()))
                    _logger.debug('Adding keys:\n{}'.format(
                        pprint.pformat(bin.getKeys())))
                    desirable_keys.update(bin.getKeys())
        _logger.info('{} keys in set of desirable benchmarks'.format(
            len(desirable_keys)))

    # Keep picking round robin between the state machines until
    # the bound is reached.
    _logger.info(
        'Beginning {} selection with bound of {} and seed of {} benchmarks'.format(
            pargs.selection_mode, pargs.bound, len(btk)))
    initialBtkSize = len(btk)
    while len(btk) < pargs.bound:
        if len(histogram_sms) == 0:
            _logger.warning('Exhausted all histogram SMs')
            break
        hsms_to_remove = set()
        # Go through SMs in round robin order.
        for index, hsm in enumerate(histogram_sms):
            if len(btk) >= pargs.bound:
                # Don't allow the bound to be exceeded.
                break
            benchmark_key = None
            while benchmark_key is None:
                # Based on the selection mode pick a benchmark
                if pargs.selection_mode == 'inv_height_probability':
                    benchmark_key = hsm.getNext(remove_item=not pargs.keep_on_pick)
                elif pargs.selection_mode == 'rand_bin':
                    benchmark_key = hsm.getNextRandBin(remove_item=not pargs.keep_on_pick)
                else:
                    raise Exception('Unsupported selection mode')
                _logger.debug('Got key {}'.format(benchmark_key))
                if benchmark_key is None:
                    # hsm exhausted
                    _logger.debug('HSM index {} exhausted'.format(index))
                    hsms_to_remove.add(index)
                    break
                if benchmark_key in btk:
                    _logger.debug('Already have key {}'.format(benchmark_key))
                    # We already have this benchmark.
                    # Try picking another.
                    benchmark_key = None
                    continue
            if benchmark_key is not None:
                _logger.debug('Adding key {}'.format(benchmark_key))
                assert benchmark_key not in btk_to_result_info_index
                assert benchmark_key not in btk
                btk_to_result_info_index[benchmark_key] = index
                btk.add(benchmark_key)
        if len(hsms_to_remove) > 0:
            new_hsms = []
            for index, hsm in enumerate(histogram_sms):
                if index not in hsms_to_remove:
                    _logger.debug('keeping HSM {}'.format(index))
                    new_hsms.append(hsm)
                else:
                    _logger.debug('dropping HSM {}'.format(index))
            histogram_sms = new_hsms
    _logger.info('Selected {} benchmarks'.format(len(btk) - initialBtkSize))
    _logger.info('Final selection has {} benchmarks'.format(len(btk)))
    assert len(btk) == len(btk_to_result_info_index)

    new_result_infos = {
        'results': [],
        'schema_version': 0,
    }
    new_results_list = new_result_infos['results']
    used_programs = set()
    # Grab the result info by key
    for key, result_info_index in sorted(btk_to_result_info_index.items(),
                                         key=lambda tup: tup[0]):
        # Just pick the first
        index_to_use = pargs.use_result_with_index
        if index_to_use == -1:
            # Use the result info corresponding to the result info we took it from
            index_to_use = result_info_index
        _logger.debug('Selected key {} from result_info_index {}'.format(
            key, index_to_use))
        ri = key_to_result_infos[key][index_to_use]
        _logger.debug('Grabbed {}'.format(key_to_result_infos[key]))
        _logger.debug('Grabbed {}'.format(ri))
        new_results_list.append(ri)
        if key in used_programs:
            # Sanity check
            _logger.error('Selected key ({}) that has already been used'.format(key))
            return 1
        used_programs.add(key)

    # HACK:
    if pargs.hack_check_bins_included_with_count_less_than:
        missing_desirable_benchmarks = desirable_keys.difference(
            set(btk_to_result_info_index.keys()))
        _logger.warning('{} desirable benchmarks missing from selection'.format(
            len(missing_desirable_benchmarks)))
        if len(missing_desirable_benchmarks) > 0:
            _logger.error(
                '{} desirable benchmarks missing from selection:\n{}'.format(
                    len(missing_desirable_benchmarks),
                    pprint.pformat(missing_desirable_benchmarks)))
            return 1

    # Validate against schema
    try:
        _logger.info('Validating new_result_infos')
        ResultInfo.validateResultInfos(new_result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    smtrunner.util.writeYaml(pargs.output, new_result_infos)
    return 0
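# The 'inv_height_probability' mode above presumably weights histogram bins
# inversely by their size so that sparsely populated bins are sampled more
# often. A standalone sketch of that weighting over plain lists of keys
# (illustrative only; HistogramStateMachine's real policy may differ):
def _sketch_pick_key_inv_height(bins):
    import random
    non_empty = [b for b in bins if len(b) > 0]
    if len(non_empty) == 0:
        return None  # all bins exhausted
    # Weight each bin by the reciprocal of its population.
    weights = [1.0 / len(b) for b in non_empty]
    chosen_bin = random.choices(non_empty, weights=weights, k=1)[0]
    return random.choice(chosen_bin)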
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('-o', '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    keys_to_strip = [
        'sat',
        'wallclock_time',
        'working_directory',
        'exit_code',
        'out_of_memory',
        'stdout_log_file',
        'stderr_log_file',
        'user_cpu_time',
        'sys_cpu_time',
        'backend_timeout',
        'merged_result',
        'error',
        'dsoes_wallclock_time',
        'event_tag',
    ]
    for r in result_infos['results']:
        if 'expected_sat' in r and 'sat' in r:
            # The result might be merged so use `get_sat_from_result_info`
            expected_sat, es_conflict = analysis.get_expected_sat_from_result_info(r)
            sat, s_conflict = analysis.get_sat_from_result_info(r)
            if es_conflict or s_conflict:
                _logger.warning('Found conflict for {}'.format(r['benchmark']))
            # If the result is merged this will flatten the result
            if expected_sat == 'unknown' and sat != 'unknown':
                _logger.info('Copying over sat for {}'.format(r['benchmark']))
                r['expected_sat'] = sat
            else:
                _logger.debug('Preserving expected_sat')
                r['expected_sat'] = expected_sat
        # Strip keys
        for key in keys_to_strip:
            r.pop(key, None)
    if 'misc' in result_infos:
        result_infos.pop('misc')

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    smtrunner.util.writeYaml(pargs.output, result_infos)
    return 0
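# Illustrative before/after for one result entry under the stripping above
# (field values are made up; only 'expected_sat' and fields outside
# keys_to_strip survive):
#   before: {'benchmark': 'a.smt2', 'expected_sat': 'unknown', 'sat': 'sat',
#            'wallclock_time': 1.2, 'exit_code': 0, ...}
#   after:  {'benchmark': 'a.smt2', 'expected_sat': 'sat'}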
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'), nargs='+')
    parser.add_argument('--wd-bases', type=str, default=[], nargs='+')
    parser.add_argument(
        '--allow-merge-failures',
        dest='allow_merge_failures',
        default=False,
        action='store_true',
    )
    parser.add_argument('-o', '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    if len(pargs.wd_bases) > 0:
        if len(pargs.wd_bases) != len(pargs.result_infos):
            _logger.error(
                'Number of working directory bases must match the number of '
                'result info files')
            return 1
        for wd_base in pargs.wd_bases:
            if not os.path.exists(wd_base):
                _logger.error('"{}" does not exist'.format(wd_base))
                return 1
            if not os.path.isdir(wd_base):
                _logger.error('"{}" is not a directory'.format(wd_base))
                return 1
            if not os.path.isabs(wd_base):
                _logger.error('"{}" must be an absolute path'.format(wd_base))
                return 1

    index_to_raw_result_infos = []
    for index, result_infos_file in enumerate(pargs.result_infos):
        try:
            _logger.info('Loading "{}"'.format(result_infos_file.name))
            result_infos = ResultInfo.loadRawResultInfos(result_infos_file)
            index_to_raw_result_infos.append(result_infos)
        except ResultInfo.ResultInfoValidationError as e:
            _logger.error('Validation error:\n{}'.format(e))
            return 1
    _logger.info('Loading done')
    result_infos = None

    # HACK: Do something smarter here
    merged = {
        'misc': {
            'runner': index_to_raw_result_infos[0]['misc']['runner'],
        },
        'results': [],
        'schema_version': index_to_raw_result_infos[0]['schema_version'],
    }

    # Perform grouping by benchmark name
    key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        index_to_raw_result_infos)
    if len(rejected_result_infos) > 0:
        l_was_empty = True
        for index, l in enumerate(rejected_result_infos):
            _logger.warning('Index {} had {} rejections'.format(index, len(l)))
            if len(l) > 0:
                _logger.warning('There were rejected result infos')
                l_was_empty = False
        if not l_was_empty:
            if pargs.allow_merge_failures:
                _logger.warning('Merge failures being allowed')
            else:
                _logger.error('Merge failures are not allowed')
                return 1

    merged_key_result_info, merge_failures = ResultInfoUtil.merge_raw_result_infos(
        key_to_results_infos,
        allow_merge_errors=False,
        wd_bases=pargs.wd_bases if len(pargs.wd_bases) > 0 else None)
    if len(merge_failures) > 0:
        _logger.error('There were merge failures')
        return 1
    # Sort by key
    for key, merged_result in sorted(merged_key_result_info.items()):
        merged['results'].append(merged_result)

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(merged)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    smtrunner.util.writeYaml(pargs.output, merged)
    return 0
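# A simplified sketch of the grouping step, assuming
# `ResultInfoUtil.group_result_infos_by()` keys each result by benchmark and
# rejects results whose key is not present in every input (the real helper
# may differ in details):
def _sketch_group_result_infos_by(raw_result_infos_list, key_fn):
    key_to_results = {}
    rejected = [[] for _ in raw_result_infos_list]
    for index, rri in enumerate(raw_result_infos_list):
        for ri in rri['results']:
            slots = key_to_results.setdefault(
                key_fn(ri), [None] * len(raw_result_infos_list))
            slots[index] = ri
    for key, ris in list(key_to_results.items()):
        if any(ri is None for ri in ris):
            # Reject results for benchmarks missing from some input
            for index, ri in enumerate(ris):
                if ri is not None:
                    rejected[index].append(ri)
            del key_to_results[key]
    return key_to_results, rejected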
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('--base', type=str, default="")
    parser.add_argument('--random-seed', type=int, default=0, dest='random_seed')
    parser.add_argument('-o', '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')
    # START filter arguments
    parser.add_argument(
        '--filter-out-expected-sat',
        dest='filter_out_expected_sat',
        nargs='+',  # gather into list
        choices=['sat', 'unsat', 'unknown'],
        default=[],
    )
    parser.add_argument(
        '--filter-out-sat',
        dest='filter_out_sat',
        nargs='+',  # gather into list
        choices=['sat', 'unsat', 'unknown'],
        default=[],
    )
    parser.add_argument(
        '--filter-random-percentage',
        dest='filter_random_percentange',
        type=float,
        default=None,
    )
    parser.add_argument(
        '--filter-keep-benchmark-matching-regex',
        dest='filter_keep_benchmarks_matching_regex',
        type=str,
        default=None,
    )
    parser.add_argument(
        '--filter-out-benchmark-matching-regex',
        dest='filter_out_benchmarks_matching_regex',
        type=str,
        default=None,
    )
    parser.add_argument(
        '--filter-keep-benchmark-matching-exit-code',
        dest='filter_keep_benchmarks_matching_exit_code',
        type=int,
        default=None,
    )
    parser.add_argument(
        '--filter-keep-non-trivial',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--filter-keep-trivial',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--filter-keep-benchmarks-from-file',
        dest='filter_keep_benchmarks_from_file',
        type=argparse.FileType('r'),
    )
    parser.add_argument(
        '--filter-out-benchmarks-from-file',
        dest='filter_out_benchmarks_from_file',
        type=argparse.FileType('r'),
    )
    # END filter arguments
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    # Set random seed
    random.seed(pargs.random_seed)

    # START Apply filters
    new_results = result_infos['results']
    if len(pargs.filter_out_expected_sat) > 0:
        new_results = filter_out_expected_sat_types(
            new_results, pargs.filter_out_expected_sat, pargs.base)
    if len(pargs.filter_out_sat) > 0:
        new_results = filter_out_sat_types(new_results, pargs.filter_out_sat,
                                           pargs.base)
    if pargs.filter_random_percentange is not None:
        if not (0.0 <= pargs.filter_random_percentange <= 1.0):
            _logger.error('Filter percentage must be in range [0.0, 1.0]')
            return 1
        new_results = filter_random_percentange(
            new_results, pargs.filter_random_percentange)
    if pargs.filter_keep_benchmarks_matching_regex:
        new_results = filter_keep_benchmarks_matching_regex(
            new_results, pargs.filter_keep_benchmarks_matching_regex)
    if pargs.filter_out_benchmarks_matching_regex:
        new_results = filter_out_benchmarks_matching_regex(
            new_results, pargs.filter_out_benchmarks_matching_regex)
    if pargs.filter_keep_benchmarks_matching_exit_code is not None:
        new_results = filter_keep_benchmarks_matching_exit_code(
            new_results, pargs.filter_keep_benchmarks_matching_exit_code)
    if pargs.filter_keep_non_trivial:
        new_results = filter_keep_non_trivial_benchmarks(new_results)
    if pargs.filter_keep_trivial:
        new_results = filter_keep_trivial_benchmarks(new_results)
    if pargs.filter_out_benchmarks_from_file:
        new_results = filter_out_benchmarks_from_file(
            new_results, pargs.filter_out_benchmarks_from_file)
    if pargs.filter_keep_benchmarks_from_file:
        new_results = filter_keep_benchmarks_from_file(
            new_results, pargs.filter_keep_benchmarks_from_file)
    # END Apply filters
    new_result_infos = result_infos
    new_result_infos['results'] = new_results

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(new_result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    smtrunner.util.writeYaml(pargs.output, new_result_infos)
    return 0
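# A minimal sketch of one of the filters applied above, assuming each result
# carries a 'benchmark' path. This is a hypothetical reimplementation for
# illustration; the real filter functions are defined elsewhere in this
# repository and may behave differently.
def _sketch_filter_keep_benchmarks_matching_regex(results, pattern):
    import re
    regex = re.compile(pattern)
    # Keep only results whose benchmark path matches the pattern.
    return [ri for ri in results if regex.search(ri['benchmark'])]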
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('--benchmark-base', dest="benchmark_base", default="", type=str)
    parser.add_argument('--wd-base', dest="wd_base", default="", type=str)
    parser.add_argument('--timeout',
                        type=float,
                        default=None,
                        help='Timeout to assume when creating tags')
    parser.add_argument('--use-dsoes-wallclock-time',
                        action='store_true',
                        default=False)
    parser.add_argument('--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout)
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)
    _logger.info('Using benchmark base of "{}"'.format(pargs.benchmark_base))
    _logger.info('Using working directory base of "{}"'.format(pargs.wd_base))

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    runner = result_infos['misc']['runner']
    _logger.info('Found runner "{}"'.format(runner))
    backend = None
    if 'backend' in result_infos['misc']:
        backend = result_infos['misc']['backend']
        _logger.info('Backend was "{}"'.format(backend))

    event_analyser = event_analysis.get_event_analyser_from_runner_name(
        runner,
        soft_timeout=pargs.timeout,
        use_dsoes_wallclock_time=pargs.use_dsoes_wallclock_time)
    if not isinstance(event_analyser, event_analysis.JFSRunnerEventAnalyser):
        _logger.error('Must be a JFS run')
        return 1

    new_results = result_infos.copy()
    new_results['results'] = []
    for ri in result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        wd = ResultInfoUtil.get_result_info_wd(ri)
        geti = event_analysis.GETInfo(
            ri=ri,
            wd_base=pargs.wd_base,
            benchmark_base=pargs.benchmark_base,
            backend=backend)
        new_ri = ri.copy()
        num_inputs, num_wrong_sized_inputs, fuzzing_wallclock_time = \
            event_analyser.get_fuzzing_throughput_fields(geti)
        assert num_inputs is None or isinstance(num_inputs, int)
        assert num_wrong_sized_inputs is None or isinstance(num_wrong_sized_inputs, int)
        assert fuzzing_wallclock_time is None or isinstance(fuzzing_wallclock_time, float)
        _logger.info('num_inputs = {} for {}'.format(num_inputs, key))
        _logger.info('num_wrong_sized_inputs = {} for {}'.format(
            num_wrong_sized_inputs, key))
        _logger.info('fuzzing_wallclock_time = {} for {}'.format(
            fuzzing_wallclock_time, key))

        # Get LibFuzzer stats
        libfuzzer_avg_exec = event_analyser.get_libfuzzer_stat_average_exec_per_sec(geti)
        new_ri['libfuzzer_average_exec_per_sec'] = libfuzzer_avg_exec
        _logger.info('libfuzzer_average_exec_per_sec = {} for {}'.format(
            libfuzzer_avg_exec, key))

        # Get the event tag so we can determine when the throughput
        # information should be available.
        tag = event_analyser.get_event_tag(geti)
        if tag is None:
            _logger.error('Unhandled event for "{}"'.format(key))
            _logger.error(pprint.pformat(ri))
            return 1
        if tag in {'sat', 'unsat', 'sat_but_expected_unsat', 'unsat_but_expected_sat'}:
            if num_inputs is None:
                _logger.error('num_inputs should not be None for {} ({})'.format(key, wd))
                return 1
            if num_wrong_sized_inputs is None:
                _logger.error('num_wrong_sized_inputs should not be None for {} ({})'.format(key, wd))
                return 1
            if fuzzing_wallclock_time is None:
                _logger.error('fuzzing_wallclock_time should not be None for {} ({})'.format(key, wd))
                return 1
        new_ri['jfs_stat_num_inputs'] = num_inputs
        new_ri['jfs_stat_num_wrong_sized_inputs'] = num_wrong_sized_inputs
        new_ri['jfs_stat_fuzzing_wallclock_time'] = fuzzing_wallclock_time
        new_results['results'].append(new_ri)

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(new_results)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    smtrunner.util.writeYaml(pargs.output, new_results)
    return 0
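# The throughput fields collected above relate in the obvious way, assuming
# `fuzzing_wallclock_time` covers the whole fuzzing phase. This derived rate
# is an estimate comparable to, but not necessarily equal to, the
# `libfuzzer_average_exec_per_sec` stat recorded separately:
def _sketch_execs_per_sec(num_inputs, fuzzing_wallclock_time):
    if num_inputs is None or not fuzzing_wallclock_time:
        return None  # stats unavailable or zero-duration run
    return num_inputs / fuzzing_wallclock_time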
def main(args):
    global _logger
    global _fail_count
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'), nargs='+')
    parser.add_argument('--names', nargs='+')
    parser.add_argument('--base', type=str, default="")
    parser.add_argument('--allow-merge-failures',
                        dest='allow_merge_failures',
                        default=False,
                        action='store_true')
    parser.add_argument('--no-rank-unknown',
                        dest='no_rank_unknown',
                        default=False,
                        action='store_true')
    parser.add_argument('--dump-wins',
                        dest='dump_wins',
                        default=False,
                        action='store_true')
    parser.add_argument('--max-exec-time',
                        default=None,
                        type=float,
                        dest='max_exec_time')
    parser.add_argument('-o', '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    if not pargs.no_rank_unknown and pargs.max_exec_time is None:
        _logger.error('--max-exec-time must be specified')
        return 1
    if pargs.names is None:
        _logger.error('--names must be specified')
        return 1
    if len(pargs.names) != len(pargs.result_infos):
        _logger.error('Number of names must match the number of result info files')
        return 1
    index_to_name = pargs.names

    index_to_raw_result_infos = []
    index_to_file_name = []
    index_to_wins = []
    for index, result_infos_file in enumerate(pargs.result_infos):
        try:
            _logger.info('Loading "{}"'.format(result_infos_file.name))
            result_infos = ResultInfo.loadRawResultInfos(result_infos_file)
            index_to_raw_result_infos.append(result_infos)
            index_to_file_name.append(result_infos_file.name)
            index_to_wins.append(set())
        except ResultInfo.ResultInfoValidationError as e:
            _logger.error('Validation error:\n{}'.format(e))
            return 1
    _logger.info('Loading done')
    result_infos = None

    # Perform grouping by benchmark name
    key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        index_to_raw_result_infos)
    if len(rejected_result_infos) > 0:
        _logger.warning('There were rejected result infos')
        num_merge_failures = 0
        for index, l in enumerate(rejected_result_infos):
            _logger.warning('Index {} had {} rejections'.format(index, len(l)))
            num_merge_failures += len(l)
        if num_merge_failures > 0:
            if pargs.allow_merge_failures:
                _logger.warning('Merge failures being allowed')
            else:
                _logger.error('Merge failures are not allowed')
                return 1

    # HACK: Do something smarter here
    merged = {
        'misc': {
            'runner': index_to_raw_result_infos[0]['misc']['runner'],
            'synthesized_from': index_to_name,
        },
        'results': [],
        'schema_version': index_to_raw_result_infos[0]['schema_version'],
    }
    failed_to_rank = set()
    for key, raw_result_info_list in sorted(key_to_results_infos.items(),
                                            key=lambda kv: kv[0]):
        _logger.info('Ranking on "{}" : '.format(key))
        # Compute indices to use
        indices_to_use = []
        modified_raw_result_info_list = []
        # Handle "unknown".
        # Only compare results that gave sat/unsat.
        for index, ri in enumerate(raw_result_info_list):
            sat, _ = analysis.get_sat_from_result_info(ri)
            _logger.info('index {} {}'.format(index, sat))
            if sat != 'unknown':
                indices_to_use.append(index)
                modified_raw_result_info_list.append(ri)
            else:
                if pargs.no_rank_unknown:
                    # Legacy
                    modified_raw_result_info_list.append(ri)
                    _logger.debug('Not using index {} for {} due to unknown'.format(
                        index, key))
                else:
                    modified_ri = analysis.get_result_with_modified_time(
                        ri, pargs.max_exec_time)
                    _logger.debug('modified_ri: {}'.format(pprint.pformat(modified_ri)))
                    _logger.debug(
                        'Treating index {} for {} due to unknown as having max-time'.format(
                            index, key))
                    indices_to_use.append(index)
                    modified_raw_result_info_list.append(modified_ri)
        _logger.debug('used indices_to_use: {}'.format(indices_to_use))

        def append_result(winner_index):
            rank_failure = False
            if winner_index is None:
                # Failure
                rank_failure = True
                # Just use the first
                winner_index = 0
            copied_result = copy.deepcopy(raw_result_info_list[winner_index])
            copied_result['rank_failure'] = rank_failure
            copied_result['rank_winner'] = index_to_name[winner_index]
            merged['results'].append(copied_result)

        if len(indices_to_use) == 0:
            # Can't rank
            failed_to_rank.add(key)
            append_result(None)
            continue
        ranked_indices, ordered_bounds = analysis.rank_by_execution_time(
            modified_raw_result_info_list,
            indices_to_use,
            pargs.max_exec_time,
            analysis.get_arithmetic_mean_and_99_confidence_intervals,
            ['dsoes_wallclock', 'wallclock'])
        _logger.info('Ranking on "{}" : {}'.format(key, ranked_indices))
        _logger.info('Ranking on "{}" : {}'.format(key, ordered_bounds))
        # Record win
        if len(ranked_indices[0]) == 1:
            # Winner
            winner_index = ranked_indices[0][0]
            _logger.info('Recorded win for {}'.format(index_to_file_name[winner_index]))
            index_to_wins[winner_index].add(key)
            append_result(winner_index)
        else:
            failed_to_rank.add(key)
            append_result(None)

    # Report wins
    for index, winner_key_set in enumerate(index_to_wins):
        name = index_to_file_name[index]
        print("# of wins for {}: {}".format(name, len(winner_key_set)))
        if pargs.dump_wins:
            print(pprint.pformat(sorted(list(winner_key_set))))
        win_key = 'rank_wins_for_{}'.format(index_to_name[index])
        merged['misc'][win_key] = len(winner_key_set)
    print("# failed to rank: {}".format(len(failed_to_rank)))
    merged['misc']['num_fail_to_rank'] = len(failed_to_rank)

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(merged)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    smtrunner.util.writeYaml(pargs.output, merged)
    return 0
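# The ranking above declares a winner only when exactly one result sits alone
# in the top rank. A simplified sketch of how confidence intervals could
# decide such a sole winner (illustrative only;
# analysis.rank_by_execution_time is the real implementation):
def _sketch_pick_winner(bounds):
    """bounds: list of (lower, upper) execution-time confidence intervals,
    one per solver index. Returns the winning index, or None if the best
    interval overlaps any other."""
    order = sorted(range(len(bounds)), key=lambda i: bounds[i][0])
    best = order[0]
    # A strict win requires the best upper bound to lie below every other
    # lower bound.
    if all(bounds[best][1] < bounds[i][0] for i in order[1:]):
        return best
    return None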