Code Example #1
def _filter_benchmarks_from_file_imp(results, f, mode):
    assert mode == 'keep' or mode == 'out'
    rri_for_filter = ResultInfo.loadRawResultInfos(f)
    # Collect the keys present in the file
    keys = set()
    for ri in rri_for_filter['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        keys.add(key)
    report_initial(results)
    new_results = []
    if mode == 'keep':
        should_keep = lambda ri: ResultInfoUtil.get_result_info_key(ri) in keys
    elif mode == 'out':
        should_keep = lambda ri: ResultInfoUtil.get_result_info_key(
            ri) not in keys
    else:
        assert False

    for ri in results:
        if should_keep(ri):
            new_results.append(ri)
            _logger.debug('Keeping benchmark {}'.format(
                ResultInfoUtil.get_result_info_key(ri)))
        else:
            _logger.debug('Removing benchmark {}'.format(
                ResultInfoUtil.get_result_info_key(ri)))

    report_after(new_results, results)
    return new_results
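
The helpers report_initial and report_after are not shown in this example. A minimal sketch of what they might look like, inferred only from the call sites above (the behaviour is an assumption, not the project's actual implementation):

import logging

_logger = logging.getLogger(__name__)

def report_initial(results):
    # Hypothetical helper: log how many results we start with.
    _logger.info('Initial number of results: {}'.format(len(results)))

def report_after(new_results, results):
    # Hypothetical helper: log how many results survived the filter.
    _logger.info('Kept {} of {} results'.format(len(new_results), len(results)))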
Code Example #2
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos',
                        type=argparse.FileType('r'),
                        nargs='?',
                        default=sys.stdin)

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    expected_count = {
        'sat': 0,
        'unsat': 0,
        'unknown': 0,
        'total': 0,
    }
    for r in result_infos['results']:
        expected_sat = r['expected_sat']
        assert expected_sat in expected_count
        expected_count[expected_sat] += 1
        expected_count['total'] += 1

    print("Expected sat:\n", pprint.pformat(expected_count))
    return 0
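
For reference, a minimal self-contained sketch of the data shape the counting loop above assumes (the benchmark names and values here are made up for illustration):

result_infos = {
    'results': [
        {'benchmark': 'a.smt2', 'expected_sat': 'sat'},
        {'benchmark': 'b.smt2', 'expected_sat': 'unsat'},
        {'benchmark': 'c.smt2', 'expected_sat': 'unknown'},
    ],
}

expected_count = {'sat': 0, 'unsat': 0, 'unknown': 0, 'total': 0}
for r in result_infos['results']:
    expected_count[r['expected_sat']] += 1
    expected_count['total'] += 1
assert expected_count == {'sat': 1, 'unsat': 1, 'unknown': 1, 'total': 3}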
Code Example #3
def main(args):
    global _logger
    global _fail_count
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('--base', type=str, default="")
    parser.add_argument('-o',
                        '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    # START Apply filters
    new_results = result_infos['results']
    new_results = add_dsoes_wallclock_time(new_results, pargs.base)

    # END Apply filters
    new_result_infos = result_infos
    new_result_infos['results'] = new_results

    if _fail_count > 0:
        _logger.warning('Failed to parse "{}" files'.format(_fail_count))

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(new_result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')

    smtrunner.util.writeYaml(pargs.output, new_result_infos)
    return 0
Code Example #4
def load_yaml(arg):
    try:
        _logger.info('Loading "{}"'.format(arg.name))
        result_infos = ResultInfo.loadRawResultInfos(arg)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return None
    return result_infos
Code Example #5
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos',
                        type=argparse.FileType('r'),
                        nargs='?',
                        default=sys.stdin)

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    sat_count = {
        'sat': 0,
        'unsat': 0,
        'unknown': 0,
        'total': 0,
    }

    numMergeConflicts = 0
    isMergedResult = False
    for r in result_infos['results']:
        sat, hasMergeConflict = analysis.get_sat_from_result_info(r)
        sat_count['total'] += 1
        sat_count[sat] += 1
        if not isMergedResult:
            isMergedResult = analysis.is_merged_result_info(r)
        # Warn if merge conflict
        if hasMergeConflict:
            _logger.warning('Merge conflict for "{}"'.format(r['benchmark']))
            numMergeConflicts += 1
        # Warn if mis match
        expected_sat = r['expected_sat']
        if (sat == 'sat'
                and expected_sat == 'unsat') or (sat == 'unsat'
                                                 and expected_sat == 'sat'):
            _logger.warning('Expected sat and result mismatch for "{}"'.format(
                r['benchmark']))
    print("Sat:\n", pprint.pformat(sat_count))

    if isMergedResult:
        _logger.info('Is merged result')
        _logger.info('# of merge conflicts: {}'.format(numMergeConflicts))
    return 0
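
The mismatch check above only flags hard sat/unsat disagreements and deliberately ignores 'unknown'. Extracted as a hypothetical standalone helper (not part of the original code):

def expectation_mismatch(sat, expected_sat):
    # A result conflicts with the expectation only if one side says 'sat'
    # and the other says 'unsat'; 'unknown' never counts as a mismatch.
    return (sat == 'sat' and expected_sat == 'unsat') or \
           (sat == 'unsat' and expected_sat == 'sat')

assert expectation_mismatch('sat', 'unsat')
assert not expectation_mismatch('sat', 'unknown')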
Code Example #6
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('--benchmark-base', type=str, default=os.getcwd())
    parser.add_argument('dest_dir', type=str)

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    # Make destination if it doesn't exist
    try:
        os.mkdir(pargs.dest_dir)
    except FileExistsError:
        pass

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    destination_root = os.path.abspath(pargs.dest_dir)
    assert os.path.exists(destination_root)

    for ri in result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        # Construct source path
        if key.startswith(os.path.sep):
            key = key[1:]
        src_path = os.path.join(pargs.benchmark_base, key)
        if not os.path.exists(src_path):
            _logger.error('{} does not exist'.format(src_path))
            return 1
        # Construct destination path
        dirs = os.path.dirname(key)
        filename = os.path.basename(key)

        dest_dir = os.path.join(destination_root, dirs)
        _logger.debug('Destination dir is {}'.format(dest_dir))
        os.makedirs(dest_dir, exist_ok=True)
        dest_path = os.path.join(dest_dir, filename)
        _logger.info('Copying {} => {}'.format(src_path, dest_path))
        shutil.copy2(src_path, dest_path)
    return 0
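
The path handling above strips any leading separator from the benchmark key so that os.path.join does not discard the base directory. A standalone sketch of that step (the paths are hypothetical and nothing is written to disk here):

import os

benchmark_base = '/srv/benchmarks'   # hypothetical base directory
key = '/QF_BV/example.smt2'          # hypothetical benchmark key

if key.startswith(os.path.sep):
    key = key[1:]
# Joining with an absolute key would make os.path.join ignore
# benchmark_base entirely, which is why the leading separator is removed.
src_path = os.path.join(benchmark_base, key)
dest_path = os.path.join('/tmp/dest', os.path.dirname(key),
                         os.path.basename(key))
print(src_path)   # /srv/benchmarks/QF_BV/example.smt2
print(dest_path)  # /tmp/dest/QF_BV/example.smt2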
Code Example #7
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos',
                        type=argparse.FileType('r'),
                        nargs='?',
                        default=sys.stdin)

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    trivial_count = {
        'trivial': 0,
        'non_trivial': 0,
        'unknown': 0,
        'total': 0,
    }

    numMergeConflicts = 0
    isMergedResult = False
    for r in result_infos['results']:
        trivial_count['total'] += 1
        if 'is_trivial' not in r:
            trivial_count['unknown'] += 1
            continue
        is_trivial = r['is_trivial']
        assert isinstance(is_trivial, bool)
        if is_trivial:
            trivial_count['trivial'] += 1
        else:
            trivial_count['non_trivial'] += 1

    print(trivial_count)
    return 0
Code Example #8
def main(args):
    global _logger
    global _fail_count
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('first_result_info', type=argparse.FileType('r'))
    parser.add_argument('second_result_info', type=argparse.FileType('r'))
    parser.add_argument('--base', type=str, default="")
    parser.add_argument('--point-size',
                        type=float,
                        default=25.0,
                        dest='point_size')
    parser.add_argument(
        '--allow-merge-failures',
        dest='allow_merge_failures',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--max-exec-time',
        default=None,
        type=float,
        dest='max_exec_time',
    )
    parser.add_argument(
        '--title',
        default="{num_keys} benchmarks, {num_points} jointly SAT or timeout")
    parser.add_argument(
        "--xlabel",
        type=str,
        default=None,
    )
    parser.add_argument(
        "--ylabel",
        type=str,
        default=None,
    )
    parser.add_argument(
        "--axis-label-suffix",
        type=str,
        default=" execution time (s)",
        dest="axis_label_suffix",
    )
    parser.add_argument(
        "--axis-label-colour",
        type=str,
        default="black",
        dest="axis_label_colour",
    )
    parser.add_argument(
        "--annotate",
        default=False,
        action='store_true',
    )
    parser.add_argument(
        "--annotate-use-legacy-values",
        default=False,
        action='store_true',
    )
    parser.add_argument(
        "--output",
        default=None,
        type=argparse.FileType('wb'),
    )
    parser.add_argument(
        "--error-bars",
        default=False,
        action='store_true',
    )
    parser.add_argument(
        "--annotate-timeout-point",
        dest='annotate_timeout_point',
        default=False,
        action='store_true',
    )
    parser.add_argument("--require-time-abs-diff",
                        dest="require_time_abs_diff",
                        default=0.0,
                        type=float)
    parser.add_argument('--true-type-fonts',
                        default=False,
                        action='store_true')

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    if pargs.max_exec_time is None:
        _logger.error('--max-exec-time must be specified')
        return 1

    if pargs.true_type_fonts:
        smtrunner.util.set_true_type_font()

    index_to_raw_result_infos = []
    index_to_file_name = []
    for index, result_infos_file in enumerate(
        [pargs.first_result_info, pargs.second_result_info]):
        try:
            _logger.info('Loading "{}"'.format(result_infos_file.name))
            result_infos = ResultInfo.loadRawResultInfos(result_infos_file)
            index_to_raw_result_infos.append(result_infos)
            index_to_file_name.append(result_infos_file.name)
        except ResultInfo.ResultInfoValidationError as e:
            _logger.error('Validation error:\n{}'.format(e))
            return 1
        _logger.info('Loading done')
    result_infos = None

    # Perform grouping by benchmark name
    key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        index_to_raw_result_infos)
    if len(rejected_result_infos) > 0:
        _logger.warning('There were rejected result infos')
        num_merge_failures = 0
        for index, l in enumerate(rejected_result_infos):
            _logger.warning('Index {} had {} rejections'.format(index, len(l)))
            num_merge_failures += len(l)
        if num_merge_failures > 0:
            if pargs.allow_merge_failures:
                _logger.warning('Merge failures being allowed')
            else:
                _logger.error('Merge failures are not allowed')
                return 1

    # Generate scatter points
    x_scatter_points = []
    x_scatter_errors = [[], []]
    y_scatter_points = []
    y_scatter_errors = [[], []]
    count_dual_timeout = 0
    count_x_lt_y_not_dt = 0
    count_x_gt_y_not_dt = 0
    count_x_eq_y_not_dt = 0

    # New counting vars
    bounds_incomparable_keys = set()
    x_gt_y_keys = set()
    x_lt_y_keys = set()
    x_eq_y_keys = set()
    x_eq_y_and_is_timeout_keys = set()

    for key, raw_result_info_list in sorted(key_to_results_infos.items(),
                                            key=lambda kv: kv[0]):
        _logger.info('Ranking on "{}" : '.format(key))
        indices_to_use = []
        # Compute indices to use
        modified_raw_result_info_list = []
        # Handle "unknown"
        # Only compare results that gave sat/unsat
        for index, ri in enumerate(raw_result_info_list):
            if isinstance(ri['event_tag'], str):
                # single result
                event_tag = ri['event_tag']
            else:
                assert isinstance(ri['event_tag'], list)
                event_tag, _ = event_analysis.merge_aggregate_events(
                    ri['event_tag'])

            # Event must be sat or timeout
            _logger.info('index {} is {}'.format(index, event_tag))
            if event_tag not in {'sat', 'timeout', 'soft_timeout'}:
                # Skip this. We can't do a meaningful comparison here
                continue
            indices_to_use.append(index)
            # Normalise timeouts to have fixed values for the time.
            if event_tag in {'timeout', 'soft_timeout'}:
                modified_ri = analysis.get_result_with_modified_time(
                    ri, pargs.max_exec_time)
                _logger.debug('modified_ri: {}'.format(
                    pprint.pformat(modified_ri)))
                _logger.debug(
                    'Treating index {} for {} as having max-time due to timeout'
                    .format(index, key))
                modified_raw_result_info_list.append(modified_ri)
            else:
                modified_raw_result_info_list.append(ri)
        _logger.debug('used indices_to_use: {}'.format(indices_to_use))

        if len(indices_to_use) != 2:
            # Skip this one. One of the result infos can't be compared
            # against.
            continue

        assert len(indices_to_use) == 2
        # Get execution times
        index_to_execution_time_bounds = analysis.get_index_to_execution_time_bounds(
            modified_raw_result_info_list, indices_to_use, pargs.max_exec_time,
            analysis.get_arithmetic_mean_and_99_confidence_intervals,
            ['dsoes_wallclock', 'wallclock'])
        assert isinstance(index_to_execution_time_bounds, list)
        x_scatter_point_bounds = index_to_execution_time_bounds[0]
        y_scatter_point_bounds = index_to_execution_time_bounds[1]
        x_scatter_point = x_scatter_point_bounds[1]  # mean
        y_scatter_point = y_scatter_point_bounds[1]  # mean
        x_scatter_lower_error = x_scatter_point_bounds[
            1] - x_scatter_point_bounds[0]
        assert x_scatter_lower_error >= 0
        x_scatter_higher_error = x_scatter_point_bounds[
            2] - x_scatter_point_bounds[1]
        assert x_scatter_higher_error >= 0
        y_scatter_lower_error = y_scatter_point_bounds[
            1] - y_scatter_point_bounds[0]
        assert y_scatter_lower_error >= 0
        y_scatter_higher_error = y_scatter_point_bounds[
            2] - y_scatter_point_bounds[1]
        assert y_scatter_higher_error >= 0

        x_scatter_points.append(x_scatter_point)
        y_scatter_points.append(y_scatter_point)
        # Error bar points
        #x_scatter_errors.append((x_scatter_lower_error, x_scatter_higher_error))
        x_scatter_errors[0].append(x_scatter_lower_error)
        x_scatter_errors[1].append(x_scatter_higher_error)
        #y_scatter_errors.append((y_scatter_lower_error, y_scatter_higher_error))
        y_scatter_errors[0].append(y_scatter_lower_error)
        y_scatter_errors[1].append(y_scatter_higher_error)

        # LEGACY: Now do some counting
        if x_scatter_point == y_scatter_point:
            if x_scatter_point == pargs.max_exec_time:
                assert x_scatter_lower_error == 0
                assert x_scatter_higher_error == 0
                assert y_scatter_lower_error == 0
                assert y_scatter_higher_error == 0
                count_dual_timeout += 1
            else:
                _logger.info(
                    'Found count_x_eq_y_not_dt: x: {}, key: {}'.format(
                        x_scatter_point, key))
                count_x_eq_y_not_dt += 1
        elif x_scatter_point > y_scatter_point:
            count_x_gt_y_not_dt += 1
        else:
            assert x_scatter_point < y_scatter_point
            count_x_lt_y_not_dt += 1

        # SMARTER counting: uses error bounds
        if analysis.bounds_overlap(x_scatter_point_bounds,
                                   y_scatter_point_bounds):
            # Bounds overlap, we can't compare the execution times in a meaningful way
            bounds_incomparable_keys.add(key)
            # However if both are timeouts we can note this
            if x_scatter_point == pargs.max_exec_time:
                x_eq_y_and_is_timeout_keys.add(key)
        else:
            # Compare the means
            if x_scatter_point > y_scatter_point and abs(
                    x_scatter_point -
                    y_scatter_point) > pargs.require_time_abs_diff:
                x_gt_y_keys.add(key)
            elif x_scatter_point < y_scatter_point and abs(
                    x_scatter_point -
                    y_scatter_point) > pargs.require_time_abs_diff:
                x_lt_y_keys.add(key)
            else:
                if pargs.require_time_abs_diff == 0.0:
                    assert x_scatter_point == y_scatter_point
                x_eq_y_keys.add(key)

    # Report counts
    print("# of points : {}".format(len(x_scatter_points)))
    print("LEGACY: count_dual_timeout: {}".format(count_dual_timeout))
    print("LEGACY: count_x_eq_y_not_dt: {}".format(count_x_eq_y_not_dt))
    print("LEGACY: count_x_gt_y_not_dt: {}".format(count_x_gt_y_not_dt))
    print("LEGACY: count_x_lt_y_not_dt: {}".format(count_x_lt_y_not_dt))
    print("")
    print("# x > y and no bound overlap: {}".format(len(x_gt_y_keys)))
    print("# x < y and no bound overlap: {}".format(len(x_lt_y_keys)))
    print("# x = y and no bound overlap: {}".format(len(x_eq_y_keys)))
    print("# incomparable: {}".format(len(bounds_incomparable_keys)))
    print("# of x = y and is timeout: {}".format(
        len(x_eq_y_and_is_timeout_keys)))

    # Now plot
    extend = 100
    tickFreq = 100
    assert len(x_scatter_points) == len(y_scatter_points)
    fig, ax = plt.subplots()
    fig.patch.set_alpha(0.0)  # Transparent
    if pargs.error_bars:
        splot = ax.errorbar(
            x_scatter_points,
            y_scatter_points,
            xerr=x_scatter_errors,
            yerr=y_scatter_errors,
            fmt='o',
            picker=5,
            ms=pargs.point_size / 2.0,  # HACK
            ecolor='black',
            capsize=5,
            #capthick=10,
        )
    else:
        splot = ax.scatter(x_scatter_points,
                           y_scatter_points,
                           picker=5,
                           s=pargs.point_size)
    xlabel = index_to_file_name[0] if pargs.xlabel is None else pargs.xlabel
    ylabel = index_to_file_name[1] if pargs.ylabel is None else pargs.ylabel
    xlabel += pargs.axis_label_suffix
    ylabel += pargs.axis_label_suffix
    ax.xaxis.label.set_color(pargs.axis_label_colour)
    ax.yaxis.label.set_color(pargs.axis_label_colour)
    ax.tick_params(axis='x', colors=pargs.axis_label_colour)
    ax.tick_params(axis='y', colors=pargs.axis_label_colour)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)

    ax.set_xlim(0, pargs.max_exec_time + extend)
    ax.set_ylim(0, pargs.max_exec_time + extend)
    # +1 is just so the pargs.max_exec_time is included because range()'s end is not inclusive
    ax.set_xticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
    ax.set_yticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))

    # Construct title keyword args
    title_kwargs = {
        'num_points': len(x_scatter_points),
        'xlabel': xlabel,
        'ylabel': ylabel,
        'num_keys': len(key_to_results_infos.keys()),
    }
    ax.set_title(pargs.title.format(**title_kwargs))

    # Identity line
    ax.plot([0, pargs.max_exec_time + extend],
            [0, pargs.max_exec_time + extend],
            linewidth=1.0,
            color='black')

    if pargs.annotate:
        if pargs.annotate_use_legacy_values:
            _logger.warning('Displaying legacy values')
            x_lt_value_to_display = count_x_lt_y_not_dt
            x_gt_value_to_display = count_x_gt_y_not_dt
        else:
            _logger.info('Displaying new values')
            x_lt_value_to_display = len(x_lt_y_keys)
            x_gt_value_to_display = len(x_gt_y_keys)

        ax.annotate('{}'.format(x_lt_value_to_display),
                    xy=(200, 550),
                    fontsize=40)
        ax.annotate('{}'.format(x_gt_value_to_display),
                    xy=(550, 200),
                    fontsize=40)

    # timeout point annotation
    if pargs.annotate_timeout_point:
        num_dual_timeouts = len(x_eq_y_and_is_timeout_keys)
        dual_timeout_txt = None
        if num_dual_timeouts == 1:
            dual_timeout_txt = '{} dual timeout'.format(num_dual_timeouts)
        else:
            dual_timeout_txt = '{} dual timeouts'.format(num_dual_timeouts)

        ax.annotate(
            dual_timeout_txt,
            # HACK: the -15.00 x-offset is to position the arrow properly
            xy=(pargs.max_exec_time - 15.00, pargs.max_exec_time),
            xycoords='data',
            xytext=(-50, 0),
            textcoords='offset points',
            arrowprops=dict(facecolor='black',
                            shrink=0.05,
                            width=1.5,
                            headwidth=7.0),
            horizontalalignment='right',
            verticalalignment='center',
            bbox=dict(boxstyle='round', fc='None'),
            fontsize=15)

    # Finally show
    if pargs.output is None:
        plt.show()
    else:
        # For command line usage
        fig.show()
        fig.savefig(pargs.output, format='pdf')
    return 0
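
The x_scatter_errors and y_scatter_errors lists above follow matplotlib's errorbar convention of a [lower_errors, upper_errors] pair of sequences. A minimal sketch of turning (lower, mean, upper) execution-time bounds into that shape (the bound values are made up):

# Hypothetical (lower, mean, upper) execution-time bounds for three benchmarks.
bounds = [(1.0, 1.5, 2.5), (3.0, 3.25, 3.5), (10.0, 10.0, 10.0)]

points = []
errors = [[], []]  # [lower_errors, upper_errors], as ax.errorbar() expects
for lo, mean, hi in bounds:
    points.append(mean)
    errors[0].append(mean - lo)
    errors[1].append(hi - mean)

assert points == [1.5, 3.25, 10.0]
assert errors == [[0.5, 0.25, 0.0], [1.0, 0.25, 0.0]]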
Code Example #9
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--plot-bins",
                        type=int,
                        dest='plot_number_of_bins',
                        default=100,
                        help='Number of bins for histogram plot')
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('--base', type=str, default="")
    parser.add_argument(
        '--report-num-under',
        dest='report_num_under',
        type=int,
        default=0,
        help='Report number of benchmarks in histogram <= this size',
    )
    parser.add_argument('--max-exec-time',
                        dest='max_exec_time',
                        type=int,
                        default=-1,
                        help="If non-negative give explicit max time")
    parser.add_argument(
        '--force-title',
        dest='force_title',
        type=str,
        default=None,
        help="Force plot use supplied title",
    )
    parser.add_argument('--true-type-fonts',
                        default=False,
                        action='store_true')

    DriverUtil.parserAddLoggerArg(parser)
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    if pargs.true_type_fonts:
        smtrunner.util.set_true_type_font()

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    if pargs.force_title is not None:
        title = pargs.force_title
    else:
        title = os.path.abspath(pargs.result_infos.name)

    max_time = None
    if pargs.max_exec_time > 0:
        max_time = pargs.max_exec_time

    report_num_under = None
    if pargs.report_num_under > 0:
        report_num_under = pargs.report_num_under

    plot_histogram(result_infos['results'], pargs.plot_number_of_bins, title,
                   max_time, report_num_under)
    return 0
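
plot_histogram is not defined in this example. A minimal hypothetical sketch with the same call signature, assuming each result dict carries a 'wallclock_time' field (that field name is an assumption, not confirmed by the original code):

import matplotlib.pyplot as plt

def plot_histogram(results, num_bins, title, max_time=None, report_num_under=None):
    # Collect execution times; 'wallclock_time' is an assumed field name.
    times = [r['wallclock_time'] for r in results if 'wallclock_time' in r]
    fig, ax = plt.subplots()
    ax.hist(times, bins=num_bins, range=(0, max_time) if max_time else None)
    ax.set_title(title)
    ax.set_xlabel('execution time (s)')
    ax.set_ylabel('number of benchmarks')
    if report_num_under is not None:
        num_under = sum(1 for t in times if t <= report_num_under)
        print('# of benchmarks <= {}s: {}'.format(report_num_under, num_under))
    plt.show()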
Code Example #10
def main(args):
    global _logger
    global _fail_count
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('--true-type-fonts',
                        default=False,
                        action='store_true')
    parser.add_argument('result_infos', nargs='+', help='Input YAML files')
    parser.add_argument('--title', default="", type=str)
    parser.add_argument('--legend-name-map',
                        dest='legend_name_map',
                        default=None,
                        type=str)
    parser.add_argument(
        '--legend-position',
        dest='legend_position',
        default='outside_bottom',
        choices=['outside_bottom', 'outside_right', 'inner', 'none'])
    parser.add_argument('--report-negative-results',
                        dest='report_negative_results',
                        default=False,
                        action='store_true')
    parser.add_argument('--legend-font-size',
                        dest='legend_font_size',
                        default=None,
                        type=int)
    parser.add_argument('--draw-style',
                        dest='drawstyle',
                        choices=['steps', 'default'],
                        default='default',
                        help='Line draw style')
    parser.add_argument('--legend-num-columns',
                        dest='legend_num_columns',
                        default=3,
                        type=int)
    actionGroup = parser.add_mutually_exclusive_group()
    actionGroup.add_argument('--ipython', action='store_true')
    actionGroup.add_argument('--pdf', help='Write graph to PDF')
    actionGroup.add_argument('--svg', help='Write graph to svg')

    plotGroup = parser.add_mutually_exclusive_group()
    plotGroup.add_argument("--points", action='store_true')
    plotGroup.add_argument("--error-bars",
                           action='store_true',
                           dest='error_bars')
    parser.add_argument('--point-size',
                        type=float,
                        default=3.0,
                        dest='point_size')
    parser.add_argument(
        '--allow-merge-failures',
        dest='allow_merge_failures',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--max-exec-time',
        default=None,
        type=float,
        dest='max_exec_time',
    )
    parser.add_argument(
        '--mode',
        choices=['time', 'fuzzing_throughput'],
        default='time',
    )
    parser.add_argument(
        '--fuzzing-point-ordering',
        choices=[
            'independent', 'max_score', 'max_throughput', 'max_mean_throughput'
        ],
        dest='fuzzing_point_ordering',
        default=None,
    )

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    if pargs.mode == 'time' and pargs.max_exec_time is None:
        _logger.error('--max-exec-time must be specified')
        return 1
    if pargs.max_exec_time is not None and pargs.mode == 'fuzzing_throughput':
        _logger.warning('Ignoring --max-exec-time')
        pargs.max_exec_time = None

    if pargs.pdf != None:
        if not pargs.pdf.endswith('.pdf'):
            logging.error('--pdf argument must end with .pdf')
            return 1
        if os.path.exists(pargs.pdf):
            logging.error('Refusing to overwrite {}'.format(pargs.pdf))
            return 1

    if pargs.svg != None:
        if not pargs.svg.endswith('.svg'):
            logging.error('--svg argument must end with .svg')
            return 1
        if os.path.exists(pargs.svg):
            logging.error('Refusing to overwrite {}'.format(pargs.svg))
            return 1

    if pargs.true_type_fonts:
        smtrunner.util.set_true_type_font()

    index_to_raw_result_infos = []
    index_to_file_name = []
    index_to_abs_file_path = []
    index_to_truncated_file_path = []
    index_to_ris = []
    index_to_legend_name = []
    if pargs.legend_name_map:
        # Naming is bad here. We actually expect to receive a list of
        # names to use that corresponds to the ordering of the RI files
        # on the command line.
        with open(pargs.legend_name_map, 'r') as f:
            legend_list = smtrunner.util.loadYaml(f)
            if not isinstance(legend_list, list):
                _logger.error('Legend mapping file must be a list')
                return 1
            if len(legend_list) != len(pargs.result_infos):
                _logger.error(
                    'Legend mapping file list must contain {}'.format(
                        len(pargs.result_infos)))
                return 1
            index_to_legend_name = legend_list

    for index, result_infos_file_path in enumerate(pargs.result_infos):
        try:
            with open(result_infos_file_path, 'r') as f:
                _logger.info('Loading "{}"'.format(f.name))
                ris = ResultInfo.loadRawResultInfos(f)
                index_to_raw_result_infos.append(ris)
                index_to_file_name.append(f.name)
                index_to_abs_file_path.append(os.path.abspath(f.name))
                index_to_ris.append(ris['results'])
        except ResultInfo.ResultInfoValidationError as e:
            _logger.error('Validation error:\n{}'.format(e))
            return 1
        _logger.info('Loading done')
    result_infos = None

    longest_path_prefix = ResultInfoUtil.compute_longest_common_path_prefix(
        index_to_abs_file_path)
    index_to_prefix_truncated_path = []
    for index, _ in enumerate(pargs.result_infos):
        path = index_to_abs_file_path[index]
        index_to_prefix_truncated_path.append(path[len(longest_path_prefix):])
    # Now truncate suffixes
    longest_path_suffix = ResultInfoUtil.compute_longest_common_path_suffix(
        index_to_prefix_truncated_path)
    for index, _ in enumerate(pargs.result_infos):
        truncated_path = index_to_prefix_truncated_path[index]
        truncated_path = truncated_path[:-(len(longest_path_suffix))]
        index_to_truncated_file_path.append(truncated_path)
        assert index_to_truncated_file_path[index] == truncated_path

    # Perform grouping by benchmark name
    # Technically not necessary but this can be used as a safety check to make sure
    # all result info files are talking about the same benchmarks
    key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        index_to_raw_result_infos)
    if len(rejected_result_infos) > 0:
        _logger.warning('There were rejected result infos')
        num_merge_failures = 0
        for index, l in enumerate(rejected_result_infos):
            _logger.warning('Index {} had {} rejections'.format(index, len(l)))
            num_merge_failures += len(l)
        if num_merge_failures > 0:
            if pargs.allow_merge_failures:
                _logger.warning('Merge failures being allowed')
            else:
                _logger.error('Merge failures are not allowed')
                return 1

    _logger.info('Computing points')
    index_to_ri_scores = []
    index_to_x_points = []
    index_to_y_points = []
    index_to_y_error_points = []
    index_to_point_index_to_benchmark_name_map = []
    max_observed_y_value = 0.0
    max_observed_x_value = 0.0
    min_observed_x_value = 0.0
    for index, ris in enumerate(index_to_raw_result_infos):
        ri_scores = make_result_info_score_generator(pargs)
        index_to_ri_scores.append(ri_scores)
        ri_scores.addResults(ris['results'])
    # Do point computations
    do_global_compute_points_kwargs = {}
    if pargs.fuzzing_point_ordering:
        do_global_compute_points_kwargs[
            'point_ordering'] = pargs.fuzzing_point_ordering
    index_to_ri_scores[0].do_global_compute_points(
        index_to_ri_scores, **do_global_compute_points_kwargs)

    for index, ri_scores in enumerate(index_to_ri_scores):
        index_to_x_points.append(ri_scores.x_points)
        index_to_y_points.append(ri_scores.y_points)
        index_to_y_error_points.append(ri_scores.y_errors)
        index_to_point_index_to_benchmark_name_map.append(
            ri_scores.point_index_to_benchmark_name_map)

        # See if we've found a larger time.
        for y_point in ri_scores.y_points:
            if y_point is not None and y_point > max_observed_y_value:
                max_observed_y_value = y_point
        for x_point in ri_scores.x_points:
            if x_point > max_observed_x_value:
                max_observed_x_value = x_point
            if x_point < min_observed_x_value:
                min_observed_x_value = x_point
    _logger.info('Computing points done')
    _logger.info('min observed x value: {}'.format(min_observed_x_value))
    _logger.info('max observed x value: {}'.format(max_observed_x_value))
    _logger.info('max observed y value: {}'.format(max_observed_y_value))
    ri_scores = None

    # Report means
    for index, ri_score in enumerate(index_to_ri_scores):
        name = index_to_truncated_file_path[index]
        means = ri_score.get_y_mean_bounds()
        _logger.info('Means (<min>, <mean>, <max>) for {} is {}'.format(
            name, means))

    # Now try to plot
    fig, ax = plt.subplots()

    if len(pargs.title) > 0:
        ax.set_title(pargs.title)
    ax.set_xlabel(index_to_ri_scores[0].x_label)
    ax.set_ylabel(index_to_ri_scores[0].y_label)

    # Add curves
    curves = []
    legend_names = []
    for index, _ in enumerate(index_to_ris):
        _logger.info(
            '"{}" # of benchmarks with {} +ve score, {} -ve score, {} zero score'
            .format(index_to_truncated_file_path[index],
                    index_to_ri_scores[index].num_positive,
                    index_to_ri_scores[index].num_negative,
                    index_to_ri_scores[index].num_zero))
        x_points = index_to_x_points[index]
        y_points = index_to_y_points[index]
        y_errors = index_to_y_error_points[index]
        point_index_to_benchmark_name_map = index_to_point_index_to_benchmark_name_map[
            index]
        name_for_legend = None
        result_info_file_name = index_to_file_name[index]
        if pargs.legend_name_map:
            name_for_legend = index_to_legend_name[index]
        else:
            #name_for_legend = result_info_file_name
            name_for_legend = index_to_truncated_file_path[index]
        pickTolerance = 4
        if pargs.error_bars:
            p = ax.errorbar(
                x_points,
                y_points,
                yerr=y_errors,
                #picker=pickTolerance,
                drawstyle=pargs.drawstyle,
                markersize=pargs.point_size)
        else:
            p = ax.plot(
                x_points,
                y_points,
                '-o' if pargs.points else '-',
                #picker=pickTolerance,
                drawstyle=pargs.drawstyle,
                markersize=pargs.point_size)
        curves.append(p[0])

        legend_names.append(name_for_legend)
    # Add legend
    assert len(legend_names) == len(curves)
    if pargs.legend_position == 'none':
        fig.tight_layout()
    elif pargs.legend_position == 'inner':
        legend = ax.legend(tuple(curves),
                           tuple(legend_names),
                           ncol=pargs.legend_num_columns,
                           loc='upper left',
                           fontsize=pargs.legend_font_size)
        fig.tight_layout()
    elif pargs.legend_position == 'outside_right':
        # HACK: move the legend outside
        # Shrink current axis by 20%
        box = ax.get_position()
        print(box)
        legend = ax.legend(
            tuple(curves),
            tuple(legend_names),
            loc='upper left',
            bbox_to_anchor=(1.01, 1.0),
            borderaxespad=0,  # No padding so that corners line up
            fontsize=pargs.legend_font_size,
            ncol=pargs.legend_num_columns)

        # Work out how wide the legend is in terms of axes co-ordinates
        fig.canvas.draw()  # Needed so that the legend size computation is correct
        legendWidth, _ = ax.transAxes.inverted().transform(
            (legend.get_frame().get_width(), legend.get_frame().get_height()))
        assert legendWidth > 0.0

        # FIXME: Why do I have to use 0.95??
        ax.set_position(
            [box.x0, box.y0, box.width * (0.95 - legendWidth), box.height])
    elif pargs.legend_position == 'outside_bottom':
        box = ax.get_position()
        legend = ax.legend(tuple(curves),
                           tuple(legend_names),
                           ncol=pargs.legend_num_columns,
                           bbox_to_anchor=(0.5, -0.13),
                           loc='upper center',
                           fontsize=pargs.legend_font_size)
        # Work out how wide the legend is in terms of axes co-ordinates
        fig.canvas.draw()  # Needed so that the legend size computation is correct
        legendWidth, legendHeight = ax.transAxes.inverted().transform(
            (legend.get_frame().get_width(), legend.get_frame().get_height()))
        hack_y_axis_offset = 0.15
        ax.set_position([
            box.x0, box.y0 + legendHeight + hack_y_axis_offset, box.width,
            box.height - legendHeight - 0.6 * hack_y_axis_offset
        ])
    else:
        assert False

    if pargs.legend_position != 'none':
        if 'set_draggable' in dir(legend):
            legend.set_draggable(
                True)  # Make it so we can move the legend with the mouse
        else:
            legend.draggable(True)

    # Adjust y-axis so it is a log plot everywhere except [-1,1] which is linear
    ax.set_yscale('symlog', linthreshy=1.0, linscaley=0.1)

    #set minor ticks on y-axis
    from matplotlib.ticker import LogLocator
    import numpy
    yAxisLocator = LogLocator(subs=numpy.arange(1.0, 10.0))
    ax.yaxis.set_minor_locator(yAxisLocator)
    ax.yaxis.set_tick_params(which='minor', length=4)
    ax.yaxis.set_tick_params(which='major', length=6)
    #ax.grid()

    # Y-axis bounds
    if pargs.max_exec_time:
        assert pargs.max_exec_time > 0.0
        ax.set_ybound(lower=0.0, upper=pargs.max_exec_time)
    else:
        ax.set_ybound(lower=0.0,
                      upper=round_away_from_zero_to_multiple_of(
                          100, max_observed_y_value))

    # X-axis bounds
    # Round up to nearest multiple of 10
    assert max_observed_x_value >= 0.0
    x_axis_upper_bound = round_away_from_zero_to_multiple_of(
        10, max_observed_x_value)
    x_axis_lower_bound = round_away_from_zero_to_multiple_of(
        10, min_observed_x_value)
    ax.set_xbound(lower=x_axis_lower_bound, upper=x_axis_upper_bound)
    _logger.info('X axis bounds [{}, {}]'.format(x_axis_lower_bound,
                                                 x_axis_upper_bound))

    if pargs.ipython:
        # Useful interactive console
        header = """Useful commands:
        fig.show() - Shows figure
        fig.canvas.draw() - Redraws figure (useful if you changed something)
        fig.savefig('something.pdf') - Save the figure
        """
        from IPython import embed
        embed(header=header)
    elif pargs.pdf != None:
        fig.show()
        logging.info('Writing PDF to {}'.format(pargs.pdf))
        fig.savefig(pargs.pdf)
    elif pargs.svg != None:
        fig.show()
        logging.info('Writing svg to {}'.format(pargs.svg))
        fig.savefig(pargs.svg)
    else:
        plt.show()
    return 0
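
round_away_from_zero_to_multiple_of is not shown in this example. A hypothetical sketch that matches how it is used above for the axis bounds (rounding up for positive values and down for negative ones):

import math

def round_away_from_zero_to_multiple_of(multiple, value):
    # Round `value` away from zero to the nearest multiple of `multiple`,
    # e.g. 101 -> 200 for multiple=100 and -3 -> -10 for multiple=10.
    if value >= 0:
        return multiple * math.ceil(value / multiple)
    return multiple * math.floor(value / multiple)

assert round_away_from_zero_to_multiple_of(100, 101) == 200
assert round_away_from_zero_to_multiple_of(10, -3) == -10
assert round_away_from_zero_to_multiple_of(10, 0) == 0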
Code Example #11
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'), nargs='+')
    parser.add_argument(
        '--bin-width',
        type=int,
        default=5,
        dest='bin_width',
        help='Histogram bin width in seconds (default %(default)s)',
    )
    parser.add_argument(
        '--use-result-with-index',
        dest='use_result_with_index',
        type=int,
        default=-1,
        help='When outputting the result info, pick the result info for the '
        'benchmark from the specified index. If -1 then pick from the '
        'result info file it was selected from.',
    )
    parser.add_argument(
        '--max-time',
        type=int,
        default=120,
        dest='max_time',
        help='Assumed max time in seconds (default %(default)s)',
    )
    parser.add_argument('--random-seed',
                        type=int,
                        default=0,
                        dest='random_seed')
    parser.add_argument(
        '--bound',
        type=int,
        default=100,
        help='Maximum number of benchmarks to gather (default %(default)s)',
    )
    parser.add_argument(
        '--keep-on-pick',
        dest='keep_on_pick',
        help='When selecting a benchmark, keep it in the histogram',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--selection-mode',
        dest='selection_mode',
        default='inv_height_probability',
        choices=['inv_height_probability', 'rand_bin'],
    )
    parser.add_argument(
        '--seed-selection-from',
        dest='seed_selection_from',
        default=None,
        help='Seed selected benchmarks from supplied invocation info file')
    parser.add_argument('-o',
                        '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')
    parser.add_argument('--hack-check-bins-included-with-count-lt',
                        type=int,
                        dest='hack_check_bins_included_with_count_less_than',
                        default=None)

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    result_infos_list = []
    for of in pargs.result_infos:
        try:
            _logger.info('Loading "{}"'.format(of.name))
            result_infos = ResultInfo.loadRawResultInfos(of)
            result_infos_list.append(result_infos)
        except ResultInfo.ResultInfoValidationError as e:
            _logger.error('Validation error:\n{}'.format(e))
            return 1
    _logger.info('Loading done. Loaded {} result info files'.format(
        len(result_infos_list)))

    # Set random seed
    random.seed(pargs.random_seed)

    key_fn = ResultInfoUtil.get_result_info_key

    # Benchmarks to keep. This is used for checking whether an HSM
    # has given us a benchmark we have already selected.
    btk = set()
    btk_to_result_info_index = dict()

    if pargs.seed_selection_from:
        if not os.path.exists(pargs.seed_selection_from):
            _logger.error('{} does not exist'.format(
                pargs.seed_selection_from))
            return 1
        with open(pargs.seed_selection_from, 'r') as f:
            _logger.info('Seeding selection from {}'.format(f.name))
            ris_for_seeding = ResultInfo.loadRawResultInfos(f)
            # Now pull keys from the result info file
            for ri in ris_for_seeding['results']:
                if len(btk) >= pargs.bound:
                    _logger.info('Bound reached')
                    break
                key_for_ri = key_fn(ri)
                btk.add(key_for_ri)
                # HACK: Lie about the source
                btk_to_result_info_index[key_for_ri] = 0
            _logger.info('Seeded selection with {} benchmarks'.format(
                len(btk)))
            assert len(btk) == len(btk_to_result_info_index.keys())

    # Group
    key_to_result_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        result_infos_list, key_fn=key_fn)
    if len(rejected_result_infos):
        for index, l in enumerate(rejected_result_infos):
            if len(l) > 0:
                _logger.error('Found rejected result infos:\n{}'.format(
                    pprint.pformat(rejected_result_infos)))
                return 1

    histogram_sms = []
    for result_infos in result_infos_list:
        histogram_sms.append(
            HistogramStateMachine(result_infos['results'], pargs.bin_width,
                                  pargs.max_time, key_fn))
    original_sms = histogram_sms.copy()  # Shallow copy

    # HACK: Do check
    desirable_keys = set()
    if pargs.hack_check_bins_included_with_count_less_than:
        _logger.info(
            'Doing hack - check bins with count less than {} are included'.
            format(pargs.hack_check_bins_included_with_count_less_than))
        assert pargs.hack_check_bins_included_with_count_less_than > 0
        # Walk through the bins and collect all keys where
        # bin count is less than the specified value.
        for hsm in histogram_sms:
            for bin in hsm.bins:
                if bin.getSize(
                ) < pargs.hack_check_bins_included_with_count_less_than:
                    _logger.info(
                        'Adding keys from bin {} to desirable keys'.format(
                            bin.getBounds()))
                    _logger.debug('Adding keys:\n{}'.format(
                        pprint.pformat(bin.getKeys())))
                    desirable_keys.update(bin.getKeys())
        _logger.info('{} keys in set of desirable benchmarks'.format(
            len(desirable_keys)))

    # Keep picking round robin between the state machines until
    # a bound is reached.
    _logger.info(
        'Beginning {} selection with bound of {} and seed of {} benchmarks'.
        format(pargs.selection_mode, pargs.bound, len(btk)))
    initialBtkSize = len(btk)
    while len(btk) < pargs.bound:
        if len(histogram_sms) == 0:
            _logger.warning('Exhausted all histogram SMs')
            break
        hsms_to_remove = set()
        # Go through sms in round robin order.
        for index, hsm in enumerate(histogram_sms):
            if len(btk) >= pargs.bound:
                # Don't allow bound to be exceeded.
                break
            benchmark_key = None
            while benchmark_key is None:
                # Based on selection mode pick a benchmark
                if pargs.selection_mode == 'inv_height_probability':
                    benchmark_key = hsm.getNext(
                        remove_item=not pargs.keep_on_pick)
                elif pargs.selection_mode == 'rand_bin':
                    benchmark_key = hsm.getNextRandBin(
                        remove_item=not pargs.keep_on_pick)
                else:
                    raise Exception('Unsupported selection mode')
                _logger.debug('Got key {}'.format(benchmark_key))
                if benchmark_key is None:
                    # hsm exhausted
                    _logger.debug('HSM index {} exhausted'.format(index))
                    hsms_to_remove.add(index)
                    break
                if benchmark_key in btk:
                    _logger.debug('Already have key {}'.format(benchmark_key))
                    # We already have this benchmark
                    # Try picking another.
                    benchmark_key = None
                    continue
            if benchmark_key is not None:
                _logger.debug('Adding key {}'.format(benchmark_key))
                assert benchmark_key not in btk_to_result_info_index
                assert benchmark_key not in btk
                btk_to_result_info_index[benchmark_key] = index
                btk.add(benchmark_key)
        if len(hsms_to_remove) > 0:
            new_hsms = []
            for index, hsm in enumerate(histogram_sms):
                if index not in hsms_to_remove:
                    _logger.debug('keeping HSM {}'.format(index))
                    new_hsms.append(hsm)
                else:
                    _logger.debug('dropping HSM {}'.format(index))
            histogram_sms = new_hsms
    _logger.info('Selected {} benchmarks'.format(len(btk) - initialBtkSize))
    _logger.info('Final selection has {} benchmarks'.format(len(btk)))
    assert len(btk) == len(btk_to_result_info_index)

    new_result_infos = {
        'results': [],
        'schema_version': 0,
    }
    new_results_list = new_result_infos['results']

    used_programs = set()
    # Grab the result info by key
    for key, result_info_index in sorted(btk_to_result_info_index.items(),
                                         key=lambda tup: tup[0]):
        # Just pick the first
        index_to_use = pargs.use_result_with_index
        if index_to_use == -1:
            # Use result info corresponding to the result info we took it from
            index_to_use = result_info_index
        _logger.debug('Selected key {} from result_info_index {}'.format(
            key, index_to_use))
        ri = key_to_result_infos[key][index_to_use]
        _logger.debug('Grabbed {}'.format(key_to_result_infos[key]))
        _logger.debug('Grabbed {}'.format(ri))
        new_results_list.append(ri)
        if key in used_programs:
            # Sanity check
            _logger.error(
                'Selected key ({}) that has already been used'.format(key))
            return 1
        used_programs.add(key)

    # HACK:
    if pargs.hack_check_bins_included_with_count_less_than:
        missing_desirable_benchmarks = desirable_keys.difference(
            set(btk_to_result_info_index.keys()))
        _logger.warning(
            '{} desirable benchmarks missing from selection'.format(
                len(missing_desirable_benchmarks)))
        if len(missing_desirable_benchmarks) > 0:
            _logger.error(
                'Desirable {} benchmarks missing from selection:\n{}'.format(
                    len(missing_desirable_benchmarks),
                    pprint.pformat(missing_desirable_benchmarks)))
            return 1

    # Validate against schema
    try:
        _logger.info('Validating new_result_infos')
        ResultInfo.validateResultInfos(new_result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    smtrunner.util.writeYaml(pargs.output, new_result_infos)
    return 0
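
The selection loop above round-robins over several histogram state machines, skipping benchmarks that were already picked and dropping exhausted machines, until the bound is reached. The core idea as a minimal standalone sketch using plain iterables instead of HSMs (names and data are illustrative only):

def round_robin_select(sources, bound, already_selected=None):
    selected = set(already_selected or [])
    iters = [iter(s) for s in sources]
    while iters and len(selected) < bound:
        still_alive = []
        for it in iters:
            if len(selected) >= bound:
                break
            for key in it:
                if key not in selected:
                    selected.add(key)
                    still_alive.append(it)
                    break
            # If the inner loop fell through, `it` is exhausted and dropped.
        iters = still_alive
    return selected

assert round_robin_select([['a', 'b'], ['b', 'c', 'd']], bound=3) == {'a', 'b', 'c'}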
Code Example #12
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'), nargs='+')
    parser.add_argument(
        '--dump-tags',
        dest="dump_tags",
        nargs='+',
        default=[],
    )
    parser.add_argument(
        '--dump-corresponding-tags',
        dest="dump_corresponding_tags",
        action='store_true',
        default=False,
    )
    parser.add_argument(
        '--allow-merge-failures',
        action='store_true',
        default=False,
    )
    parser.add_argument(
        '--only-report-conflicts-for-expected-unknown',
        dest='only_report_conflicts_for_expected_unknown',
        action='store_true',
        default=False,
    )
    parser.add_argument(
        '--no-rebase-paths',
        dest='no_rebase_paths',
        action='store_true',
        default=False,
    )
    parser.add_argument(
        '--indices-to-use-for-conflict-check',
        dest='indices_to_use_for_conflict_check',
        type=int,
        nargs='+',
        default=[],
        help='By default all indices are used',
    )
    parser.add_argument(
        '--index-for-compute-sets',
        dest='index_for_compute_sets',
        type=int,
        default=None,
    )

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    if len(pargs.indices_to_use_for_conflict_check) == 0:
        indices_to_use_for_conflict_check = list(
            range(0, len(pargs.result_infos)))
    else:
        indices_to_use_for_conflict_check = pargs.indices_to_use_for_conflict_check
    for i in indices_to_use_for_conflict_check:
        if i >= len(pargs.result_infos):
            _logger.error('Index {} is invalid. Must be < {}'.format(
                i, len(pargs.result_infos)))
            return 1

    if pargs.index_for_compute_sets is not None:
        if pargs.index_for_compute_sets >= len(pargs.result_infos):
            _logger.error(
                'Index {} for compute sets is invalid. Must be < {}'.format(
                    pargs.index_for_compute_sets, len(pargs.result_infos)))
            return 1

    index_to_raw_result_infos = []
    index_to_file_name = []
    index_to_tag_to_key_map = []
    index_to_keys_with_mixed_tags = []
    for of in pargs.result_infos:
        try:
            _logger.info('Loading "{}"'.format(of.name))
            result_infos = ResultInfo.loadRawResultInfos(of)
            index_to_raw_result_infos.append(result_infos)
            index_to_file_name.append(of.name)
            index_to_tag_to_key_map.append(dict())
            index_to_keys_with_mixed_tags.append(set())
        except ResultInfo.ResultInfoValidationError as e:
            _logger.error('Validation error:\n{}'.format(e))
            return 1
    _logger.info('Loading done')

    if not pargs.no_rebase_paths:
        index_to_file_name = ResultInfoUtil.rebase_paths_infer(
            index_to_file_name)

    for value in indices_to_use_for_conflict_check:
        _logger.info('Using {} for conflict checks'.format(
            index_to_file_name[value]))

    # Group result infos by benchmark name
    # Perform grouping by benchmark name
    key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        index_to_raw_result_infos)
    if len(rejected_result_infos) > 0:
        l_was_empty = True
        for index, l in enumerate(rejected_result_infos):
            _logger.warning('Index {} had {} rejections'.format(index, len(l)))
            if len(l) > 0:
                _logger.warning('There were rejected result infos')
                l_was_empty = False
        if not l_was_empty:
            if pargs.allow_merge_failures:
                _logger.warning('Merge failures being allowed')
            else:
                _logger.error('Merge failures are not allowed')
                return 1

    # Now collect tags
    keys_to_indices_that_conflict = dict()
    _logger.info('Found {} benchmarks'.format(len(
        key_to_results_infos.keys())))
    for key, ris in key_to_results_infos.items():
        tags_for_key = []
        for index, ri in enumerate(ris):
            tags = ri['event_tag']
            _logger.debug('For {} : {} got {}'.format(
                key, index_to_file_name[index], tags))
            if isinstance(tags, list):
                # Merged result
                try:
                    merged_tag, tags_were_mixed = event_analysis.merge_aggregate_events(
                        tags)
                    if tags_were_mixed:
                        index_to_keys_with_mixed_tags[index].add(key)
                except event_analysis.MergeEventFailure as e:
                    _logger.error(
                        'Failed to merge events {} for benchmark {} for {}'.
                        format(tags, key, index_to_file_name[index]))
                    raise e
            else:
                # Single result
                assert isinstance(tags, str)
                merged_tag = tags
                tags_were_mixed = False
            # Record the tag
            if index in indices_to_use_for_conflict_check:
                tags_for_key.append(merged_tag)
            else:
                tags_for_key.append('')
            try:
                index_to_tag_to_key_map[index][merged_tag].add(key)
            except KeyError:
                index_to_tag_to_key_map[index][merged_tag] = {key}
        # Now look at the reported tags and check for conflicts
        conflicting_indices = find_conflicts(tags_for_key)
        if len(conflicting_indices) > 0:
            if (ris[0]['expected_sat'] != 'unknown'
                    and pargs.only_report_conflicts_for_expected_unknown):
                _logger.warning('Skipping found conflict for {}'.format(key))
                continue
            keys_to_indices_that_conflict[key] = conflicting_indices
            _logger.warning(
                'Found conflict for benchmark {} with:\ntags:{}\nnames:\n{}\n'.
                format(key, [tags_for_key[i] for i in conflicting_indices],
                       [index_to_file_name[i] for i in conflicting_indices]))

    # Now report tags
    print("Found {} conflicting benchmarks".format(
        len(keys_to_indices_that_conflict.keys())))
    for index, tag_to_key_map in enumerate(index_to_tag_to_key_map):
        report(
            index,
            index_to_file_name,
            tag_to_key_map,
            index_to_keys_with_mixed_tags[index],
            key_to_results_infos,
            pargs.dump_corresponding_tags,
            pargs.dump_tags,
        )

    # Now report sets
    if pargs.index_for_compute_sets is not None:
        report_sets_for_index(pargs.index_for_compute_sets,
                              index_to_tag_to_key_map, index_to_file_name,
                              set(key_to_results_infos.keys()))
    return 0
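
# The find_conflicts() helper used above is not shown. A minimal sketch, under
# the assumption that a "conflict" means two of the checked result sets gave
# opposite satisfiability answers (indices that were not checked carry the
# empty string as their tag and are ignored):
def find_conflicts(tags_for_key):
    answers = {}
    for index, tag in enumerate(tags_for_key):
        if tag == 'sat' or tag.startswith('sat_'):
            answers[index] = 'sat'
        elif tag == 'unsat' or tag.startswith('unsat_'):
            answers[index] = 'unsat'
    if len(set(answers.values())) <= 1:
        # Every recorded answer agrees (or nothing answered): no conflict.
        return []
    return sorted(answers.keys())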
Code example #13
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('original_result_infos',
                        type=argparse.FileType('r'))
    parser.add_argument('original_result_infos_wd',
                        type=str)
    parser.add_argument('patch_result_infos',
                        type=argparse.FileType('r'))
    parser.add_argument('patch_result_infos_wd',
                        type=str)
    parser.add_argument('output_result_info',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location for result info YAML file')
    parser.add_argument('output_result_infos_wd',
                        type=str)
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    # Validate paths
    if not check_dir_exists(pargs.original_result_infos_wd):
        return 1
    original_result_infos_wd = pargs.original_result_infos_wd
    if not check_dir_exists(pargs.patch_result_infos_wd):
        return 1
    patch_result_infos_wd = pargs.patch_result_infos_wd
    if os.path.exists(pargs.output_result_infos_wd):
        _logger.error('"{}" already exists'.format(pargs.output_result_infos_wd))
        return 1
    # Load YAML files
    original_raw_results_info = load_yaml(pargs.original_result_infos)
    if original_raw_results_info is None:
        return 1
    patch_raw_result_infos = load_yaml(pargs.patch_result_infos)
    if patch_raw_result_infos is None:
        return 1
    _logger.info('Loading done')

    # Group patch results by key for look-up
    key_to_patch_result = dict()
    for ri in patch_raw_result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        assert key not in key_to_patch_result
        key_to_patch_result[key] = ri

    # Construct new results info
    new_rri = original_raw_results_info.copy() # shallow copy
    new_results = []
    new_rri['results'] = new_results
    # Maps absolute source working directory paths to their destination
    # directory names in the new working directory.
    workdirs_to_copy = dict()

    used_keys = set()
    used_dest_names = set()
    patch_count = 0
    _logger.info('Constructing new results')
    for ri in original_raw_results_info['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        assert key not in used_keys
        ri_to_use = None
        wd_path_prefix = None
        wd_dest_name = None
        if key in key_to_patch_result:
            ri_to_use = key_to_patch_result[key]
            wd_path_prefix = patch_result_infos_wd
            # HACK: Good enough to avoid name collision
            wd_dest_name = os.path.basename(ri_to_use['working_directory']) + "_patched"
            patch_count += 1
        else:
            ri_to_use = ri
            wd_path_prefix = original_result_infos_wd
            wd_dest_name = os.path.basename(ri_to_use['working_directory'])
        wd_path = join_path(wd_path_prefix, ri_to_use['working_directory'])
        if not check_dir_exists(wd_path):
            return 1
        assert wd_path not in workdirs_to_copy
        # Patch paths if necessary
        ri_to_use = patch_ri_paths(ri_to_use, wd_dest_name)
        assert wd_dest_name not in used_dest_names
        workdirs_to_copy[wd_path] = wd_dest_name
        new_results.append(ri_to_use)
        used_keys.add(key)
        used_dest_names.add(wd_dest_name)

    # Compute new results to add
    _logger.info('Adding new results')
    add_count = 0
    new_keys = set(key_to_patch_result.keys()).difference(used_keys)
    for key in new_keys:
        add_count += 1
        new_results.append(key_to_patch_result[key])

    print("# of patched results: {}".format(patch_count))
    print("# of new results: {}".format(add_count))

    # Output the new results as YAML
    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(new_rri)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')
    _logger.info('Writing to "{}"'.format(pargs.output_result_info.name))
    smtrunner.util.writeYaml(pargs.output_result_info, new_rri)
    _logger.info('Writing done')

    # Now create the new working directory by copying from other directories
    create_new_working_directories(pargs.output_result_infos_wd, workdirs_to_copy)
    return 0
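
# check_dir_exists(), join_path(), load_yaml() and patch_ri_paths() are used
# above but not shown. Minimal sketches of the two path helpers, assuming that
# check_dir_exists() logs an error and returns a bool, and that join_path()
# leaves absolute paths and empty prefixes alone:
def check_dir_exists(path):
    if not os.path.isdir(path):
        _logger.error('"{}" does not exist or is not a directory'.format(path))
        return False
    return True

def join_path(prefix, path):
    if prefix == "" or os.path.isabs(path):
        return path
    return os.path.join(prefix, path)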
Code example #14
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('--benchmark-base',
                        dest="benchmark_base",
                        default="",
                        type=str)
    parser.add_argument('--wd-base', dest="wd_base", default="", type=str)
    parser.add_argument(
        '--dump-tags',
        dest="dump_tags",
        nargs='+',
        default=[],
    )
    parser.add_argument(
        '--timeout',
        type=float,
        default=None,
    )
    parser.add_argument(
        '--use-dsoes-wallclock-time',
        action='store_true',
        default=False,
    )
    parser.add_argument(
        '--bool-args',
        dest='bool_args',
        nargs='+',
        default=[],
    )
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)
    _logger.info('Using benchmark base of "{}"'.format(pargs.benchmark_base))
    _logger.info('Using working directory base of "{}"'.format(pargs.wd_base))

    extra_kwargs = {}
    bool_arg_re = re.compile(r'^([a-zA-Z_.]+)=(true|false)$')
    for b in pargs.bool_args:
        m = bool_arg_re.match(b)
        if m is None:
            _logger.error('"{}" is not a valid bool assignment'.format(b))
            return 1
        var_name = m.group(1)
        assignment = m.group(2)
        _logger.info('Adding extra param "{}" = {}'.format(
            var_name, assignment))
        if assignment == 'true':
            assignment_as_bool = True
        else:
            assert assignment == 'false'
            assignment_as_bool = False
        extra_kwargs[var_name] = assignment_as_bool

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    runner = result_infos['misc']['runner']
    _logger.info('Found runner "{}"'.format(runner))
    backend = None
    if 'backend' in result_infos['misc']:
        backend = result_infos['misc']['backend']
    _logger.info('Backend was "{}"'.format(backend))

    event_analyser = event_analysis.get_event_analyser_from_runner_name(
        runner,
        soft_timeout=pargs.timeout,
        use_dsoes_wallclock_time=pargs.use_dsoes_wallclock_time,
        **extra_kwargs)
    tag_to_keys = dict()
    for ri in result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        # Construct get event tag info
        geti = event_analysis.GETInfo(ri=ri,
                                      wd_base=pargs.wd_base,
                                      benchmark_base=pargs.benchmark_base,
                                      backend=backend)
        tag = event_analyser.get_event_tag(geti)
        if tag is None:
            _logger.error('Unhandled event for "{}"'.format(key))
            _logger.error(pprint.pformat(ri))
            return 1
        # Record tag
        try:
            cur_set = tag_to_keys[tag]
            cur_set.add(key)
        except KeyError:
            tag_to_keys[tag] = {key}

    # Dump tags
    print("")
    print("TAG COUNTS")
    for tag_name, keys in sorted(tag_to_keys.items(), key=lambda k: k[0]):
        print("{}: {}".format(tag_name, len(keys)))
    # Dump requested tags
    for tag_name in pargs.dump_tags:
        if tag_name in tag_to_keys:
            print("{}: \n{}".format(tag_name,
                                    pprint.pformat(tag_to_keys[tag_name])))
        else:
            _logger.error('Tag "{}" not present'.format(tag_name))
    return 0
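
# The try/except KeyError insertion used above can equivalently be written
# with collections.defaultdict. A small sketch of the same tag-counting loop
# (count_tags is a hypothetical helper name, not part of the script above):
import collections

def count_tags(results, event_analyser, wd_base, benchmark_base, backend):
    tag_to_keys = collections.defaultdict(set)
    for ri in results:
        key = ResultInfoUtil.get_result_info_key(ri)
        geti = event_analysis.GETInfo(ri=ri,
                                      wd_base=wd_base,
                                      benchmark_base=benchmark_base,
                                      backend=backend)
        tag = event_analyser.get_event_tag(geti)
        if tag is not None:
            # defaultdict creates the empty set on first access.
            tag_to_keys[tag].add(key)
    return tag_to_keys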
Code example #15
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('--base', type=str, default="")
    parser.add_argument('--random-seed',
                        type=int,
                        default=0,
                        dest='random_seed')
    parser.add_argument('-o',
                        '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')
    # START filter arguments
    parser.add_argument(
        '--filter-out-expected-sat',
        dest='filter_out_expected_sat',
        nargs='+',  # gather into list
        choices=['sat', 'unsat', 'unknown'],
        default=[],
    )
    parser.add_argument(
        '--filter-out-sat',
        dest='filter_out_sat',
        nargs='+',  # gather into list
        choices=['sat', 'unsat', 'unknown'],
        default=[],
    )
    parser.add_argument(
        '--filter-random-percentage',
        dest='filter_random_percentange',
        type=float,
        default=None,
    )
    parser.add_argument(
        '--filter-keep-benchmark-matching-regex',
        dest='filter_keep_benchmarks_matching_regex',
        type=str,
        default=None,
    )
    parser.add_argument(
        '--filter-out-benchmark-matching-regex',
        dest='filter_out_benchmarks_matching_regex',
        type=str,
        default=None,
    )
    parser.add_argument(
        '--filter-keep-benchmark-matching-exit-code',
        dest='filter_keep_benchmarks_matching_exit_code',
        type=int,
        default=None,
    )
    parser.add_argument(
        '--filter-keep-non-trivial',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--filter-keep-trivial',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--filter-keep-benchmarks-from-file',
        dest='filter_keep_benchmarks_from_file',
        type=argparse.FileType('r'),
    )
    parser.add_argument(
        '--filter-out-benchmarks-from-file',
        dest='filter_out_benchmarks_from_file',
        type=argparse.FileType('r'),
    )

    # END filter arguments

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    # Set random seed
    random.seed(pargs.random_seed)

    # START Apply filters
    new_results = result_infos['results']
    if len(pargs.filter_out_expected_sat) > 0:
        new_results = filter_out_expected_sat_types(
            new_results, pargs.filter_out_expected_sat, pargs.base)

    if len(pargs.filter_out_sat) > 0:
        new_results = filter_out_sat_types(new_results, pargs.filter_out_sat,
                                           pargs.base)

    if pargs.filter_random_percentange is not None:
        if not (pargs.filter_random_percentange >= 0.0
                and pargs.filter_random_percentange <= 1.0):
            _logger.error('Filter percentage must be in range [0.0, 1.0]')
            return 1
        new_results = filter_random_percentange(
            new_results, pargs.filter_random_percentange)
    if pargs.filter_keep_benchmarks_matching_regex:
        new_results = filter_keep_benchmarks_matching_regex(
            new_results, pargs.filter_keep_benchmarks_matching_regex)
    if pargs.filter_out_benchmarks_matching_regex:
        new_results = filter_out_benchmarks_matching_regex(
            new_results, pargs.filter_out_benchmarks_matching_regex)

    if pargs.filter_keep_benchmarks_matching_exit_code is not None:
        new_results = filter_keep_benchmarks_matching_exit_code(
            new_results, pargs.filter_keep_benchmarks_matching_exit_code)

    if pargs.filter_keep_non_trivial:
        new_results = filter_keep_non_trivial_benchmarks(new_results)
    if pargs.filter_keep_trivial:
        new_results = filter_keep_trivial_benchmarks(new_results)

    if pargs.filter_out_benchmarks_from_file:
        new_results = filter_out_benchmarks_from_file(
            new_results, pargs.filter_out_benchmarks_from_file)
    if pargs.filter_keep_benchmarks_from_file:
        new_results = filter_keep_benchmarks_from_file(
            new_results, pargs.filter_keep_benchmarks_from_file)

    # END Apply filters
    new_result_infos = result_infos
    new_result_infos['results'] = new_results

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(new_result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')

    smtrunner.util.writeYaml(pargs.output, new_result_infos)
    return 0
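
# None of the filter_* helpers used above are shown. A minimal sketch of one of
# them, assuming each helper takes and returns a list of result infos and that
# the regex is matched against the benchmark key:
import re

def filter_keep_benchmarks_matching_regex(results, regex):
    pattern = re.compile(regex)
    new_results = []
    for ri in results:
        key = ResultInfoUtil.get_result_info_key(ri)
        if pattern.search(key):
            new_results.append(ri)
        else:
            _logger.debug('Removing benchmark {}'.format(key))
    return new_results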
Code example #16
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos',
                        type=argparse.FileType('r'))
    parser.add_argument('--benchmark-base',
        dest="benchmark_base",
        default="",
        type=str)
    parser.add_argument('--wd-base',
        dest="wd_base",
        default="",
        type=str)
    parser.add_argument('--timeout',
        type=float,
        default=None,
        help='Timeout to assume when creating tags',
    )
    parser.add_argument('--use-dsoes-wallclock-time',
        action='store_true',
        default=False,
    )
    parser.add_argument('--output',
        type=argparse.FileType('w'),
        default=sys.stdout,
    )
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)
    _logger.info('Using benchmark base of "{}"'.format(pargs.benchmark_base))
    _logger.info('Using working directory base of "{}"'.format(pargs.wd_base))

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    runner = result_infos['misc']['runner']
    _logger.info('Found runner "{}"'.format(runner))
    backend = None
    if 'backend' in result_infos['misc']:
        backend = result_infos['misc']['backend']
    _logger.info('Backend was "{}"'.format(backend))

    event_analyser = event_analysis.get_event_analyser_from_runner_name(
        runner,
        soft_timeout=pargs.timeout,
        use_dsoes_wallclock_time=pargs.use_dsoes_wallclock_time)
    if not isinstance(event_analyser, event_analysis.JFSRunnerEventAnalyser):
        _logger.error('Must be a JFS run')
        return 1
    new_results = result_infos.copy()
    new_results['results'] = []
    for ri in result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        wd = ResultInfoUtil.get_result_info_wd(ri)
        geti = event_analysis.GETInfo(
            ri=ri,
            wd_base=pargs.wd_base,
            benchmark_base=pargs.benchmark_base,
            backend=backend
        )

        new_ri = ri.copy()

        num_inputs, num_wrong_sized_inputs, fuzzing_wallclock_time = event_analyser.get_fuzzing_throughput_fields(geti)
        assert num_inputs is None or isinstance(num_inputs, int)
        assert num_wrong_sized_inputs is None or isinstance(num_wrong_sized_inputs, int)
        assert fuzzing_wallclock_time is None or isinstance(fuzzing_wallclock_time, float)
        _logger.info('num_inputs = {} for {}'.format(num_inputs, key))
        _logger.info('num_wrong_sized_inputs = {} for {}'.format(num_wrong_sized_inputs, key))
        _logger.info('fuzzing_wallclock_time = {} for {}'.format(
            fuzzing_wallclock_time,
            key))

        # Get LibFuzzer stats
        libfuzzer_avg_exec = event_analyser.get_libfuzzer_stat_average_exec_per_sec(geti)
        new_ri['libfuzzer_average_exec_per_sec'] = libfuzzer_avg_exec
        _logger.info('libfuzzer_average_exec_per_sec = {} for {}'.format(
            libfuzzer_avg_exec,
            key))

        # Get the event tag so we can determine when the throughput
        # information should be available.
        tag = event_analyser.get_event_tag(geti)
        if tag is None:
            _logger.error('Unhandled event for "{}"'.format(key))
            _logger.error(pprint.pformat(ri))
            return 1
        if tag in {'sat', 'unsat', 'sat_but_expected_unsat', 'unsat_but_expected_sat'}:
            if num_inputs is None:
                _logger.error('num_inputs should not be None for {} ({})'.format(key, wd))
                return 1
            if num_wrong_sized_inputs is None:
                _logger.error('num_wrong_sized_inputs should not be None for {} ({})'.format(key, wd))
                return 1
            if fuzzing_wallclock_time is None:
                _logger.error('fuzzing_wallclock_time should not be None for {} ({})'.format(key, wd))
                return 1


        new_ri['jfs_stat_num_inputs'] = num_inputs
        new_ri['jfs_stat_num_wrong_sized_inputs'] = num_wrong_sized_inputs
        new_ri['jfs_stat_fuzzing_wallclock_time'] = fuzzing_wallclock_time
        new_results['results'].append(new_ri)

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(new_results)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')

    smtrunner.util.writeYaml(pargs.output, new_results)

    return 0
Code example #17
def main(args):
    global _logger
    global _fail_count
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos',
                        type=argparse.FileType('r'),
                        nargs='+')
    parser.add_argument('--base', type=str, default="")
    parser.add_argument('--allow-merge-failures',
        dest='allow_merge_failures',
        default=False,
        action='store_true',
    )
    parser.add_argument('--no-rank-unknown',
        dest='no_rank_unknown',
        default=False,
        action='store_true',
    )
    parser.add_argument('--dump-wins',
        dest='dump_wins',
        default=False,
        action='store_true',
    )
    parser.add_argument('--max-exec-time',
        default=None,
        type=float,
        dest='max_exec_time',
    )
    # parser.add_argument('-o', '--output',
    #                     type=argparse.FileType('w'),
    #                     default=sys.stdout,
    #                     help='Output location (default stdout)')

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    if not pargs.no_rank_unknown and pargs.max_exec_time is None:
        _logger.error(
            '--max-exec-time must be specified unless --no-rank-unknown is used')
        return 1

    index_to_raw_result_infos = []
    index_to_file_name = []
    index_to_wins = []
    for index, result_infos_file in enumerate(pargs.result_infos):
        try:
            _logger.info('Loading "{}"'.format(result_infos_file.name))
            result_infos = ResultInfo.loadRawResultInfos(result_infos_file)
            index_to_raw_result_infos.append(result_infos)
            index_to_file_name.append(result_infos_file.name)
            index_to_wins.append(set())
        except ResultInfo.ResultInfoValidationError as e:
            _logger.error('Validation error:\n{}'.format(e))
            return 1
        _logger.info('Loading done')
    result_infos = None

    # Perform grouping by benchmark name
    key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
            index_to_raw_result_infos)
    if len(rejected_result_infos) > 0:
        _logger.warning('There were rejected result infos')
        num_merge_failures = 0
        for index, l in enumerate(rejected_result_infos):
            _logger.warning('Index {} had {} rejections'.format(index, len(l)))
            num_merge_failures += len(l)
        if num_merge_failures > 0:
            if pargs.allow_merge_failures:
                _logger.warning('Merge failures being allowed')
            else:
                _logger.error('Merge failures are not allowed')
                return 1


    failed_to_rank = set()
    for key, raw_result_info_list in sorted(key_to_results_infos.items(), key=lambda kv: kv[0]):
        _logger.info('Ranking on "{}" : '.format(key))
        indices_to_use = []
        # Compute indices to use
        modified_raw_result_info_list = [ ]

        # Handle "unknown"
        # Only compare results that gave sat/unsat
        for index, ri in enumerate(raw_result_info_list):
            sat, _ = analysis.get_sat_from_result_info(ri)
            _logger.info('index {} {}'.format(index, sat))
            if sat != 'unknown':
                indices_to_use.append(index)
                modified_raw_result_info_list.append(ri)
            else:
                if pargs.no_rank_unknown:
                    # Legacy
                    modified_raw_result_info_list.append(ri)
                    _logger.debug('Not using index {} for {} due to unknown'.format(
                        index,
                        key))
                else:
                    modified_ri = analysis.get_result_with_modified_time(
                        ri,
                        pargs.max_exec_time)
                    _logger.debug('modified_ri: {}'.format(pprint.pformat(modified_ri)))
                    _logger.debug('Treating index {} for {} due to unknown as having max-time'.format(
                        index,
                        key))
                    indices_to_use.append(index)
                    modified_raw_result_info_list.append(modified_ri)
        _logger.debug('used indices_to_use: {}'.format(indices_to_use))

        if len(indices_to_use) == 0:
            # Can't rank
            failed_to_rank.add(key)
            continue

        ranked_indices, ordered_bounds = analysis.rank_by_execution_time(
            modified_raw_result_info_list,
            indices_to_use,
            pargs.max_exec_time,
            analysis.get_arithmetic_mean_and_99_confidence_intervals,
            ['dsoes_wallclock', 'wallclock'])
        _logger.info('Ranking on "{}" : {}'.format(key, ranked_indices))
        _logger.info('Ranking on "{}" : {}'.format(key, ordered_bounds))
        # Record win
        if len(ranked_indices[0]) == 1:
            # Winner
            winner_index = ranked_indices[0][0]
            _logger.info('Recorded win for {}'.format(
                index_to_file_name[winner_index]))
            index_to_wins[winner_index].add(key)
        else:
            failed_to_rank.add(key)

    # Report wins
    for index, winner_key_set in enumerate(index_to_wins):
        name = index_to_file_name[index]
        print("# of wins for {}: {}".format(name, len(winner_key_set)))
        if pargs.dump_wins:
            print(pprint.pformat(sorted(list(winner_key_set))))
    print("# failed to rank: {}".format(len(failed_to_rank)))
    return 0
Code example #18
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'), nargs='+')
    parser.add_argument('--wd-bases', type=str, default=[], nargs='+')
    parser.add_argument(
        '--allow-merge-failures',
        dest='allow_merge_failures',
        default=False,
        action='store_true',
    )
    parser.add_argument('-o',
                        '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    if len(pargs.wd_bases) > 0:
        if len(pargs.wd_bases) != len(pargs.result_infos):
            _logger.error(
                'Number of working directory bases must equal the number of '
                'result info files'
            )
            return 1
        for wd_base in pargs.wd_bases:
            if not os.path.exists(wd_base):
                _logger.error('"{}" does not exist'.format(wd_base))
                return 1
            if not os.path.isdir(wd_base):
                _logger.error('"{}" is not a directory'.format(wd_base))
                return 1
            if not os.path.isabs(wd_base):
                _logger.error('"{}" must be an absolute path'.format(wd_base))
                return 1

    index_to_raw_result_infos = []

    for index, result_infos_file in enumerate(pargs.result_infos):
        try:
            _logger.info('Loading "{}"'.format(result_infos_file.name))
            result_infos = ResultInfo.loadRawResultInfos(result_infos_file)
            index_to_raw_result_infos.append(result_infos)
        except ResultInfo.ResultInfoValidationError as e:
            _logger.error('Validation error:\n{}'.format(e))
            return 1
        _logger.info('Loading done')
    result_infos = None

    # HACK: Do something smarter here
    merged = {
        'misc': {
            'runner': index_to_raw_result_infos[0]['misc']['runner'],
        },
        'results': [],
        'schema_version': index_to_raw_result_infos[0]['schema_version'],
    }

    # Perform grouping by benchmark name
    key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        index_to_raw_result_infos)
    if len(rejected_result_infos) > 0:
        l_was_empty = True
        for index, l in enumerate(rejected_result_infos):
            _logger.warning('Index {} had {} rejections'.format(index, len(l)))
            if len(l) > 0:
                _logger.warning('There were rejected result infos')
                l_was_empty = False
        if not l_was_empty:
            if pargs.allow_merge_failures:
                _logger.warning('Merge failures being allowed')
            else:
                _logger.error('Merge failures are not allowed')
                return 1

    merged_key_result_info, merge_failures = ResultInfoUtil.merge_raw_result_infos(
        key_to_results_infos,
        allow_merge_errors=False,
        wd_bases=pargs.wd_bases if len(pargs.wd_bases) > 0 else None)
    if len(merge_failures) > 0:
        _logger.error('There were merge failures')
        return 1
    # Append the merged results in key-sorted order
    for key, merged_result in sorted(merged_key_result_info.items()):
        merged['results'].append(merged_result)

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(merged)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')

    smtrunner.util.writeYaml(pargs.output, merged)
    return 0
Code example #19
def entryPoint(args):
    # pylint: disable=global-statement,too-many-branches,too-many-statements
    # pylint: disable=too-many-return-statements
    global _logger, futureToRunners
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument("--benchmark-base",
                        dest='benchmark_base_path',
                        type=str,
                        default="",
                        help="Prefix path for benchmarks")
    parser.add_argument("--dry",
                        action='store_true',
                        help="Stop after initialising runners")
    parser.add_argument(
        "-j",
        "--jobs",
        type=int,
        default=1,
        help="Number of jobs to run in parallel (Default %(default)s)")
    parser.add_argument("config_file", help="YAML configuration file")
    parser.add_argument("invocation_info", help="Invocation info file")
    parser.add_argument("working_dirs_root",
                        help="Directory to create working directories inside")
    parser.add_argument("yaml_output", help="path to write YAML output to")

    pargs = parser.parse_args(args)

    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    if pargs.jobs <= 0:
        _logger.error('jobs must be > 0')
        return 1

    # Check benchmark base path
    if not os.path.isabs(pargs.benchmark_base_path):
        pargs.benchmark_base_path = os.path.abspath(pargs.benchmark_base_path)
    if not os.path.isdir(pargs.benchmark_base_path):
        _logger.error('Benchmark base path "{}" must be a directory'.format(
            pargs.benchmark_base_path))
        return 1
    if not os.path.isabs(pargs.benchmark_base_path):
        _logger.error('Benchmark base path "{}" must be absolute'.format(
            pargs.benchmark_base_path))
        return 1

    # Load runner configuration
    config, success = DriverUtil.loadRunnerConfig(pargs.config_file)
    if not success:
        return 1

    # Load invocation info
    try:
        with open(pargs.invocation_info, 'r') as f:
            # FIXME: Clean up naming; invocation info and result info are now the same thing
            invocation_infos = ResultInfo.loadRawResultInfos(f)
    except Exception as e:  # pylint: disable=broad-except
        _logger.error(e)
        _logger.debug(traceback.format_exc())
        return 1

    if not check_paths(invocation_infos, pargs.benchmark_base_path):
        _logger.error('Problems with invocation infos')
        return 1

    # Misc data we will put into output result info file.
    output_misc_data = {
        'runner': config['runner'],
        'backend': config['runner_config']['backend']['name'],
        'jobs_in_parallel': pargs.jobs,
    }
    if 'misc' not in invocation_infos:
        invocation_infos['misc'] = {}
    invocation_infos['misc'].update(output_misc_data)
    output_misc_data = invocation_infos['misc']

    yamlOutputFile = os.path.abspath(pargs.yaml_output)

    if os.path.exists(yamlOutputFile):
        _logger.error(
            'yaml_output file ("{}") already exists'.format(yamlOutputFile))
        return 1

    # Setup the directory to hold working directories
    workDirsRoot = os.path.abspath(pargs.working_dirs_root)
    if os.path.exists(workDirsRoot):
        # Check it's a directory and that it's empty
        if not os.path.isdir(workDirsRoot):
            _logger.error(
                '"{}" exists but is not a directory'.format(workDirsRoot))
            return 1

        workDirsRootContents = next(os.walk(workDirsRoot, topdown=True))
        if len(workDirsRootContents[1]) > 0 or len(
                workDirsRootContents[2]) > 0:
            _logger.error('"{}" is not empty ({},{})'.format(
                workDirsRoot, workDirsRootContents[1],
                workDirsRootContents[2]))
            return 1
    else:
        # Try to create the working directory
        try:
            os.mkdir(workDirsRoot)
        except Exception as e:  # pylint: disable=broad-except
            _logger.error(
                'Failed to create working_dirs_root "{}"'.format(workDirsRoot))
            _logger.error(e)
            _logger.debug(traceback.format_exc())
            return 1

    # Get Runner class to use
    RunnerClass = RunnerFactory.getRunnerClass(config['runner'])
    runner_ctx = RunnerContext.RunnerContext(num_parallel_jobs=pargs.jobs)

    if not 'runner_config' in config:
        _logger.error('"runner_config" missing from config')
        return 1

    if not isinstance(config['runner_config'], dict):
        _logger.error('"runner_config" should map to a dictionary')
        return 1

    rc = config['runner_config']

    # Create the runners
    runners = []
    for index, invocationInfo in enumerate(invocation_infos['results']):
        _logger.info('Creating runner {} out of {} ({:.1f}%)'.format(
            index + 1, len(invocation_infos['results']),
            100 * float(index + 1) / len(invocation_infos['results'])))
        # Create working directory for this runner
        # FIXME: This should be moved into the runner itself
        workDir = os.path.join(workDirsRoot, 'workdir-{}'.format(index))
        assert not os.path.exists(workDir)
        try:
            os.mkdir(workDir)
        except Exception as e:  # pylint: disable=broad-except
            _logger.error(
                'Failed to create working directory "{}"'.format(workDir))
            _logger.error(e)
            _logger.debug(traceback.format_exc())
            return 1
        # Pass in a copy of rc so that if a runner accidentally modifies
        # a config it won't affect other runners.
        rc_copy = rc.copy()
        rc_copy['benchmark_base_path'] = pargs.benchmark_base_path
        rc_copy['output_base_path'] = workDirsRoot
        runners.append(
            RunnerClass(invocationInfo, workDir, rc_copy, runner_ctx))

    # Run the runners and build the report
    reports = []
    exitCode = 0

    if pargs.dry:
        _logger.info('Not running runners')
        return exitCode

    startTime = datetime.datetime.now()
    _logger.info('Starting {}'.format(startTime.isoformat(' ')))
    output_misc_data['start_time'] = str(startTime.isoformat(' '))

    if pargs.jobs == 1:
        _logger.info('Running jobs sequentially')
        for r in runners:
            try:
                r.run()
                reports.append(r.getResults())
            except KeyboardInterrupt:
                _logger.error('Keyboard interrupt')
                # This is slightly redundant because the runner
                # currently kills itself if KeyboardInterrupt is thrown
                r.kill()
                break
            except Exception:  # pylint: disable=broad-except
                _logger.error("Error handling:{}".format(r.program))
                _logger.error(traceback.format_exc())

                # Attempt to add the error to the reports
                errorLog = r.InvocationInfo.copy()
                errorLog['error'] = traceback.format_exc()
                reports.append(errorLog)
                exitCode = 1
    else:
        # FIXME: Make Windows compatible
        # Catch signals so we can clean up
        signal.signal(signal.SIGINT, handleInterrupt)
        signal.signal(signal.SIGTERM, handleInterrupt)

        _logger.info('Running jobs in parallel')
        completedFutureCounter = 0
        import concurrent.futures
        try:
            with concurrent.futures.ThreadPoolExecutor(
                    max_workers=pargs.jobs) as executor:
                # Simple: One runner to one future mapping.
                futureToRunners = {
                    executor.submit(r.run): [r]
                    for r in runners
                }
                for future in concurrent.futures.as_completed(futureToRunners):
                    completed_runner_list = None
                    if isinstance(futureToRunners[future], list):
                        completed_runner_list = futureToRunners[future]
                    else:
                        assert isinstance(futureToRunners[future],
                                          SequentialRunnerHolder)
                        completed_runner_list = futureToRunners[
                            future].completed_runs()
                    for r in completed_runner_list:
                        _logger.debug('{} runner finished'.format(
                            r.programPathArgument))

                        if future.done() and not future.cancelled():
                            completedFutureCounter += 1
                            _logger.info('Completed {}/{} ({:.1f}%)'.format(
                                completedFutureCounter, len(runners),
                                100 * (float(completedFutureCounter) /
                                       len(runners))))

                        excep = None
                        try:
                            if future.exception():
                                excep = future.exception()
                        except concurrent.futures.CancelledError as e:
                            excep = e

                        if excep is not None:
                            # Attempt to log the error reports
                            errorLog = r.InvocationInfo.copy()
                            r_work_dir = None
                            try:
                                r_work_dir = r.workingDirectoryWithoutPrefix
                            except Exception:
                                pass
                            errorLog['working_directory'] = r_work_dir
                            errorLog['error'] = "\n".join(
                                traceback.format_exception(
                                    type(excep), excep, None))
                            # Only emit messages about exceptions that aren't to do
                            # with cancellation
                            if not isinstance(
                                    excep, concurrent.futures.CancelledError):
                                _logger.error(
                                    '{} runner hit exception:\n{}'.format(
                                        r.programPathArgument,
                                        errorLog['error']))
                            reports.append(errorLog)
                            exitCode = 1
                        else:
                            reports.append(r.getResults())
        except KeyboardInterrupt:
            # The executor should have been cleanly terminated.
            # We'll then write what we can to the output YAML file
            _logger.error('Keyboard interrupt')
        finally:
            # Stop catching signals and just use default handlers
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)

    endTime = datetime.datetime.now()
    output_misc_data['end_time'] = str(endTime.isoformat(' '))
    output_misc_data['run_time'] = str(endTime - startTime)

    # Write result to YAML file
    invocation_infos['results'] = reports
    DriverUtil.writeYAMLOutputFile(yamlOutputFile, invocation_infos)

    _logger.info('Finished {}'.format(endTime.isoformat(' ')))
    _logger.info('Total run time: {}'.format(endTime - startTime))
    return exitCode
Code example #20
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos', type=argparse.FileType('r'))
    parser.add_argument('ii_template', type=argparse.FileType('r'))
    parser.add_argument('--benchmark-base',
                        dest="benchmark_base",
                        default="",
                        type=str)
    parser.add_argument('--wd-base', dest="wd_base", default="", type=str)
    parser.add_argument(
        '--dump-tags',
        dest="dump_tags",
        nargs='+',
        default=[],
    )
    parser.add_argument(
        '--timeout',
        type=float,
        default=None,
    )
    parser.add_argument(
        '--use-dsoes-wallclock-time',
        action='store_true',
        default=False,
    )
    parser.add_argument(
        '--bool-args',
        dest='bool_args',
        nargs='+',
        default=[],
    )
    parser.add_argument(
        '--output',
        default=sys.stdout,
        type=argparse.FileType('w'),
    )
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)
    _logger.info('Using benchmark base of "{}"'.format(pargs.benchmark_base))
    _logger.info('Using working directory base of "{}"'.format(pargs.wd_base))

    extra_kwargs = {}
    bool_arg_re = re.compile(r'^([a-zA-Z_.]+)=(true|false)$')
    for b in pargs.bool_args:
        m = bool_arg_re.match(b)
        if m is None:
            _logger.error('"{}" is not a valid bool assignment'.format(b))
            return 1
        var_name = m.group(1)
        assignment = m.group(2)
        _logger.info('Adding extra param "{}" = {}'.format(
            var_name, assignment))
        if assignment == 'true':
            assignment_as_bool = True
        else:
            assert assignment == 'false'
            assignment_as_bool = False
        extra_kwargs[var_name] = assignment_as_bool

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
        _logger.info('Loading "{}"'.format(pargs.ii_template.name))
        ii_template = ResultInfo.loadRawResultInfos(pargs.ii_template)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

    # Do grouping
    _logger.info('Performing merge')
    result_info_list = [result_infos, ii_template]
    key_to_result_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        result_info_list)
    list_was_empty = True
    for index, reject_list in enumerate(rejected_result_infos):
        for reject in reject_list:
            list_was_empty = False
            key = ResultInfoUtil.get_result_info_key(reject)
            _logger.info('{} was rejected'.format(key))
    if not list_was_empty:
        return 1
    _logger.info('Merge complete')

    runner = result_infos['misc']['runner']
    _logger.info('Found runner "{}"'.format(runner))
    backend = None
    if 'backend' in result_infos['misc']:
        backend = result_infos['misc']['backend']
    _logger.info('Backend was "{}"'.format(backend))

    output_ri = {
        'results': [],
        'schema_version': result_info_list[0]['schema_version'],
    }

    event_analyser = event_analysis.get_event_analyser_from_runner_name(
        runner,
        soft_timeout=pargs.timeout,
        use_dsoes_wallclock_time=pargs.use_dsoes_wallclock_time,
        **extra_kwargs)
    tag_to_keys = dict()
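    # Tags in trivial_known_tags mean the dummy-solving run solved the
    # benchmark outright; tags in non_trivial_known_tags mean it did not
    # (timeouts, unsupported sorts, generic unknowns).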
    non_trivial_known_tags = {
        'jfs_generic_unknown',
        'timeout',
        'soft_timeout',
        'jfs_dropped_stdout_bug_unknown',
        'unsupported_bv_sort',
        'unsupported_fp_sort',
        'unsupported_sorts',
    }
    trivial_known_tags = {
        'sat',
        'jfs_dropped_stdout_bug_sat',
        'jfs_dropped_stdout_bug_unsat',
        'unsat',
    }
    trivial_keys = set()
    non_trivial_keys = set()
    for ri in result_infos['results']:
        key = ResultInfoUtil.get_result_info_key(ri)
        # Construct get event tag info
        geti = event_analysis.GETInfo(ri=ri,
                                      wd_base=pargs.wd_base,
                                      benchmark_base=pargs.benchmark_base,
                                      backend=backend)
        tag = event_analyser.get_event_tag(geti)
        if tag is None:
            _logger.error('Unhandled event for "{}"'.format(key))
            _logger.error(pprint.pformat(ri))
            return 1
        # The assumption here is that we are using JFS in dummy solving
        # mode. Benchmarks that aren't sat are non-trivial and so we should
        # annotate as such.
        is_trivial = False
        if tag in trivial_known_tags:
            is_trivial = True
            trivial_keys.add(key)
        else:
            if tag not in non_trivial_known_tags:
                _logger.error('Unsupported tag {} for {}'.format(tag, key))
                return 1
            non_trivial_keys.add(key)
        corresponding_ri = key_to_result_infos[key][1].copy()
        corresponding_ri['is_trivial'] = is_trivial
        output_ri['results'].append(corresponding_ri)

    _logger.info('# of trivial benchmarks: {}'.format(len(trivial_keys)))
    _logger.info('# of non-trivial benchmarks: {}'.format(
        len(non_trivial_keys)))

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(output_ri)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')

    smtrunner.util.writeYaml(pargs.output, output_ri)

    return 0
コード例 #21
0
def main(args):
    global _logger
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('result_infos',
                        type=argparse.FileType('r'))
    parser.add_argument('-o', '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        help='Output location (default stdout)')

    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)

    try:
        _logger.info('Loading "{}"'.format(pargs.result_infos.name))
        result_infos = ResultInfo.loadRawResultInfos(pargs.result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Loading done')

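    # Run-specific fields to strip so that only the benchmark description
    # (benchmark path, expected_sat, etc.) remains in the output.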
    keys_to_strip = [
        'sat',
        'wallclock_time',
        'working_directory',
        'exit_code',
        'out_of_memory',
        'stdout_log_file',
        'stderr_log_file',
        'user_cpu_time',
        'sys_cpu_time',
        'backend_timeout',
        'merged_result',
        'error',
        'dsoes_wallclock_time',
        'event_tag',
    ]

    for r in result_infos['results']:
        if 'expected_sat' in r and 'sat' in r:
            # Result might be merged so use `get_sat_from_result_info()`
            expected_sat, es_conflict = analysis.get_expected_sat_from_result_info(r)
            sat, s_conflict = analysis.get_sat_from_result_info(r)
            if es_conflict or s_conflict:
                _logger.warning('Found conflict for {}'.format(
                    r['benchmark']))
            # If the result is merged this will flatten the result
            if expected_sat == 'unknown' and sat != 'unknown':
                _logger.info('Copying over sat for {}'.format(r['benchmark']))
                r['expected_sat'] = sat
            else:
                _logger.debug('Preserving expected_sat')
                r['expected_sat'] = expected_sat
        # strip keys
        for key in keys_to_strip:
            if key in r:
                r.pop(key, None)

    if 'misc' in result_infos:
        result_infos.pop('misc')

    # Validate against schema
    try:
        _logger.info('Validating result_infos')
        ResultInfo.validateResultInfos(result_infos)
    except ResultInfo.ResultInfoValidationError as e:
        _logger.error('Validation error:\n{}'.format(e))
        return 1
    _logger.info('Validation complete')

    smtrunner.util.writeYaml(pargs.output, result_infos)
    return 0
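
# These scripts are typically driven by a small entry-point stub; a sketch,
# assuming each snippet above lives in its own executable file:
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))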