Example #1
def gather_weight_data(wtype):
    # get the results
    results = {}  # maps |V| to ResultAccumulator
    ds = DataSet.read_from_file(WeightResult, WeightResult.get_path_to(wtype))
    for data in ds.dataset.values():
        result = results.get(data.input().num_verts)
        if result is None:
            result = ResultAccumulator(data.mst_weight)
            results[data.input().num_verts] = result
        else:
            result.add_data(data.mst_weight)

    try:
        # open a file to output to
        fh = open(DATA_PATH + wtype + '.dat', 'w')

        # compute relevant stats and output them
        print >> fh, '#|V|\tLower\tAverage\tUpper  (Lower/Upper from 99% CI)'
        keys = results.keys()
        keys.sort()
        for num_verts in keys:
            r = results[num_verts]
            r.compute_stats()
            if len(r.values) > 1:
                print >> fh, '%u\t%.3f\t%.3f\t%.3f\t%u' % (
                    num_verts, r.lower99, r.mean, r.upper99, len(r.values))
        fh.close()
        return 0
    except IOError, e:
        print >> sys.stderr, "failed to write file: " + str(e)
        return -1
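
For comparison, here is a minimal Python 3 sketch of the same aggregation-and-output pattern. DataSet, WeightResult, ResultAccumulator, and DATA_PATH are the project-specific names assumed from the snippet above, so this is illustrative rather than drop-in code.

import sys

def gather_weight_data_py3(wtype):
    # Aggregate mst_weight values by |V| (assumes the project classes above).
    results = {}  # maps |V| to ResultAccumulator
    ds = DataSet.read_from_file(WeightResult, WeightResult.get_path_to(wtype))
    for data in ds.dataset.values():
        acc = results.get(data.input().num_verts)
        if acc is None:
            results[data.input().num_verts] = ResultAccumulator(data.mst_weight)
        else:
            acc.add_data(data.mst_weight)

    # Write one line per |V| with the mean and 99% confidence interval bounds.
    try:
        with open(DATA_PATH + wtype + '.dat', 'w') as fh:
            fh.write('#|V|\tLower\tAverage\tUpper  (Lower/Upper from 99% CI)\n')
            for num_verts in sorted(results):
                r = results[num_verts]
                r.compute_stats()
                if len(r.values) > 1:
                    fh.write('%u\t%.3f\t%.3f\t%.3f\t%u\n' %
                             (num_verts, r.lower99, r.mean, r.upper99, len(r.values)))
        return 0
    except IOError as e:
        print("failed to write file: %s" % e, file=sys.stderr)
        return -1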
Example #2
    except ExtractInputFooterError, e:
        raise CheckerError(
            "run test error: unable to extract the input footer for %s: %s" %
            (rel_input_graph, str(e)))

    # log the result
    if for_time:
        data = PerfResult(ti.num_verts, ti.num_edges, ti.seed, rev, trial_num,
                          time_sec, mst_weight)
        try:
            DataSet.add_data_to_log_file(data)
        except DataError, e:
            print >> sys.stderr, "Unable to log result to file %s (was trying to log %s): %s" % (
                data.get_path(), str(data), str(e))
    else:
        data = WeightResult(ti.dims, ti.num_verts, ti.seed, rev, trial_num,
                            mst_weight)
        try:
            DataSet.add_data_to_log_file(data)
        except DataError, e:
            print >> sys.stderr, "Unable to log result to file %s (was trying to log %s): %s" % (
                data.get_path(), str(data), str(e))


def test_mst(is_test_perf, mst_binary, input_graph, out, do_log, rev,
             trial_num):
    trial_num = -1 if not do_log else trial_num
    benchmark(mst_binary, input_graph, out, rev, trial_num, is_test_perf)


__input_graph_to_cleanup = None
__files_to_cleanup = []
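
The two logging branches above differ only in how data is constructed; a small helper along these lines would remove the duplication (the name _log_result is hypothetical; DataSet and DataError are the project classes used in the snippet).

import sys

def _log_result(data):
    # Hypothetical helper: append one result record to its log file and
    # report, without re-raising, any DataError that occurs.
    try:
        DataSet.add_data_to_log_file(data)
    except DataError as e:
        sys.stderr.write("Unable to log result to file %s (was trying to log %s): %s\n"
                         % (data.get_path(), str(data), str(e)))

With that in place, each branch reduces to building the PerfResult or WeightResult and calling _log_result(data).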
Example #3
def main():
    usage = """usage: %prog [options]
Searches for missing results and uses run_test.py to collect them."""
    parser = OptionParser(usage)
    parser.add_option("-i", "--input_graph",
                      metavar="FILE",
                      help="restrict the missing data check to the specified input graph")
    parser.add_option("-l", "--inputs-list-file",
                      metavar="FILE",
                      help="collect data for all inputs in the specified log file")
    parser.add_option("--list-only",
                      action="store_true", default=False,
                      help="only list missing data (do not collect it)")
    parser.add_option("-n", "--num-runs",
                      type="int", default="1",
                      help="number of desired runs per revision-input combination [default: 1]")
    parser.add_option("-r", "--rev",
                      help="restrict the missing data check to the specified revision, or 'all' [default: current]")

    group = OptionGroup(parser, "Data Collection Options")
    group.add_option("-p", "--performance",
                      action="store_true", default=True,
                      help="collect performance data (this is the default)")
    group.add_option("-c", "--correctness",
                      action="store_true", default=False,
                      help="collect correctness data")
    parser.add_option_group(group)

    group2 = OptionGroup(parser, "Weight (Part II) Data Collection Options")
    group2.add_option("-v", "--num_vertices",
                      metavar="V", type="int", default=0,
                      help="collect weight data for V vertices (requires -d or -e)")
    group2.add_option("-d", "--dims",
                      metavar="D", type="int", default=0,
                      help="collect weight data for randomly positioned vertices in D-dimensional space (requires -v)")
    group2.add_option("-e", "--edge",
                      action="store_true", default=False,
                      help="collect weight data for random uniform edge weights in the range (0, 1] (requires -v)")
    parser.add_option_group(group2)

    (options, args) = parser.parse_args()
    if len(args) > 0:
        parser.error("too many arguments")

    if options.num_runs < 1:
        parser.error("-n must be at least 1")
    input_solns = None

    # prepare for a weight data collection
    num_on = 0
    weight_test = False
    if options.num_vertices > 0:
        weight_test = True
        if options.input_graph or options.inputs_list_file:
            parser.error('-i, -l, and -v are mutually exclusive')

        if options.dims > 0:
            num_on += 1
            wtype = 'loc%u' % options.dims

        if options.edge:
            num_on += 1
            wtype = 'edge'

        if num_on == 0:
            parser.error('-v requires either -d or -e be specified too')

        if options.num_runs > 1:
            options.num_runs = 1
            print 'warning: -v truncates the number of runs to 1 (weight should not change b/w runs)'

        input_path = InputSolution.get_path_to(15, options.dims, 0.0, 1.0)
        print 'reading inputs to run on from ' + input_path
        input_solns = DataSet.read_from_file(InputSolution, input_path)
        revs = [None]  # not revision-specific (assuming our alg is correct)
        get_results_for_rev = lambda _: DataSet.read_from_file(
            WeightResult, WeightResult.get_path_to(wtype))
        collect_missing_data = collect_missing_weight_data
    elif options.dims > 0 or options.edge:
        parser.error('-v is required whenever -d or -e is used')

    # handle -i, -l: collect data for a particular graph(s)
    if options.input_graph and options.inputs_list_file:
        parser.error('-i and -l are mutually exclusive')
    if options.input_graph is not None:
        try:
            i = extract_input_footer(options.input_graph)
        except ExtractInputFooterError, e:
            parser.error(e)
        input_solns = DataSet({0: InputSolution(i.prec, i.dims, i.min, i.max,
                                                i.num_verts, i.num_edges, i.seed)})
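
Since optparse is deprecated in current Python, a condensed sketch of the same interface using the standard-library argparse module follows. Only a subset of the options is shown; the names mirror the snippet, and the validation is illustrative rather than a drop-in replacement.

import argparse

def build_parser():
    # Sketch of the option layout above using argparse (subset of the options).
    parser = argparse.ArgumentParser(
        description="Searches for missing results and uses run_test.py to collect them.")
    parser.add_argument("-i", "--input-graph", metavar="FILE",
                        help="restrict the missing data check to the specified input graph")
    parser.add_argument("-l", "--inputs-list-file", metavar="FILE",
                        help="collect data for all inputs in the specified log file")
    parser.add_argument("-n", "--num-runs", type=int, default=1,
                        help="number of desired runs per revision-input combination")

    weight = parser.add_argument_group("Weight (Part II) Data Collection Options")
    weight.add_argument("-v", "--num-vertices", metavar="V", type=int, default=0,
                        help="collect weight data for V vertices (requires -d or -e)")
    weight.add_argument("-d", "--dims", metavar="D", type=int, default=0,
                        help="collect weight data for vertices in D-dimensional space")
    weight.add_argument("-e", "--edge", action="store_true",
                        help="collect weight data for random uniform edge weights in (0, 1]")
    return parser

if __name__ == "__main__":
    parser = build_parser()
    options = parser.parse_args()
    if options.num_runs < 1:
        parser.error("-n must be at least 1")
    if options.num_vertices > 0 and not (options.dims > 0 or options.edge):
        parser.error("-v requires either -d or -e be specified too")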