elif options.inputs_list_file is not None:
    input_solns = DataSet.read_from_file(InputSolution, options.inputs_list_file)

# prepare for a correctness data collection
if options.correctness:
    num_on += 1
    get_results_for_rev = lambda rev: DataSet.read_from_file(CorrResult, CorrResult.get_path_to(rev))
    options.inputs_list_file_arg = '' if options.inputs_list_file is None else ' -l ' + options.inputs_list_file
    collect_missing_data = lambda w, x, y, z: collect_missing_correctness_data(w, x, y, z, options.inputs_list_file_arg)

# make sure no more than one type of data collection was specified
if num_on > 1:
    parser.error('at most one of -c, -d, and -e may be specified')
elif num_on == 0:
    # prepare for a performance data collection (the default when nothing else is specified)
    get_results_for_rev = lambda rev: DataSet.read_from_file(PerfResult, PerfResult.get_path_to(rev))
    collect_missing_data = collect_missing_performance_data

# prepare the inputs and revisions for non-weight data collection schemes
if options.num_vertices == 0:
    # get all performance inputs if we are not collecting for a single graph
    if input_solns is None:
        input_path = InputSolution.get_path_to(1, 0, 0, 100000)
        input_solns = DataSet.read_from_file(InputSolution, input_path)

    # prepare the revisions to collect data for
    if options.rev is not None:
        if options.rev.lower() == 'all':
            revs = get_tracked_revs()
        else:
            revs = [options.rev]
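# Sketch (hypothetical, not from the original file): whichever mode is chosen
# above, both collectors are normalized to the same 4-argument signature, so a
# main loop can stay mode-agnostic. The names `alg` and `options.num_runs` are
# assumptions for illustration only.
#
#   for rev in revs:
#       collect_missing_data(alg, rev, input_solns, options.num_runs)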
def gather_perf_data(alg, rev, index, latest):
    """Gathers performance data for a single revision of an algorithm"""
    print 'gathering perf data for %s (rev=%s index=%u latest=%s)' % (alg, rev, index, str(latest))

    # get the results
    results = {}  # maps (|V|, |E|) to ResultAccumulator
    ds = DataSet.read_from_file(PerfResult, PerfResult.get_path_to(rev))
    for data in ds.dataset.values():
        key = (data.input().num_verts, data.input().num_edges)
        result = results.get(key)
        if result is None:
            result = ResultAccumulator(data.time_sec)
            result.defaultCI = DEFAULT_CI
            results[key] = result
        else:
            result.add_data(data.time_sec)

    # put the results in order
    keys_density = results.keys()
    keys_density.sort(density_compare)
    keys_pom = results.keys()
    keys_pom.sort(pom_compare)
    keys = {'density': keys_density, 'pom': keys_pom}

    # compute stats for all the results
    for key in results.keys():
        results[key].compute_stats()

    # generate dat files for each x-axis cross important vertex counts
    for xaxis in keys:
        if xaxis == 'pom':
            computex = lambda v, e: get_percent_of_max(v, e)
        elif xaxis == 'density':
            computex = lambda v, e: get_density(v, e)
        else:
            print >> sys.stderr, "unexpected x-axis value: " + str(xaxis)
            sys.exit(-1)

        header_txt = '#|V|\t|E|\t' + xaxis + '\tLower\tAverage\tUpper\t#Runs (Lower/Upper from ' + str(DEFAULT_CI) + '% CI)'
        for vip in IMPORTANT_VERTS:
            # open a file to output to
            dat = get_output_dat_name(xaxis, alg, rev, index, vip)
            print 'creating ' + dat
            if latest:
                latest_fn = make_latest(xaxis, alg, rev, index, vip)
            try:
                fh = open(dat, 'w')

                # compute relevant stats and output them
                print >> fh, header_txt
                count = 0
                for (v, e) in keys[xaxis]:
                    if vip == 'all' or vip == v:
                        count += 1
                        r = results[(v, e)]
                        x = computex(v, e)
                        print >> fh, '%u\t%u\t%.6f\t%.3f\t%.3f\t%.3f\t%u' % (v, e, x, r.lower99, r.mean, r.upper99, len(r.values))
                fh.close()

                # don't create empty files
                if count == 0:
                    quiet_remove(dat)
                    if latest:
                        quiet_remove(latest_fn)
            except IOError, e:
                print >> sys.stderr, "failed to write file: " + str(e)
                return -1
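# Illustrative output (all values below are made up): each .dat file written by
# gather_perf_data is a tab-separated table whose first line is header_txt.
# Assuming DEFAULT_CI is 99, a density-axis file might look like:
#
#   #|V|    |E|     density Lower   Average Upper   #Runs (Lower/Upper from 99% CI)
#   1000    9990    0.020020        1.203   1.317   1.431   30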
str_mst_weight = ''

# print the benchmark result, then check whether we are supposed to log it
print ('benchmark result ===> time=%.2f' + str_mst_weight) % time_sec
if trial_num < 0 and for_time:
    return

# extract properties of the graph
try:
    ti = extract_input_footer(input_graph)
except ExtractInputFooterError, e:
    raise CheckerError("run test error: unable to extract the input footer for %s: %s" % (rel_input_graph, str(e)))

# log the result (performance runs log timing; otherwise log the weight)
if for_time:
    data = PerfResult(ti.num_verts, ti.num_edges, ti.seed, rev, trial_num, time_sec, mst_weight)
else:
    data = WeightResult(ti.dims, ti.num_verts, ti.seed, rev, trial_num, mst_weight)
try:
    DataSet.add_data_to_log_file(data)
except DataError, e:
    print >> sys.stderr, "Unable to log result to file %s (was trying to log %s): %s" % (data.get_path(), str(data), str(e))

def test_mst(is_test_perf, mst_binary, input_graph, out, do_log, rev, trial_num):
    """Benchmarks mst_binary on input_graph; forces trial_num to -1 when do_log is False."""
    trial_num = -1 if not do_log else trial_num
    benchmark(mst_binary, input_graph, out, rev, trial_num, is_test_perf)
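# Hypothetical usage (binary path, graph path, and revision id are made up):
#
#   test_mst(True, './mst', 'input/g-1000-10000.g', '/tmp/mst.out', True, 'r1234', 0)    # logged performance trial
#   test_mst(False, './mst', 'input/g-1000-10000.g', '/tmp/mst.out', False, 'r1234', 0)  # unlogged run (trial_num forced to -1)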