def do_all(name, g, i, t, data):
    global options
    assert options
    filename = ''
    if 'metrics' in options.operations:
        stats, filename = metrics.do_metrics(options, name, g)
        filename = filename.replace('data_out', 'data_vis')
    else:
        controllers = metrics.get_controllers(g, options)
        exp_filename = metrics.get_filename(name, options, controllers)
        if not os.path.exists(exp_filename + '.json'):
            raise Exception("invalid file path: %s" % exp_filename)
        print("exp_filename: %s" % exp_filename)
        input_file = open(exp_filename + '.json', 'r')
        stats = json.load(input_file)
        filename = exp_filename.replace('data_out', 'data_vis')
    if 'cdfs' in options.operations:
        plot_cdfs.do_cdfs(options, stats, filename)
    if 'ranges' in options.operations:
        plot_ranges.do_ranges(options, stats, filename, 'os3e')
    if 'pareto' in options.operations:
        plot_pareto.do_pareto(options, stats, filename)
    if 'cloud' in options.operations:
        plot_cloud.do_cloud(options, stats, filename, 'png')

def do_all(options, name, g, i, t, data):
    # Variant of do_all that takes `options` as an explicit parameter
    # instead of relying on the module-level global.
    filename = ''
    if 'metrics' in options.operations:
        stats, filename = metrics.do_metrics(options, name, g)
        filename = filename.replace('data_out', 'data_vis')
    else:
        controllers = metrics.get_controllers(g, options)
        exp_filename = metrics.get_filename(name, options, controllers)
        if not os.path.exists(exp_filename + '.json'):
            raise Exception("invalid file path: %s" % exp_filename)
        input_file = open(exp_filename + '.json', 'r')
        stats = json.load(input_file)
        filename = exp_filename.replace('data_out', 'data_vis')
    if 'cdfs' in options.operations:
        plot_cdfs.do_cdfs(options, stats, filename)
    if 'ranges' in options.operations:
        plot_ranges.do_ranges(options, stats, filename)
    if 'pareto' in options.operations:
        plot_pareto.do_pareto(options, stats, filename)
    if 'cloud' in options.operations:
        plot_cloud.do_cloud(options, stats, filename, 'png')
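
# --- Sketch (not part of the original code): an illustrative driver that runs
# the parameterized do_all above over a list of topology names. It mirrors the
# loop at the bottom of this file; `data` defaults to None because that
# argument is unused in the bodies shown here. `run_all` is a hypothetical
# helper name.
def run_all(options, topos, data=None):
    t = len(topos)
    for i, topo in enumerate(topos):
        g, usable, note = get_topo_graph(topo)
        if g and usable:
            do_all(options, topo, g, i + 1, t, data)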

csv_fields = ['metric', 'ptype', 'name', 'topo', 'n', 'e', 'w', 'gdf_name', 'x', 'y']
csv_tuples = []
for i, topo in enumerate(topos):
    if options.max is not None and i >= options.max:
        break
    g, usable, note = get_topo_graph(topo)
    if usable:
        w = total_weight(g)
        # Check for --force here?
        print "usable topo: %s" % topo
        total_usable += 1
        controllers = metrics.get_controllers(g, options)
        exp_filename = metrics.get_filename(topo, options, controllers)
        path_exists = os.path.exists(exp_filename + '.json')
        compute_metrics = 'metrics' in options.operations
        # Compute data only when the following conditions hold:
        # - asked to compute metrics, AND
        # - asked to force, or data is missing
        if compute_metrics and (options.force or not path_exists):
            print "freshly analyzing topo: %s" % topo
            stats, filename = metrics.do_metrics(options, topo, g)
            filename = filename.replace('data_out', 'data_vis')
            total_used += 1
        # Otherwise, load the data:
        else:
            if not os.path.exists(exp_filename + '.json'):
                raise Exception("invalid file path: %s" % exp_filename)

if options.all_topos:
    topos = sorted(zoo_topos())
else:
    topos = options.topos

topo_test = nx.Graph()
t = len(topos)
ignored = []
successes = []
#g_unified = nx.Graph()  # unified graph
#g_unified = nx.union_all(topos)  # unify all given graphs
for i, topo in enumerate(topos):
    print "topo %s of %s: %s" % (i + 1, t, topo)
    g, usable, note = get_topo_graph(topo)
    #g_unified = nx.union(g, topo_test)  # unify graphs
    exp_filename = metrics.get_filename(topo, options)
    if not g:
        raise Exception("WTF?! null graph: %s" % topo)
    elif not options.force and os.path.exists(exp_filename + '.json'):
        # Don't bother doing work if our metrics are already there.
        print "skipping already-analyzed topo: %s" % topo
        ignored.append(topo)
    elif not has_weights(g):
        ignored.append(topo)
        print "no weights for %s, skipping" % topo
    else:
        do_all(topo, g, i + 1, t, None)
        successes.append(topo)
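
# --- Minimal sketch (assumption, not the project's real CLI): an optparse
# parser that yields an `options` object with the attributes referenced above
# (operations, force, max, all_topos, topos). Flag names, defaults, and the
# helper name `parse_example_options` are illustrative guesses only.
from optparse import OptionParser

def parse_example_options():
    parser = OptionParser()
    parser.add_option('--operations', default='metrics,cdfs',
                      help="comma-separated subset of: metrics, cdfs, ranges, pareto, cloud")
    parser.add_option('--force', action='store_true', default=False,
                      help="recompute metrics even when a .json result already exists")
    parser.add_option('--max', type='int', default=None,
                      help="stop after analyzing this many topologies")
    parser.add_option('--all_topos', action='store_true', default=False,
                      help="analyze every Topology Zoo topology")
    parser.add_option('--topos', default='',
                      help="comma-separated topology names (used when --all_topos is off)")
    options, _ = parser.parse_args()
    options.operations = options.operations.split(',')
    options.topos = [t for t in options.topos.split(',') if t]
    return options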