def plot_history(train_history: List[float], val_history: List[float], plot_dir: str, port_name: str,
                 start_time: str, training_type: str, source_port_name: str = None, config_uid: int = None,
                 tune_train_history: List[float] = None, tune_val_history: List[float] = None) -> None:
    path = os.path.join(plot_dir, port_name,
                        encode_loss_history_plot(training_type, port_name, start_time, source_port_name, config_uid))
    title = f"Training loss ({training_type})"
    if tune_train_history is not None and tune_val_history is not None:
        history = (train_history + tune_train_history, val_history + tune_val_history)
    else:
        history = (train_history, val_history)
    x_vline = len(train_history) - 1 if tune_train_history is not None and len(tune_train_history) > 0 else None
    plot_series(series=history, title=title, x_label="Epoch", y_label="Loss",
                legend_labels=["Training", "Validation"], path=path, x_vline=x_vline,
                x_vline_label="Start fine tuning", mark_min=[1])
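# A minimal sketch (toy, hypothetical loss values) of how plot_history merges the base and
# fine-tuning curves and where the "Start fine tuning" marker lands; plot_series and
# encode_loss_history_plot are helpers from the surrounding module and are not reproduced here.
train_history = [1.2, 0.9, 0.7]       # base-training losses, one per epoch
tune_train_history = [0.6, 0.55]      # fine-tuning losses appended afterwards

merged = train_history + tune_train_history
x_vline = len(train_history) - 1 if tune_train_history is not None and len(tune_train_history) > 0 else None
# merged  == [1.2, 0.9, 0.7, 0.6, 0.55]
# x_vline == 2  -> the last epoch of base training, i.e. where fine tuning starts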
def plot_training(loss_history: Tuple[List[float], List[float]], plot_dir: str, plot_title: str, port: Port,
                  start_time: str, training_type: str):
    plot_linear_path = os.path.join(plot_dir, encode_loss_history_plot(training_type, port.name, start_time))
    # plot_log_path = os.path.join(plot_dir, encode_loss_history_plot(training_type, port.name, start_time))
    plot_series(series=loss_history, title=plot_title, x_label="Epoch", y_label="Loss",
                legend_labels=["Training", "Validation"], path=plot_linear_path)
def main():
    # ONOS rule-install timestamps, in minutes relative to the first parsed row
    onos_timestamps = []
    start_time = None
    with open(onos_data_fp, 'r') as f:
        for line in csv.reader(f):
            try:
                t = float(line[0]) / 60
                if start_time:
                    onos_timestamps.append(t - start_time)
                else:
                    start_time = float(t)
                    onos_timestamps.append(0.0)
            except Exception:
                print "Failed to parse", line
                continue
    # print "file read"
    # Avenir per-rule deltas, converted from milliseconds to minutes
    avenir_deltas = []
    with open("../fabric.csv", 'r') as f:
        for line in csv.DictReader(f):
            try:
                avenir_deltas.append(float(line['time']) / (1000.0 * 60.0))
            except Exception:
                print "Failed to parse", line
                continue
    print "file read"
    # Accumulate the Avenir deltas into absolute timestamps
    avenir_delay = 0.0
    avenir_ts = []
    for o_send_time, a_delta in zip(onos_timestamps, avenir_deltas):
        avenir_delay = avenir_delay + a_delta
        avenir_ts.append(avenir_delay)
    num_rules = len(avenir_ts)
    # onos_timestamps = [float(i) * (1.0 / float(40000)) * 15.0 for i in xrange(40000)]
    # Map each timestamp to the percentage of rules installed by that time
    print "normalizing onos"
    onos_time_series = {t: 100.0 * float(i) / float(num_rules) for (i, t) in enumerate(onos_timestamps)}
    print "normalizing avenir"
    avenir_time_series = {t: 100.0 * float(i) / float(num_rules) for (i, t) in enumerate(avenir_ts)}
    print "plotting"
    plotter.plot_series(avenir_time_series)
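# A minimal sketch (hypothetical numbers) of the normalization step above: per-rule Avenir
# deltas are accumulated into absolute timestamps, and each rule's index is then mapped to a
# completion percentage keyed by that timestamp. plotter.plot_series is the project's plotting
# helper and is not reproduced here.
avenir_deltas = [0.5, 0.25, 0.25, 1.0]   # minutes taken per rule (toy values)

avenir_ts = []
avenir_delay = 0.0
for a_delta in avenir_deltas:
    avenir_delay += a_delta              # running total = time at which rule i landed
    avenir_ts.append(avenir_delay)

num_rules = len(avenir_ts)
avenir_time_series = {t: 100.0 * i / num_rules for i, t in enumerate(avenir_ts)}
# avenir_time_series == {0.5: 0.0, 0.75: 25.0, 1.0: 50.0, 2.0: 75.0}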
def main():
    cleanup()
    baseline = "cat benchmarks/bmv2/mininet/{0}_solution.txt".format(args.rules)
    print "running cold-start"
    data0 = experiment(args.num_hosts, args.mode, experiment_cmd(run_avenir(""), "cold"))
    cleanup()
    print "running hot-start"
    data1 = experiment(args.num_hosts, args.mode, experiment_cmd(run_avenir("--hot-start"), "hot"))
    data1 = normalize(data1, "/tmp/cache_build_time_hot")
    cleanup()
    print "running baseline"
    data2 = experiment(args.num_hosts, args.mode, experiment_cmd(baseline, "base"))
    cleanup()
    plotter.plot_series(data1, data0, data2)
def plot_predictions(y_true: np.ndarray, y_pred: np.ndarray, plot_dir: str, port_name: str, start_time: str,
                     training_type: str, base_port_name: str = None, config_uid: int = None) -> None:
    path = os.path.join(plot_dir, port_name,
                        encode_x_y_plot(training_type, port_name, start_time, base_port_name, config_uid))
    title = f"Labels and Predictions Port {port_name} ({training_type})"
    plot_series(series=(list(y_true), list(y_pred)), title=title, x_label="Training Example",
                y_label="Target Variable: ETA in Minutes", legend_labels=["Label", "Prediction"], path=path)
def main():
    experiments = [
        "self", "action_decomp", "metadata", "early_validate", "double", "choice"
    ]
    if args.run:
        for exp in experiments:
            print exp
            os.system("sh {0}.sh | tee {0}.csv".format(exp))
            os.system("sh {0}_hot.sh | tee {0}_hot.csv".format(exp))
    # for exp in experiments:
    #     print "plotting", exp, "data"
    #     plotter.plot_series(data_sets=[(parse_data(exp), "cold start"),
    #                                    (parse_data(exp + "_hot"), "hot start")],
    #                         name=exp,
    #                         xlabel=(exp + " time (s)"),
    #                         ylabel="% completed")
    print "generating graphs"
    plotter.plot_series(data_sets=[
        (parse_data("self"), "logical"),
        (parse_data("action_decomp"), "action_decompose"),
        (parse_data("metadata"), "metadata"),
        (parse_data("early_validate"), "early_validate"),
        (parse_data("double"), "double"),
        (parse_data("choice"), "choice"),
        (parse_data("self_hot"), "hot start logical"),
        (parse_data("action_decomp_hot"), "hot start action_decompose"),
        (parse_data("metadata_hot"), "hot start metadata"),
        (parse_data("early_validate_hot"), "hot start early_validate"),
        (parse_data("double_hot"), "hot start double"),
        (parse_data("choice_hot"), "hot start choice")
    ], xlabel="time (s)", ylabel="completion %", name="retargeting")