def black_box_function(bandwidth_lower_bound: float, bandwidth_upper_bound: float,
                       delay: float, queue: float, loss: float, T_s: float,
                       delay_noise: float, heuristic, model_path: str,
                       save_dir: str = "") -> float:
    """Return the gap between the heuristic's mean packet-level reward and the
    RL model's mean packet-level reward over 10 traces sampled from the given
    configuration (bandwidth and loss arguments are in log10 scale)."""
    global black_box_function_calling_times
    save_dir = os.path.join(save_dir, 'config_{}'.format(
        black_box_function_calling_times % 15))
    black_box_function_calling_times += 1
    heuristic_rewards = []
    rl_method_rewards = []
    if loss < -4:
        loss = 0
    else:
        loss = 10**loss
    traces = [generate_trace(
        duration_range=(30, 30),
        bandwidth_lower_bound_range=(
            10**bandwidth_lower_bound, 10**bandwidth_lower_bound),
        bandwidth_upper_bound_range=(
            10**bandwidth_upper_bound, 10**bandwidth_upper_bound),
        delay_range=(delay, delay),
        loss_rate_range=(loss, loss),
        queue_size_range=(queue, queue),
        T_s_range=(T_s, T_s),
        delay_noise_range=(delay_noise, delay_noise)) for _ in range(10)]
    # print("trace generation used {}s".format(time.time() - t_start))
    save_dirs = [os.path.join(save_dir, 'trace_{}'.format(i))
                 for i in range(10)]
    for trace, trace_save_dir in zip(traces, save_dirs):
        os.makedirs(trace_save_dir, exist_ok=True)
        trace.dump(os.path.join(trace_save_dir, 'trace.json'))

    if not heuristic:
        for trace in traces:
            heuristic_rewards.append(trace.optimal_reward)
    else:
        t_start = time.time()
        # save_dirs = [""] * len(traces)
        hret = heuristic.test_on_traces(traces, save_dirs, True, 8)
        for heuristic_mi_level_reward, heuristic_pkt_level_reward in hret:
            # heuristic_rewards.append(heuristic_mi_level_reward)
            heuristic_rewards.append(heuristic_pkt_level_reward)
        print("heuristic used {}s".format(time.time() - t_start))

    t_start = time.time()
    rl_ret = test_on_traces(model_path, traces, save_dirs, 8, 20,
                            record_pkt_log=False, plot_flag=True)
    for rl_mi_level_reward, rl_pkt_level_reward in rl_ret:
        # rl_method_rewards.append(rl_mi_level_reward)
        rl_method_rewards.append(rl_pkt_level_reward)
    print("rl used {}s".format(time.time() - t_start))

    gap = float(np.mean(np.array(heuristic_rewards)) -
                np.mean(np.array(rl_method_rewards)))
    return gap
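# A minimal sketch of how black_box_function could drive a Bayesian-optimization
# search for the configuration where the heuristic-vs-RL reward gap is largest.
# It assumes the third-party `bayes_opt` package; the heuristic instance, model
# path, save directory, and parameter bounds below are illustrative only.
from functools import partial

from bayes_opt import BayesianOptimization

objective = partial(black_box_function, heuristic=Cubic(False),
                    model_path="path/to/model_step_64800.ckpt",  # hypothetical
                    save_dir="bo_results")                       # hypothetical
pbounds = {  # illustrative search ranges
    'bandwidth_lower_bound': (0, 1),   # log10(Mbps)
    'bandwidth_upper_bound': (0, 2),   # log10(Mbps)
    'delay': (5, 200),                 # ms
    'queue': (2, 30),                  # packets
    'loss': (-5, -1),                  # log10 of loss rate; < -4 is treated as 0
    'T_s': (0, 6),
    'delay_noise': (0, 100),
}
optimizer = BayesianOptimization(f=objective, pbounds=pbounds, random_state=42)
optimizer.maximize(init_points=5, n_iter=15)
print(optimizer.max)  # configuration maximizing the heuristic-vs-RL gap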
def __init__(self, seed: int, log_dir: str, timesteps_per_actorbatch: int,
             pretrained_model_path=None, gamma: float = 0.99,
             tensorboard_log=None, delta_scale=1,
             record_pkt_log: bool = False):
    init_start = time.time()
    self.record_pkt_log = record_pkt_log
    self.comm = COMM_WORLD
    self.delta_scale = delta_scale
    self.seed = seed
    self.log_dir = log_dir
    self.pretrained_model_path = pretrained_model_path
    self.steps_trained = 0
    dummy_trace = generate_trace(
        (10, 10), (2, 2), (2, 2), (50, 50), (0, 0), (1, 1), (0, 0), (0, 0))
    env = gym.make('PccNs-v0', traces=[dummy_trace], train_flag=True,
                   delta_scale=self.delta_scale)
    # Load pretrained model
    # print('create_dummy_env,{}'.format(time.time() - init_start))
    if pretrained_model_path is not None:
        if pretrained_model_path.endswith('.ckpt'):
            self.model = MyPPO1(
                MyMlpPolicy, env, verbose=1, seed=seed,
                optim_stepsize=0.001, schedule='constant',
                timesteps_per_actorbatch=timesteps_per_actorbatch,
                optim_batchsize=int(timesteps_per_actorbatch / 12),
                optim_epochs=12, gamma=gamma,
                tensorboard_log=tensorboard_log, n_cpu_tf_sess=1)
            with self.model.graph.as_default():
                saver = tf.train.Saver()
                saver.restore(self.model.sess, pretrained_model_path)
            try:
                # e.g. "model_step_64800.ckpt" -> 64800
                self.steps_trained = int(os.path.splitext(
                    pretrained_model_path)[0].split('_')[-1])
            except ValueError:
                self.steps_trained = 0
            # print('tf_restore,{}'.format(time.time()-tf_restore_start))
        else:
            # model is a tensorflow model to serve
            self.model = LoadedModel(pretrained_model_path)
    else:
        self.model = MyPPO1(
            MyMlpPolicy, env, verbose=1, seed=seed,
            optim_stepsize=0.001, schedule='constant',
            timesteps_per_actorbatch=timesteps_per_actorbatch,
            optim_batchsize=int(timesteps_per_actorbatch / 12),
            optim_epochs=12, gamma=gamma,
            tensorboard_log=tensorboard_log, n_cpu_tf_sess=1)
    self.timesteps_per_actorbatch = timesteps_per_actorbatch
def main():
    set_seed(42)
    dummy_trace = generate_trace(duration_range=(10, 10),
                                 bandwidth_lower_bound_range=(0.1, 0.1),
                                 bandwidth_upper_bound_range=(12, 12),
                                 delay_range=(25, 25),
                                 loss_rate_range=(0.0, 0.0),
                                 queue_size_range=(1, 1),
                                 T_s_range=(3, 3),
                                 delay_noise_range=(0, 0))
    dummy_trace.dump(os.path.join(SAVE_DIR, "test_trace.json"))
    genet = Aurora(seed=20, log_dir=SAVE_DIR,
                   pretrained_model_path=MODEL_PATH,
                   timesteps_per_actorbatch=10, record_pkt_log=True)
    t_start = time.time()
    print(genet.test(dummy_trace, SAVE_DIR, True, saliency=True))
    print("aurora", time.time() - t_start)
def multiple_runs(aurora_models, bw, delay, loss, queue, aurora_save_dir,
                  duration, plot_only):
    test_traces = [generate_trace((duration, duration), (bw, bw),
                                  (delay, delay), (loss, loss), (queue, queue))]
    rewards = []
    for aurora in aurora_models:
        aurora.log_dir = os.path.join(
            aurora_save_dir,
            os.path.splitext(os.path.basename(aurora.pretrained_model_path))[0])
        os.makedirs(aurora.log_dir, exist_ok=True)
        t_start = time.time()
        if not plot_only:
            aurora.test(test_traces)
        print("run {} on bw={}Mbps, delay={}ms, loss={}, "
              "queue={}packets, duration={}s, used {:.3f}s".format(
                  aurora.pretrained_model_path, bw, delay, loss, queue,
                  duration, time.time() - t_start))
        df = pd.read_csv(os.path.join(aurora.log_dir, "aurora_test_log.csv"))
        rewards.append(df['reward'].mean())
    return np.mean(np.array(rewards))
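# A small illustrative driver for multiple_runs over a grid of link
# configurations. The checkpoint path and output directory are assumptions for
# the sketch, not paths shipped with the project.
if __name__ == "__main__":
    aurora_models = [
        Aurora(seed=20, log_dir="", timesteps_per_actorbatch=10,
               pretrained_model_path="models/model_step_64800.ckpt")  # hypothetical
    ]
    for bw in [1, 5, 10]:        # Mbps
        for delay in [25, 100]:  # ms
            avg_reward = multiple_runs(
                aurora_models, bw=bw, delay=delay, loss=0.0, queue=10,
                aurora_save_dir="results/multiple_runs",  # hypothetical
                duration=30, plot_only=False)
            print(bw, delay, avg_reward)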
def main():
    args = parse_args()
    set_seed(args.seed)
    if args.save_dir:
        os.makedirs(args.save_dir, exist_ok=True)

    if args.trace_file is not None and args.trace_file.endswith('.json'):
        test_traces = [Trace.load_from_file(args.trace_file)]
    elif args.trace_file is not None and args.trace_file.endswith('.log'):
        test_traces = [Trace.load_from_pantheon_file(
            args.trace_file, args.delay, args.loss, args.queue)]
    elif args.config_file is not None:
        test_traces = generate_traces(args.config_file, 1, args.duration,
                                      constant_bw=not args.time_variant_bw)
    else:
        test_traces = [generate_trace((args.duration, args.duration),
                                      (args.bandwidth, args.bandwidth),
                                      (args.delay, args.delay),
                                      (args.loss, args.loss),
                                      (args.queue, args.queue),
                                      (60, 60), (60, 60),
                                      constant_bw=not args.time_variant_bw)]
    # print(test_traces[0].bandwidths)

    aurora = Aurora(seed=args.seed, timesteps_per_actorbatch=10,
                    log_dir=args.save_dir,
                    pretrained_model_path=args.model_path,
                    delta_scale=args.delta_scale)
    results, pkt_logs = aurora.test_on_traces(test_traces, [args.save_dir])

    for pkt_log in pkt_logs:
        with open(os.path.join(args.save_dir, "aurora_packet_log.csv"),
                  'w', 1) as f:
            pkt_logger = csv.writer(f, lineterminator='\n')
            pkt_logger.writerows(pkt_log)
def main():
    args = parse_args()
    set_seed(args.seed)
    if args.save_dir:
        os.makedirs(args.save_dir, exist_ok=True)

    df = pd.read_csv(args.log_file, sep='\t')
    assert isinstance(df, pd.DataFrame)
    latest_step = int(df['num_timesteps'].iloc[-1])
    assert os.path.exists(
        os.path.join(os.path.dirname(args.log_file),
                     "model_step_{}.ckpt.meta".format(latest_step)))
    latest_model_path = os.path.join(os.path.dirname(args.log_file),
                                     "model_step_{}.ckpt".format(latest_step))
    aurora = Aurora(seed=args.seed, timesteps_per_actorbatch=10, log_dir="",
                    pretrained_model_path=latest_model_path)
    bbr = BBR(True)
    cubic = Cubic(True)

    test_traces = []
    trace_dirs = []
    for noise in [0, 20]:
        for bw in [20, 50]:
            tr = generate_trace((30, 30), (bw, bw), (bw, bw), (25, 25), (0, 0),
                                (0.1, 0.1), (60, 60), (noise, noise))
            test_traces.append(tr)
    for _ in range(5):
        test_traces.append(
            generate_trace((30, 30), (0.1, 0.1), (20, 20), (50, 100), (0, 0),
                           (0.5, 1), (10, 10), (0, 10)))
        test_traces.append(
            generate_trace((30, 30), (10, 10), (100, 100), (50, 100), (0, 0),
                           (0.5, 1), (10, 10), (0, 10)))
    for i, tr in enumerate(test_traces):
        os.makedirs(os.path.join(args.save_dir, 'trace_{}'.format(i)),
                    exist_ok=True)
        tr.dump(os.path.join(args.save_dir, 'trace_{}'.format(i), 'trace.json'))
        trace_dirs.append(os.path.join(args.save_dir, 'trace_{}'.format(i)))

    t_start = time.time()
    aurora_pkt_level_rewards = []
    for tr, save_dir in zip(test_traces, trace_dirs):
        _, pkt_level_reward = aurora.test(tr, save_dir, True)
        aurora_pkt_level_rewards.append(pkt_level_reward)
    print('aurora', time.time() - t_start)

    t_start = time.time()
    bbr_results = bbr.test_on_traces(test_traces, trace_dirs, True)
    print('bbr', time.time() - t_start)
    t_start = time.time()
    cubic_results = cubic.test_on_traces(test_traces, trace_dirs, True)
    print('cubic', time.time() - t_start)

    bbr_pkt_level_rewards = [val for _, val in bbr_results]
    cubic_pkt_level_rewards = [val for _, val in cubic_results]
    mean_rewards = [np.mean(aurora_pkt_level_rewards),
                    np.mean(bbr_pkt_level_rewards),
                    np.mean(cubic_pkt_level_rewards)]
    reward_errs = [np.std(aurora_pkt_level_rewards),
                   np.std(bbr_pkt_level_rewards),
                   np.std(cubic_pkt_level_rewards)]
    plt.bar([1, 2, 3], mean_rewards, yerr=reward_errs, width=0.5)
    plt.xticks([1, 2, 3], ['aurora', 'bbr', 'cubic'])
    plt.ylabel('Test Reward')
    plt.tight_layout()
    plt.savefig(os.path.join(args.save_dir, 'test_cc.jpg'))
def main():
    bbr = BBR(True)
    cubic = Cubic(True)
    # cubic = Cubic(20)
    genet = Aurora(seed=20, log_dir=RESULT_ROOT,
                   pretrained_model_path=GENET_MODEL_PATH,
                   timesteps_per_actorbatch=10, delta_scale=1)
    udr_small = Aurora(seed=20, log_dir=RESULT_ROOT,
                       pretrained_model_path=UDR_SMALL_MODEL_PATH,
                       timesteps_per_actorbatch=10, delta_scale=1)
    udr_mid = Aurora(seed=20, log_dir=RESULT_ROOT,
                     pretrained_model_path=UDR_MID_MODEL_PATH,
                     timesteps_per_actorbatch=10, delta_scale=1)
    udr_large = Aurora(seed=20, log_dir=RESULT_ROOT,
                       pretrained_model_path=UDR_LARGE_MODEL_PATH,
                       timesteps_per_actorbatch=10, delta_scale=1)

    # for _ in range(10):
    #     DEFAULT_CONFIGS.append(
    #         {"bandwidth": 10 ** np.random.uniform(np.log10(1), np.log10(10), 1).item(),
    #          "delay": np.random.uniform(5, 200, 1).item(),
    #          "loss": np.random.uniform(0, 0.0, 1).item(),
    #          "queue": 10 ** np.random.uniform(np.log10(2), np.log10(30), 1).item(),
    #          "T_s": np.random.randint(0, 6, 1).item(),
    #          "delay_noise": np.random.uniform(0, 0, 1).item()})
    # print(DEFAULT_CONFIGS)

    udr_train_config = read_json_file(UDR_TRAIN_CONFIG_FILE)[0]
    # print(udr_train_config)
    for dim, unit in zip(DIMS_TO_VARY, DIM_UNITS):
        print(dim, udr_train_config[dim])
        # choose the values to sweep along this dimension
        if dim == 'bandwidth':
            vals_to_test = 10**np.linspace(
                np.log10(1), np.log10(udr_train_config[dim][1]), 10)
        elif dim == 'queue':
            vals_to_test = 10**np.linspace(
                np.log10(udr_train_config[dim][0]),
                np.log10(udr_train_config[dim][1]), 10)
        elif dim == 'loss':
            vals_to_test = np.linspace(0, 0.005, 10)
        else:
            vals_to_test = np.linspace(
                udr_train_config[dim][0], udr_train_config[dim][1], 10)
        print(vals_to_test)

        bbr_avg_rewards = []
        bbr_reward_errs = []
        cubic_avg_rewards = []
        cubic_reward_errs = []
        udr_small_avg_rewards = []
        udr_small_reward_errs = []
        udr_mid_avg_rewards = []
        udr_mid_reward_errs = []
        udr_large_avg_rewards = []
        udr_large_reward_errs = []
        genet_avg_rewards = []
        genet_reward_errs = []
        for val_idx, val in enumerate(vals_to_test):
            if dim == 'bandwidth':
                max_bw = val
            else:
                max_bw = DEFAULT_VALUES['bandwidth']
            if dim == 'delay':
                min_delay, max_delay = val, val
            else:
                min_delay, max_delay = (DEFAULT_VALUES['delay'],
                                        DEFAULT_VALUES['delay'])
            if dim == 'loss':
                min_loss, max_loss = val, val
            else:
                min_loss, max_loss = DEFAULT_VALUES['loss'], DEFAULT_VALUES['loss']
            if dim == 'queue':
                min_queue, max_queue = val, val
            else:
                min_queue, max_queue = (DEFAULT_VALUES['queue'],
                                        DEFAULT_VALUES['queue'])
            if dim == 'T_s':
                min_T_s, max_T_s = val, val
            else:
                min_T_s, max_T_s = DEFAULT_VALUES['T_s'], DEFAULT_VALUES['T_s']

            # generate n=10 traces for each config
            traces = [generate_trace(duration_range=(30, 30),
                                     bandwidth_range=(1, max_bw),
                                     delay_range=(min_delay, max_delay),
                                     loss_rate_range=(min_loss, max_loss),
                                     queue_size_range=(min_queue, max_queue),
                                     T_s_range=(min_T_s, max_T_s),
                                     delay_noise_range=(0, 0),
                                     constant_bw=False, seed=i)
                      for i in range(10)]
            bbr_rewards = []
            cubic_rewards = []
            udr_small_rewards = []
            udr_mid_rewards = []
            udr_large_rewards = []
            genet_rewards = []
            for i, trace in enumerate(tqdm(traces)):
                save_dir = os.path.join(
                    RESULT_ROOT, EXP_NAME, "vary_{}".format(dim),
                    "val_{}".format(val_idx), "trace_{}".format(i))
                os.makedirs(save_dir, exist_ok=True)
                trace_file = os.path.join(save_dir, "trace_{}.json".format(i))
                trace.dump(trace_file)

                # bbr
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "bbr")
                # os.makedirs(save_dir, exist_ok=True)
                # if os.path.exists(os.path.join(save_dir, "bbr_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "bbr_packet_log.csv"))
                #     pkt_level_reward = pkt_log.get_reward("", trace)
                # else:
                #     test_reward, pkt_level_reward = bbr.test(trace, save_dir)
                # # bbr_rewards.append(test_reward)
                # bbr_rewards.append(pkt_level_reward)

                # cubic
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "cubic")
                # os.makedirs(save_dir, exist_ok=True)
                # if os.path.exists(os.path.join(save_dir, "cubic_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "cubic_packet_log.csv"))
                #     pkt_level_reward = pkt_log.get_reward("", trace)
                # else:
                #     test_reward, pkt_level_reward = cubic.test(trace, save_dir)
                # # cubic_rewards.append(test_reward)
                # cubic_rewards.append(pkt_level_reward)
                # cmd = "python ../plot_scripts/plot_packet_log.py --log-file {} " \
                #     "--save-dir {} --trace-file {}".format(
                #         os.path.join(save_dir, "cubic_packet_log.csv"),
                #         save_dir, trace_file)
                # subprocess.check_output(cmd, shell=True).strip()
                # cmd = "python ../plot_scripts/plot_time_series.py --log-file {} " \
                #     "--save-dir {} --trace-file {}".format(
                #         os.path.join(save_dir, "cubic_simulation_log.csv"),
                #         save_dir, trace_file)
                # subprocess.check_output(cmd, shell=True).strip()

                # genet
                save_dir = os.path.join(
                    RESULT_ROOT, EXP_NAME, "vary_{}".format(dim),
                    "val_{}".format(val_idx), "trace_{}".format(i), "genet")
                if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                    pkt_log = PacketLog.from_log_file(
                        os.path.join(save_dir, "aurora_packet_log.csv"))
                else:
                    _, reward_list, _, _, _, _, _, _, _, pkt_log = genet.test(
                        trace, save_dir)
                    pkt_log = PacketLog.from_log(pkt_log)
                genet_rewards.append(pkt_log.get_reward("", trace))

                # udr_small
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "udr_small")
                # if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "aurora_packet_log.csv"))
                # else:
                #     _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_small.test(
                #         trace, save_dir)
                #     pkt_log = PacketLog.from_log(pkt_log)
                # udr_small_rewards.append(pkt_log.get_reward("", trace))

                # udr_mid
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "udr_mid")
                # if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "aurora_packet_log.csv"))
                # else:
                #     _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_mid.test(
                #         trace, save_dir)
                #     pkt_log = PacketLog.from_log(pkt_log)
                # udr_mid_rewards.append(pkt_log.get_reward("", trace))
                # _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_mid.test(
                #     trace, save_dir)
                # # test_reward = np.mean(reward_list)
                # # udr_mid_rewards.append(test_reward)

                # udr_large
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "udr_large")
                # os.makedirs(save_dir, exist_ok=True)
                # if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "aurora_packet_log.csv"))
                # else:
                #     _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_large.test(
                #         trace, save_dir)
                #     pkt_log = PacketLog.from_log(pkt_log)
                # # test_reward = np.mean(reward_list)
                # # udr_large_rewards.append(test_reward)
                # udr_large_rewards.append(pkt_log.get_reward("", trace))

                # # cmd = "python ../plot_scripts/plot_packet_log.py --log-file {} " \
                # #     "--save-dir {} --trace-file {}".format(
                # #         os.path.join(save_dir, "aurora_packet_log.csv"),
                # #         save_dir, trace_file)
                # # subprocess.check_output(cmd, shell=True).strip()
                # # cmd = "python ../plot_scripts/plot_time_series.py --log-file {} " \
                # #     "--save-dir {} --trace-file {}".format(
                # #         os.path.join(save_dir, "aurora_simulation_log.csv"),
                # #         save_dir, trace_file)
                # # subprocess.check_output(cmd, shell=True).strip()

                # genet_model.test(trace)

            # aggregate mean reward and standard error for this value
            print(len(cubic_avg_rewards), len(udr_large_avg_rewards))
            bbr_avg_rewards.append(np.mean(bbr_rewards))
            bbr_reward_errs.append(
                np.std(bbr_rewards) / np.sqrt(len(bbr_rewards)))
            cubic_avg_rewards.append(np.mean(cubic_rewards))
            cubic_reward_errs.append(
                np.std(cubic_rewards) / np.sqrt(len(cubic_rewards)))
            udr_small_avg_rewards.append(np.mean(udr_small_rewards))
            udr_small_reward_errs.append(
                np.std(udr_small_rewards) / np.sqrt(len(udr_small_rewards)))
            udr_mid_avg_rewards.append(np.mean(udr_mid_rewards))
            udr_mid_reward_errs.append(
                np.std(udr_mid_rewards) / np.sqrt(len(udr_mid_rewards)))
            udr_large_avg_rewards.append(np.mean(udr_large_rewards))
            udr_large_reward_errs.append(
                np.std(udr_large_rewards) / np.sqrt(len(udr_large_rewards)))
            genet_avg_rewards.append(np.mean(genet_rewards))
            genet_reward_errs.append(
                np.std(genet_rewards) / np.sqrt(len(genet_rewards)))

        # plot reward vs. the varied dimension
        plt.figure()
        ax = plt.gca()
        # import pdb; pdb.set_trace()
        ax.plot(vals_to_test, genet_avg_rewards, color='C2', linewidth=4,
                alpha=1, linestyle='-', label="GENET")
        genet_low_bnd = np.array(genet_avg_rewards) - np.array(genet_reward_errs)
        genet_up_bnd = np.array(genet_avg_rewards) + np.array(genet_reward_errs)
        ax.fill_between(vals_to_test, genet_low_bnd, genet_up_bnd,
                        color='C2', alpha=0.1)

        ax.plot(vals_to_test, bbr_avg_rewards, color='C0', linestyle='-.',
                linewidth=4, alpha=1, label="BBR")
        bbr_low_bnd = np.array(bbr_avg_rewards) - np.array(bbr_reward_errs)
        bbr_up_bnd = np.array(bbr_avg_rewards) + np.array(bbr_reward_errs)
        ax.fill_between(vals_to_test, bbr_low_bnd, bbr_up_bnd,
                        color='C0', alpha=0.1)

        ax.plot(vals_to_test, cubic_avg_rewards, color='C0', linestyle='-',
                linewidth=4, alpha=1, label="TCP Cubic")
        cubic_low_bnd = np.array(cubic_avg_rewards) - np.array(cubic_reward_errs)
        cubic_up_bnd = np.array(cubic_avg_rewards) + np.array(cubic_reward_errs)
        ax.fill_between(vals_to_test, cubic_low_bnd, cubic_up_bnd,
                        color='C0', alpha=0.1)

        ax.plot(vals_to_test, udr_small_avg_rewards, color='grey', linewidth=4,
                linestyle=':', label="UDR-1")
        udr_small_low_bnd = np.array(udr_small_avg_rewards) - \
            np.array(udr_small_reward_errs)
        udr_small_up_bnd = np.array(udr_small_avg_rewards) + \
            np.array(udr_small_reward_errs)
        ax.fill_between(vals_to_test, udr_small_low_bnd, udr_small_up_bnd,
                        color='grey', alpha=0.1)

        ax.plot(vals_to_test, udr_mid_avg_rewards, color='grey', linewidth=4,
                linestyle='--', label="UDR-2")
        udr_mid_low_bnd = np.array(udr_mid_avg_rewards) - \
            np.array(udr_mid_reward_errs)
        udr_mid_up_bnd = np.array(udr_mid_avg_rewards) + \
            np.array(udr_mid_reward_errs)
        ax.fill_between(vals_to_test, udr_mid_low_bnd, udr_mid_up_bnd,
                        color='grey', alpha=0.1)

        ax.plot(vals_to_test, udr_large_avg_rewards, color='grey', linewidth=4,
                linestyle='-.', label="UDR-3")
        udr_large_low_bnd = np.array(udr_large_avg_rewards) - \
            np.array(udr_large_reward_errs)
        udr_large_up_bnd = np.array(udr_large_avg_rewards) + \
            np.array(udr_large_reward_errs)
        ax.fill_between(vals_to_test, udr_large_low_bnd, udr_large_up_bnd,
                        color='grey', alpha=0.1)

        ax.set_xlabel("{}({})".format(dim, unit))
        ax.set_ylabel("Reward")
        ax.legend()
        plt.tight_layout()

        with open(os.path.join(
                RESULT_ROOT, EXP_NAME,
                "sim_eval_vary_{}_bbr_with_cubic.csv".format(dim)), 'w') as f:
            writer = csv.writer(f)
            writer.writerow([
                dim, 'genet_avg_rewards', 'genet_low_bnd', 'genet_up_bnd',
                'bbr_avg_rewards', 'bbr_low_bnd', 'bbr_up_bnd',
                'cubic_avg_rewards', 'cubic_low_bnd', 'cubic_up_bnd',
                'udr_small_avg_rewards', 'udr_small_low_bnd', 'udr_small_up_bnd',
                'udr_mid_avg_rewards', 'udr_mid_low_bnd', 'udr_mid_up_bnd',
                'udr_large_avg_rewards', 'udr_large_low_bnd', 'udr_large_up_bnd'])
            writer.writerows(zip(
                vals_to_test, genet_avg_rewards, genet_low_bnd, genet_up_bnd,
                bbr_avg_rewards, bbr_low_bnd, bbr_up_bnd,
                cubic_avg_rewards, cubic_low_bnd, cubic_up_bnd,
                udr_small_avg_rewards, udr_small_low_bnd, udr_small_up_bnd,
                udr_mid_avg_rewards, udr_mid_low_bnd, udr_mid_up_bnd,
                udr_large_avg_rewards, udr_large_low_bnd, udr_large_up_bnd))

        # save the figure as both png and pdf
        for ext in ('png', 'pdf'):
            fig_path = os.path.join(
                RESULT_ROOT, EXP_NAME,
                "sim_eval_vary_{}_bbr_with_cubic.{}".format(dim, ext))
            plt.savefig(fig_path)
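# A short hedged example of reloading one of the CSVs written above in order to
# re-plot a single curve without rerunning the simulations; the dimension name
# below is an illustrative choice.
import pandas as pd
import matplotlib.pyplot as plt

dim = 'bandwidth'  # illustrative
df = pd.read_csv(os.path.join(RESULT_ROOT, EXP_NAME,
                              "sim_eval_vary_{}_bbr_with_cubic.csv".format(dim)))
plt.plot(df[dim], df['genet_avg_rewards'], label='GENET')
plt.fill_between(df[dim], df['genet_low_bnd'], df['genet_up_bnd'], alpha=0.1)
plt.xlabel(dim)
plt.ylabel('Reward')
plt.legend()
plt.show()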
def main():
    args = parse_args()
    set_seed(args.seed)
    # tokens = os.path.basename(os.path.dirname(os.path.dirname(args.save_dir))).split('_')
    # config0_dim0_idx = int(tokens[1])
    # config0_dim1_idx = int(tokens[2])
    # config1_dim0_idx = int(tokens[4])
    # config1_dim1_idx = int(tokens[5])
    dim0, dim1 = args.dims
    config = read_json_file(args.config_file)[0]
    assert dim0 in config and dim1 in config
    # dim0_vals = np.linspace(config[dim0][0], config[dim0][1], 10)
    # dim1_vals = np.linspace(config[dim1][0], config[dim1][1], 10)
    dim0_vals = get_dim_vals(dim0)
    dim1_vals = get_dim_vals(dim1)
    print(dim0_vals)
    print(dim1_vals)

    traces = []
    save_dirs = []
    with open('heatmap_trace_cnt_ratio.npy', 'rb') as f:
        cnt_ratio = np.load(f)
    for dim0_idx, dim0_val in enumerate(dim0_vals):
        for dim1_idx, dim1_val in enumerate(dim1_vals):
            dim_vals = copy.copy(DEFAULT_VALUES)
            dim_vals[dim0] = dim0_val
            dim_vals[dim1] = dim1_val
            # print(i, dim0_val, dim1_val, dim_vals)
            cnt = 10
            # if cnt_ratio[dim0_idx, dim1_idx] > 1:
            #     cnt *= int(cnt_ratio[dim0_idx, dim1_idx])
            # print(cnt)
            for trace_idx in range(cnt):
                trace = generate_trace(
                    duration_range=(dim_vals['duration'], dim_vals['duration']),
                    bandwidth_lower_bound_range=(
                        dim_vals['bandwidth_lower_bound'],
                        dim_vals['bandwidth_lower_bound']),
                    bandwidth_upper_bound_range=(
                        dim_vals['bandwidth_upper_bound'],
                        dim_vals['bandwidth_upper_bound']),
                    delay_range=(dim_vals['delay'], dim_vals['delay']),
                    loss_rate_range=(dim_vals['loss'], dim_vals['loss']),
                    queue_size_range=(dim_vals['queue'], dim_vals['queue']),
                    T_s_range=(dim_vals['T_s'], dim_vals['T_s']),
                    delay_noise_range=(dim_vals['delay_noise'],
                                       dim_vals['delay_noise']))
                traces.append(trace)
                save_dir = os.path.join(
                    args.save_dir, 'pair_{}_{}'.format(dim0_idx, dim1_idx),
                    'trace_{}'.format(trace_idx))
                save_dirs.append(save_dir)
                os.makedirs(save_dir, exist_ok=True)
                trace.dump(
                    os.path.join(save_dir, 'trace_{}.json'.format(trace_idx)))

    if args.cc in ('genet_bbr', 'genet_cubic', 'genet_bbr_old'):
        genet_seed = ''
        for s in args.models_path.split('/'):
            if 'seed' in s:
                genet_seed = s
        for bo in range(0, 30, 3):
            # for bo_dir in natural_sort(glob.glob(os.path.join(args.models_path, "bo_*/"))):
            bo_dir = os.path.join(args.models_path, "bo_{}".format(bo))
            step = 64800
            model_path = os.path.join(bo_dir, 'model_step_{}.ckpt'.format(step))
            if not os.path.exists(model_path + '.meta'):
                print(model_path, 'does not exist')
                continue
            print(model_path)
            genet_save_dirs = [
                os.path.join(save_dir, args.cc, genet_seed, "bo_{}".format(bo),
                             "step_{}".format(step))
                for save_dir in save_dirs
            ]
            t_start = time.time()
            test_on_traces(model_path, traces, genet_save_dirs, args.nproc, 42,
                           False, False)
            print('bo {}: {:.3f}'.format(bo, time.time() - t_start))
    elif args.cc == 'pretrained':
        pretrained_save_dirs = [
            os.path.join(save_dir, args.cc) for save_dir in save_dirs
        ]
        t_start = time.time()
        test_on_traces(args.models_path, traces, pretrained_save_dirs,
                       args.nproc, 42, False, False)
        print('pretrained: {:.3f}'.format(time.time() - t_start))
    elif args.cc == 'overfit_config':
        overfit_config_save_dirs = [
            os.path.join(save_dir, args.cc) for save_dir in save_dirs
        ]
        t_start = time.time()
        test_on_traces(args.models_path, traces, overfit_config_save_dirs,
                       args.nproc, 42, False, False)
        print('overfit_config: {:.3f}'.format(time.time() - t_start))
    else:
        if args.cc == 'bbr':
            cc = BBR(False)
        elif args.cc == 'cubic':
            cc = Cubic(False)
        elif args.cc == 'bbr_old':
            cc = BBR_old(False)
        else:
            raise NotImplementedError
        heuristic_save_dirs = [
            os.path.join(save_dir, cc.cc_name) for save_dir in save_dirs
        ]
        t_start = time.time()
        cc.test_on_traces(traces, heuristic_save_dirs, False, args.nproc)
        print('{}: {:.3f}'.format(args.cc, time.time() - t_start))
    # if 'bbr' not in trace_file and 'cubic' not in trace_file and \
    #         'vegas' not in trace_file and 'pcc' not in trace_file and \
    #         'copa' not in trace_file:
    #     continue
    # if 'experimental' in trace_file:
    #     continue
    # tr = Trace.load_from_pantheon_file(
    #     trace_file, 50, 0, int(np.random.uniform(10, 10, 1).item()))
    # print(tr.delays)
    # print(min(tr.bandwidths), max(tr.bandwidths))
    # real_traces.append(tr)

syn_traces = [
    generate_trace(
        duration_range=(30, 30),
        bandwidth_range=(2, 2),
        delay_range=(30, 40),
        # delay_range=(100, 200),
        loss_rate_range=(0, 0),
        queue_size_range=(10, 30),
        T_s_range=(0, 0),
        delay_noise_range=(0, 0),
        constant_bw=False) for _ in range(5)
]
aurora_udr_big = Aurora(seed=20, log_dir="tmp", timesteps_per_actorbatch=10,
                        pretrained_model_path=MODEL_PATH, delta_scale=1)
# cubic_rewards, _ = test_on_traces(syn_traces, ['tmp']*len(syn_traces), seed=20)
# # results, _ = aurora_udr_big.test_on_traces(
        loss = val
    elif metric == 'queue':
        queue = val
    elif metric == 'T_s':
        T_s = val
    elif metric == 'delay_noise':
        delay_noise = val
    else:
        raise RuntimeError
    for i in range(10):
        trace = generate_trace(duration_range=(10, 10),
                               bandwidth_range=(1, 1 + bandwidth),
                               delay_range=(delay, delay),
                               loss_rate_range=(loss, loss),
                               queue_size_range=(queue, queue),
                               T_s_range=(T_s, T_s),
                               delay_noise_range=(delay_noise, delay_noise),
                               constant_bw=False)
        os.makedirs(os.path.join(SAVE_DIR, 'rand_{}'.format(metric), str(val),
                                 'config_{}'.format(config_id),
                                 'trace_{}'.format(i), 'cubic'),
                    exist_ok=True)
        os.makedirs(os.path.join(SAVE_DIR, 'rand_{}'.format(metric), str(val),
                                 'config_{}'.format(config_id),
                                 'trace_{}'.format(i), 'udr_big'),
                    exist_ok=True)
        os.makedirs(os.path.join(SAVE_DIR, 'rand_{}'.format(metric), str(val),
                                 'config_{}'.format(config_id),
    if 'bbr' not in trace_file and 'cubic' not in trace_file and \
            'vegas' not in trace_file and 'pcc' not in trace_file and \
            'copa' not in trace_file:
        continue
    if 'experimental' in trace_file:
        continue
    tr = Trace.load_from_pantheon_file(
        trace_file, 50, 0, int(np.random.uniform(10, 10, 1).item()))
    print(tr.delays)
    print(min(tr.bandwidths), max(tr.bandwidths))
    real_traces.append(tr)

syn_traces = [generate_trace(duration_range=(30, 30),
                             bandwidth_range=(1, 3),
                             delay_range=(30, 50),
                             # delay_range=(100, 200),
                             loss_rate_range=(0, 0),
                             queue_size_range=(10, 60),
                             T_s_range=(1, 3),
                             delay_noise_range=(0, 0),
                             constant_bw=False) for _ in range(15)]
# aurora_udr_big = Aurora(seed=20, log_dir="tmp", timesteps_per_actorbatch=10,
#                         pretrained_model_path=MODEL_PATH, delta_scale=1)
# cubic_rewards, _ = test_on_traces(syn_traces, ['tmp']*len(syn_traces), seed=20)
# results, _ = aurora_udr_big.test_on_traces(
#     syn_traces, ['tmp']*len(syn_traces))
# print(np.mean(np.array(cubic_rewards), axis=0))
# avg_cubic_rewards = np.mean([np.mean(r) for r in cubic_rewards])
# avg_cubic_rewards_errs = compute_std_of_mean([np.mean(r) for r in cubic_rewards])
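# The commented-out summary above calls a compute_std_of_mean helper. A minimal
# sketch of such a helper, assuming it computes the standard error of the mean
# (the same np.std(x) / sqrt(len(x)) expression used for the error bars in the
# plotting script earlier); the project's actual helper may differ.
import numpy as np

def compute_std_of_mean(samples) -> float:
    samples = np.asarray(samples, dtype=float)
    return float(np.std(samples) / np.sqrt(len(samples)))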