Example No. 1
def main():
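    # Train Aurora across MPI ranks: each rank gets its own seed offset and an
    # equal share of 7200 timesteps (presumably the per-actor-batch budget),
    # training on traces read from the optional trace-list files.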
    args = parse_args()
    assert args.pretrained_model_path is None or args.pretrained_model_path.endswith(
        ".ckpt")
    os.makedirs(args.save_dir, exist_ok=True)
    save_args(args, args.save_dir)
    set_seed(args.seed + COMM_WORLD.Get_rank() * 100)
    nprocs = COMM_WORLD.Get_size()

    # Initialize model and agent policy
    aurora = Aurora(args.seed + COMM_WORLD.Get_rank() * 100, args.save_dir,
                    int(7200 / nprocs), args.pretrained_model_path,
                    tensorboard_log=args.tensorboard_log)
    # Load training and validation traces from the trace-list files, if given.
    training_traces = []
    val_traces = []
    if args.train_trace_file:
        with open(args.train_trace_file, 'r') as f:
            for line in f:
                line = line.strip()
                training_traces.append(Trace.load_from_file(line))

    if args.val_trace_file:
        with open(args.val_trace_file, 'r') as f:
            for line in f:
                line = line.strip()
                if args.dataset == 'pantheon':
                    queue = 100  # dummy value
                    # if "ethernet" in line:
                    #     queue = 500
                    # elif "cellular" in line:
                    #     queue = 50
                    # else:
                    #     queue = 100
                    val_traces.append(Trace.load_from_pantheon_file(
                        line, queue=queue, loss=0))
                elif args.dataset == 'synthetic':
                    val_traces.append(Trace.load_from_file(line))
                else:
                    raise ValueError("unknown dataset: {}".format(args.dataset))

    aurora.train(args.randomization_range_file,
                 args.total_timesteps, tot_trace_cnt=args.total_trace_count,
                 tb_log_name=args.exp_name, validation_flag=args.validation,
                 training_traces=training_traces,
                 validation_traces=val_traces,
                 real_trace_prob=args.real_trace_prob)
Example No. 2
def main():
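    # Generate one 10-second synthetic trace, dump it to JSON, then time a
    # saliency-enabled test of a pretrained Aurora model on that trace.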
    set_seed(42)
    dummy_trace = generate_trace(duration_range=(10, 10),
                                 bandwidth_lower_bound_range=(0.1, 0.1),
                                 bandwidth_upper_bound_range=(12, 12),
                                 delay_range=(25, 25),
                                 loss_rate_range=(0.0, 0.0),
                                 queue_size_range=(1, 1),
                                 T_s_range=(3, 3),
                                 delay_noise_range=(0, 0))
    dummy_trace.dump(os.path.join(SAVE_DIR, "test_trace.json"))

    genet = Aurora(seed=20,
                   log_dir=SAVE_DIR,
                   pretrained_model_path=MODEL_PATH,
                   timesteps_per_actorbatch=10,
                   record_pkt_log=True)
    t_start = time.time()
    print(genet.test(dummy_trace, SAVE_DIR, True, saliency=True))
    print("aurora", time.time() - t_start)
Example No. 3
def main():
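    # Build a single test trace from a JSON trace file, a Pantheon log, a config
    # file, or the CLI parameters, run a pretrained Aurora model on it, and dump
    # the packet log to CSV.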
    args = parse_args()
    set_seed(args.seed)
    if args.save_dir:
        os.makedirs(args.save_dir, exist_ok=True)

    if args.trace_file is not None and args.trace_file.endswith('.json'):
        test_traces = [Trace.load_from_file(args.trace_file)]
    elif args.trace_file is not None and args.trace_file.endswith('.log'):
        test_traces = [
            Trace.load_from_pantheon_file(args.trace_file, args.delay,
                                          args.loss, args.queue)
        ]
    elif args.config_file is not None:
        test_traces = generate_traces(args.config_file,
                                      1,
                                      args.duration,
                                      constant_bw=not args.time_variant_bw)
    else:
        test_traces = [
            generate_trace((args.duration, args.duration),
                           (args.bandwidth, args.bandwidth),
                           (args.delay, args.delay), (args.loss, args.loss),
                           (args.queue, args.queue), (60, 60), (60, 60),
                           constant_bw=not args.time_variant_bw)
        ]
    # print(test_traces[0].bandwidths)

    aurora = Aurora(seed=args.seed,
                    timesteps_per_actorbatch=10,
                    log_dir=args.save_dir,
                    pretrained_model_path=args.model_path,
                    delta_scale=args.delta_scale)
    results, pkt_logs = aurora.test_on_traces(test_traces, [args.save_dir])

    # Each branch above yields exactly one trace, so pkt_logs contains a single
    # packet log; write it out as a CSV file.
    for pkt_log in pkt_logs:
        with open(os.path.join(args.save_dir, "aurora_packet_log.csv"), 'w',
                  1) as f:
            pkt_logger = csv.writer(f, lineterminator='\n')
            pkt_logger.writerows(pkt_log)
Example No. 4
    def __init__(self, flow_id):
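        # Per-flow emulation driver: pick a random starting rate, set up CSV
        # logging, load a pretrained Aurora model, and run one throwaway
        # prediction so the model is loaded before real packets arrive.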
        global RESET_RATE_MIN
        global RESET_RATE_MAX
        args = parse_args()

        self.id = flow_id

        self.rate = random.uniform(RESET_RATE_MIN, RESET_RATE_MAX)
        self.history_len = args.history_len
        self.features = args.input_features
        self.history = sender_obs.SenderHistory(self.history_len,
                                                self.features, self.id)
        self.save_dir = args.save_dir
        self.log_writer = csv.writer(open(
            os.path.join(self.save_dir, 'aurora_emulation_log.csv'), 'w', 1),
                                     lineterminator='\n')
        self.log_writer.writerow([
            'timestamp', "target_send_rate", "send_rate", 'recv_rate',
            'latency', 'loss', 'reward', "action", "bytes_sent", "bytes_acked",
            'bytes_lost', 'send_start_time', "send_end_time",
            'recv_start_time', 'recv_end_time', 'latency_increase',
            "sent_latency_inflation", 'latency_ratio', 'send_ratio',
            'recv_ratio', 'packet_size', "min_rtt", 'rtt_samples'
        ])
        self.got_data = False

        # self.agent = loaded_agent.LoadedModelAgent(args.model_path)
        self.aurora = Aurora(seed=20,
                             log_dir="",
                             timesteps_per_actorbatch=10,
                             pretrained_model_path=args.model_path,
                             delta_scale=DELTA_SCALE)

        PccGymDriver.flow_lookup[flow_id] = self

        self.t_start = time.time()
        # dummy inference here to force the model to load
        # _ = self.agent.act(self.history.as_array())
        _ = self.aurora.model.predict(self.history.as_array(),
                                      deterministic=True)

        self.mi_pushed = False

        self.idx = 1
Example No. 5
def main():
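    # Find the newest checkpoint recorded in a training progress log, then
    # compare that Aurora model against BBR and Cubic on synthetic traces and
    # plot the mean packet-level rewards with error bars.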
    args = parse_args()
    set_seed(args.seed)
    if args.save_dir:
        os.makedirs(args.save_dir, exist_ok=True)

    df = pd.read_csv(args.log_file, sep='\t')
    assert isinstance(df, pd.DataFrame)

    latest_step = int(df['num_timesteps'].iloc[-1])
    assert os.path.exists(
        os.path.join(os.path.dirname(args.log_file),
                     "model_step_{}.ckpt.meta".format(latest_step)))
    latest_model_path = os.path.join(os.path.dirname(args.log_file),
                                     "model_step_{}.ckpt".format(latest_step))

    aurora = Aurora(seed=args.seed,
                    timesteps_per_actorbatch=10,
                    log_dir="",
                    pretrained_model_path=latest_model_path)
    bbr = BBR(True)
    cubic = Cubic(True)

    test_traces = []
    trace_dirs = []
    for noise in [0, 20]:
        for bw in [20, 50]:
            tr = generate_trace((30, 30), (bw, bw), (bw, bw), (25, 25), (0, 0),
                                (0.1, 0.1), (60, 60), (noise, noise))
            test_traces.append(tr)

    for _ in range(5):
        test_traces.append(
            generate_trace((30, 30), (0.1, 0.1), (20, 20), (50, 100), (0, 0),
                           (0.5, 1), (10, 10), (0, 10)))
        test_traces.append(
            generate_trace((30, 30), (10, 10), (100, 100), (50, 100), (0, 0),
                           (0.5, 1), (10, 10), (0, 10)))

    for i, tr in enumerate(test_traces):
        os.makedirs(os.path.join(args.save_dir, 'trace_{}'.format(i)),
                    exist_ok=True)
        tr.dump(os.path.join(args.save_dir, 'trace_{}'.format(i),
                             'trace.json'))
        trace_dirs.append(os.path.join(args.save_dir, 'trace_{}'.format(i)))

    t_start = time.time()
    aurora_pkt_level_rewards = []
    for tr, save_dir in zip(test_traces, trace_dirs):
        _, pkt_level_reward = aurora.test(tr, save_dir, True)
        aurora_pkt_level_rewards.append(pkt_level_reward)
    print('aurora', time.time() - t_start)
    t_start = time.time()
    bbr_results = bbr.test_on_traces(test_traces, trace_dirs, True)
    print('bbr', time.time() - t_start)
    t_start = time.time()
    cubic_results = cubic.test_on_traces(test_traces, trace_dirs, True)
    print('cubic', time.time() - t_start)

    bbr_pkt_level_rewards = [val for _, val in bbr_results]
    cubic_pkt_level_rewards = [val for _, val in cubic_results]
    mean_rewards = [
        np.mean(aurora_pkt_level_rewards),
        np.mean(bbr_pkt_level_rewards),
        np.mean(cubic_pkt_level_rewards)
    ]
    reward_errs = [
        np.std(aurora_pkt_level_rewards),
        np.std(bbr_pkt_level_rewards),
        np.std(cubic_pkt_level_rewards)
    ]
    plt.bar([1, 2, 3], mean_rewards, yerr=reward_errs, width=0.5)
    plt.xticks([1, 2, 3], ['aurora', 'bbr', 'cubic'])
    plt.ylabel('Test Reward')
    plt.tight_layout()
    plt.savefig(os.path.join(args.save_dir, 'test_cc.jpg'))
Example No. 6
File: test.py Project: zxxia/PCC-RL
def main():
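    # Sensitivity sweep: vary one trace dimension at a time, generate 10 traces
    # per value, evaluate the GENET-trained Aurora model on each (the BBR,
    # Cubic, and UDR runs are commented out), and plot/save the reward curves.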
    bbr = BBR(True)
    cubic = Cubic(True)
    # cubic = Cubic(20)
    genet = Aurora(seed=20, log_dir=RESULT_ROOT,
                   pretrained_model_path=GENET_MODEL_PATH,
                   timesteps_per_actorbatch=10, delta_scale=1)
    udr_small = Aurora(seed=20, log_dir=RESULT_ROOT,
                       pretrained_model_path=UDR_SMALL_MODEL_PATH,
                       timesteps_per_actorbatch=10, delta_scale=1)
    udr_mid = Aurora(seed=20, log_dir=RESULT_ROOT,
                     pretrained_model_path=UDR_MID_MODEL_PATH,
                     timesteps_per_actorbatch=10, delta_scale=1)
    udr_large = Aurora(seed=20, log_dir=RESULT_ROOT,
                       pretrained_model_path=UDR_LARGE_MODEL_PATH,
                       timesteps_per_actorbatch=10, delta_scale=1)

    # for _ in range(10):
    #     DEFAULT_CONFIGS.append(
    #         {
    #          "bandwidth": 10 ** np.random.uniform(np.log10(1), np.log10(10), 1).item(),
    #          "delay": np.random.uniform(5, 200, 1).item(),
    #          "loss": np.random.uniform(0, 0.0, 1).item(),
    #          "queue": 10 ** np.random.uniform(np.log10(2), np.log10(30), 1).item(),
    #          "T_s": np.random.randint(0, 6, 1).item(),
    #          "delay_noise": np.random.uniform(0, 0, 1).item()})
    # print(DEFAULT_CONFIGS)
    udr_train_config = read_json_file(UDR_TRAIN_CONFIG_FILE)[0]
    # print(udr_train_config)
    for dim, unit in zip(DIMS_TO_VARY, DIM_UNITS):
        print(dim, udr_train_config[dim])
        if dim == 'bandwidth':
            vals_to_test = 10**np.linspace(
                np.log10(1), np.log10(udr_train_config[dim][1]), 10)
        elif dim == 'queue':
            vals_to_test = 10**np.linspace(
                np.log10(udr_train_config[dim][0]), np.log10(udr_train_config[dim][1]), 10)
        elif dim == 'loss':
            vals_to_test = np.linspace(0, 0.005, 10)
        else:
            vals_to_test = np.linspace(
                udr_train_config[dim][0], udr_train_config[dim][1], 10)
        print(vals_to_test)
        bbr_avg_rewards = []
        bbr_reward_errs = []
        cubic_avg_rewards = []
        cubic_reward_errs = []
        udr_small_avg_rewards = []
        udr_small_reward_errs = []
        udr_mid_avg_rewards = []
        udr_mid_reward_errs = []
        udr_large_avg_rewards = []
        udr_large_reward_errs = []
        genet_avg_rewards = []
        genet_reward_errs = []
        for val_idx, val in enumerate(vals_to_test):
            if dim == 'bandwidth':
                max_bw = val
            else:
                max_bw = DEFAULT_VALUES['bandwidth']

            if dim == 'delay':
                min_delay, max_delay = val, val
            else:
                min_delay, max_delay = DEFAULT_VALUES['delay'], DEFAULT_VALUES['delay']
            if dim == 'loss':
                min_loss, max_loss = val, val
            else:
                min_loss, max_loss = DEFAULT_VALUES['loss'], DEFAULT_VALUES['loss']
            if dim == 'queue':
                min_queue, max_queue = val, val
            else:
                min_queue, max_queue = DEFAULT_VALUES['queue'], DEFAULT_VALUES['queue']
            if dim == 'T_s':
                min_T_s, max_T_s = val, val
            else:
                min_T_s, max_T_s = DEFAULT_VALUES['T_s'], DEFAULT_VALUES['T_s']

            # generate n=10 traces for each config
            traces = [generate_trace(duration_range=(30, 30),
                                     bandwidth_range=(1, max_bw),
                                     delay_range=(min_delay, max_delay),
                                     loss_rate_range=(min_loss, max_loss),
                                     queue_size_range=(min_queue, max_queue),
                                     T_s_range=(min_T_s, max_T_s),
                                     delay_noise_range=(0, 0),
                                     constant_bw=False, seed=i) for i in range(10)]
            bbr_rewards = []
            cubic_rewards = []
            udr_small_rewards = []
            udr_mid_rewards = []
            udr_large_rewards = []
            genet_rewards = []
            for i, trace in enumerate(tqdm(traces)):
                save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                    dim), "val_{}".format(val_idx), "trace_{}".format(i))
                os.makedirs(save_dir, exist_ok=True)
                trace_file = os.path.join(save_dir, "trace_{}.json".format(i))
                trace.dump(trace_file)

                # bbr
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "bbr")
                # os.makedirs(save_dir, exist_ok=True)
                #
                # if os.path.exists(os.path.join(save_dir, "bbr_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "bbr_packet_log.csv"))
                #     pkt_level_reward = pkt_log.get_reward("", trace)
                # else:
                #     test_reward, pkt_level_reward = bbr.test(trace, save_dir)
                #
                # # bbr_rewards.append(test_reward)
                # bbr_rewards.append(pkt_level_reward)

                # cubic
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "cubic")
                # os.makedirs(save_dir, exist_ok=True)
                #
                # if os.path.exists(os.path.join(save_dir, "cubic_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "cubic_packet_log.csv"))
                #     pkt_level_reward = pkt_log.get_reward("", trace)
                # else:
                #     test_reward, pkt_level_reward = cubic.test(trace, save_dir)
                # # cubic_rewards.append(test_reward)
                # cubic_rewards.append(pkt_level_reward)

                # cmd = "python ../plot_scripts/plot_packet_log.py --log-file {} " \
                #     "--save-dir {} --trace-file {}".format(
                #         os.path.join(save_dir, "cubic_packet_log.csv"),
                #         save_dir, trace_file)
                # subprocess.check_output(cmd, shell=True).strip()
                # cmd = "python ../plot_scripts/plot_time_series.py --log-file {} " \
                #     "--save-dir {} --trace-file {}".format(
                #         os.path.join(save_dir, "cubic_simulation_log.csv"),
                #         save_dir, trace_file)
                # subprocess.check_output(cmd, shell=True).strip()

                # genet
                save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                    dim), "val_{}".format(val_idx), "trace_{}".format(i), "genet")
                os.makedirs(save_dir, exist_ok=True)
                if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                    pkt_log = PacketLog.from_log_file(
                        os.path.join(save_dir, "aurora_packet_log.csv"))
                else:
                    _, reward_list, _, _, _, _, _, _, _, pkt_log = genet.test(
                        trace, save_dir)
                    pkt_log = PacketLog.from_log(pkt_log)
                genet_rewards.append(pkt_log.get_reward("", trace))

                # udr_small
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "udr_small")
                # if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "aurora_packet_log.csv"))
                # else:
                #     _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_small.test(
                #         trace, save_dir)
                #     pkt_log = PacketLog.from_log(pkt_log)
                # udr_small_rewards.append(pkt_log.get_reward("", trace))
                #
                # # udr_mid
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "udr_mid")
                # if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "aurora_packet_log.csv"))
                # else:
                #     _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_mid.test(
                #         trace, save_dir)
                #     pkt_log = PacketLog.from_log(pkt_log)
                # udr_mid_rewards.append(pkt_log.get_reward("", trace))
                # _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_mid.test(
                #     trace, save_dir)
                # # test_reward = np.mean(reward_list)
                # # udr_mid_rewards.append(test_reward)
                #
                # # udr_large
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "udr_large")
                # os.makedirs(save_dir, exist_ok=True)
                # if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "aurora_packet_log.csv"))
                # else:
                #     _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_large.test(
                #         trace, save_dir)
                #     pkt_log = PacketLog.from_log(pkt_log)
                # # test_reward = np.mean(reward_list)
                # # udr_large_rewards.append(test_reward)
                # udr_large_rewards.append(pkt_log.get_reward("", trace))
                # # cmd = "python ../plot_scripts/plot_packet_log.py --log-file {} " \
                # #     "--save-dir {} --trace-file {}".format(
                # #         os.path.join(save_dir, "aurora_packet_log.csv"),
                # #         save_dir, trace_file)
                # # subprocess.check_output(cmd, shell=True).strip()
                # # cmd = "python ../plot_scripts/plot_time_series.py --log-file {} " \
                # #     "--save-dir {} --trace-file {}".format(
                # #         os.path.join(save_dir, "aurora_simulation_log.csv"),
                # #         save_dir, trace_file)
                # # subprocess.check_output(cmd, shell=True).strip()
                #
                # # # genet_model.test(trace)
            # Note: the bbr/cubic/udr_* reward lists stay empty (their test
            # blocks above are commented out), so the means below are NaN.
            bbr_avg_rewards.append(np.mean(bbr_rewards))
            bbr_reward_errs.append(
                np.std(bbr_rewards) / np.sqrt(len(bbr_rewards)))
            cubic_avg_rewards.append(np.mean(cubic_rewards))
            cubic_reward_errs.append(
                np.std(cubic_rewards) / np.sqrt(len(cubic_rewards)))
            udr_small_avg_rewards.append(np.mean(udr_small_rewards))
            udr_small_reward_errs.append(
                np.std(udr_small_rewards) / np.sqrt(len(udr_small_rewards)))
            udr_mid_avg_rewards.append(np.mean(udr_mid_rewards))
            udr_mid_reward_errs.append(
                np.std(udr_mid_rewards) / np.sqrt(len(udr_mid_rewards)))
            udr_large_avg_rewards.append(np.mean(udr_large_rewards))
            udr_large_reward_errs.append(
                np.std(udr_large_rewards) / np.sqrt(len(udr_large_rewards)))

            genet_avg_rewards.append(np.mean(genet_rewards))
            genet_reward_errs.append(
                np.std(genet_rewards) / np.sqrt(len(genet_rewards)))
        plt.figure()
        ax = plt.gca()

        ax.plot(vals_to_test, genet_avg_rewards, color='C2',
                linewidth=4, alpha=1, linestyle='-', label="GENET")
        genet_low_bnd = np.array(genet_avg_rewards) - \
            np.array(genet_reward_errs)
        genet_up_bnd = np.array(genet_avg_rewards) + \
            np.array(genet_reward_errs)
        ax.fill_between(vals_to_test, genet_low_bnd,
                        genet_up_bnd, color='C2', alpha=0.1)

        ax.plot(vals_to_test, bbr_avg_rewards, color='C0',
                linestyle='-.', linewidth=4, alpha=1, label="BBR")
        bbr_low_bnd = np.array(bbr_avg_rewards) - \
            np.array(bbr_reward_errs)
        bbr_up_bnd = np.array(bbr_avg_rewards) + \
            np.array(bbr_reward_errs)
        ax.fill_between(vals_to_test, bbr_low_bnd,
                        bbr_up_bnd, color='C0', alpha=0.1)

        ax.plot(vals_to_test, cubic_avg_rewards, color='C0',
                linestyle='-', linewidth=4, alpha=1, label="TCP Cubic")
        cubic_low_bnd = np.array(cubic_avg_rewards) - \
            np.array(cubic_reward_errs)
        cubic_up_bnd = np.array(cubic_avg_rewards) + \
            np.array(cubic_reward_errs)
        ax.fill_between(vals_to_test, cubic_low_bnd,
                        cubic_up_bnd, color='C0', alpha=0.1)

        ax.plot(vals_to_test, udr_small_avg_rewards, color='grey',
                linewidth=4, linestyle=':', label="UDR-1")
        udr_small_low_bnd = np.array(
            udr_small_avg_rewards) - np.array(udr_small_reward_errs)
        udr_small_up_bnd = np.array(
            udr_small_avg_rewards) + np.array(udr_small_reward_errs)
        ax.fill_between(vals_to_test, udr_small_low_bnd,
                        udr_small_up_bnd, color='grey', alpha=0.1)

        ax.plot(vals_to_test, udr_mid_avg_rewards, color='grey',
                linewidth=4, linestyle='--', label="UDR-2")
        udr_mid_low_bnd = np.array(
            udr_mid_avg_rewards) - np.array(udr_mid_reward_errs)
        udr_mid_up_bnd = np.array(udr_mid_avg_rewards) + \
            np.array(udr_mid_reward_errs)
        ax.fill_between(vals_to_test, udr_mid_low_bnd,
                        udr_mid_up_bnd, color='grey', alpha=0.1)

        ax.plot(vals_to_test, udr_large_avg_rewards, color='grey',
                linewidth=4, linestyle='-.', label="UDR-3")
        udr_large_low_bnd = np.array(
            udr_large_avg_rewards) - np.array(udr_large_reward_errs)
        udr_large_up_bnd = np.array(
            udr_large_avg_rewards) + np.array(udr_large_reward_errs)
        ax.fill_between(vals_to_test, udr_large_low_bnd,
                        udr_large_up_bnd, color='grey', alpha=0.1)
        ax.set_xlabel("{}({})".format(dim, unit))
        ax.set_ylabel("Reward")
        ax.legend()
        plt.tight_layout()
        with open(os.path.join(RESULT_ROOT, EXP_NAME, "sim_eval_vary_{}_bbr_with_cubic.csv".format(dim)), 'w') as f:
            writer = csv.writer(f)
            writer.writerow([dim, 'genet_avg_rewards', 'genet_low_bnd', 'genet_up_bnd',
                'bbr_avg_rewards', 'bbr_low_bnd', 'bbr_up_bnd',
                'cubic_avg_rewards', 'cubic_low_bnd', 'cubic_up_bnd',
                'udr_small_avg_rewards', 'udr_small_low_bnd', 'udr_small_up_bnd',
                'udr_mid_avg_rewards', 'udr_mid_low_bnd', 'udr_mid_up_bnd',
                'udr_large_avg_rewards', 'udr_large_low_bnd', 'udr_large_up_bnd'])
            writer.writerows(zip(vals_to_test,
                    genet_avg_rewards, genet_low_bnd, genet_up_bnd,
                    bbr_avg_rewards, bbr_low_bnd, bbr_up_bnd,
                    cubic_avg_rewards, cubic_low_bnd, cubic_up_bnd,
                    udr_small_avg_rewards, udr_small_low_bnd, udr_small_up_bnd,
                    udr_mid_avg_rewards, udr_mid_low_bnd, udr_mid_up_bnd,
                    udr_large_avg_rewards, udr_large_low_bnd, udr_large_up_bnd))
        fig_path = os.path.join(RESULT_ROOT, EXP_NAME,
                                "sim_eval_vary_{}_bbr_with_cubic.png".format(dim))
        plt.savefig(fig_path)
        fig_path = os.path.join(RESULT_ROOT, EXP_NAME,
                                "sim_eval_vary_{}_bbr_with_cubic.pdf".format(dim))
        plt.savefig(fig_path)
Example No. 7
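# Build five 30-second synthetic traces and load an Aurora model trained with a
# large UDR range; the Cubic vs. Aurora comparison below is left commented out.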
syn_traces = [
    generate_trace(
        duration_range=(30, 30),
        bandwidth_range=(2, 2),
        delay_range=(30, 40),
        # delay_range=(100, 200),
        loss_rate_range=(0, 0),
        queue_size_range=(10, 30),
        T_s_range=(0, 0),
        delay_noise_range=(0, 0),
        constant_bw=False) for _ in range(5)
]

aurora_udr_big = Aurora(seed=20,
                        log_dir="tmp",
                        timesteps_per_actorbatch=10,
                        pretrained_model_path=MODEL_PATH,
                        delta_scale=1)

# cubic_rewards, _ = test_on_traces(syn_traces, ['tmp']*len(syn_traces), seed=20)
#
# results, _ = aurora_udr_big.test_on_traces(
#         syn_traces, ['tmp']*len(syn_traces))
# # print(np.mean(np.array(cubic_rewards), axis=0))
# avg_cubic_rewards = np.mean([np.mean(r) for r in cubic_rewards])
# avg_cubic_rewards_errs = compute_std_of_mean([np.mean(r) for r in cubic_rewards])
#
# udr_big_rewards = np.array([np.mean([row[1] for row in result]) for result in results])
# avg_udr_big_rewards = np.mean(udr_big_rewards)
# avg_udr_big_rewards_errs = compute_std_of_mean([np.mean(r) for r in udr_big_rewards])
Example No. 8
        int(10**np.random.uniform(np.log10(5), np.log10(30), 1).item()),
        round(np.random.randint(0, 6, 1).item(), 2),
        # round(np.random.uniform(0, 6, 1).item(), 2),
        round(np.random.uniform(0, 0, 1).item(), 2)))
# config = read_json_file("/tank/zxxia/PCC-RL/results_0503/bo_new/seed_10/bo_0.json")
# config = read_json_file("/tank/zxxia/PCC-RL/results_0503/bo_delay/seed_10/bo_0.json")
# default_configs = []
# for cf in config:
#     # default_configs.append((cf['delay'][0], cf['loss'][0], cf['queue'][0],
#     #                         cf['T_s'][0], cf['delay_noise'][0]))
#     default_configs.append((cf['bandwidth'][0], cf['loss'][0], cf['queue'][0],
#                             cf['T_s'][0], cf['delay_noise'][0]))

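# Load Aurora agents trained with big/mid/small UDR ranges and a BO-trained
# agent from their pretrained checkpoints.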
aurora_udr_big = Aurora(seed=20,
                        log_dir="tmp",
                        timesteps_per_actorbatch=10,
                        pretrained_model_path=UDR_BIG_MODEL_PATH,
                        delta_scale=1)
aurora_udr_mid = Aurora(seed=20,
                        log_dir="tmp",
                        timesteps_per_actorbatch=10,
                        pretrained_model_path=UDR_MID_MODEL_PATH,
                        delta_scale=1)
aurora_udr_small = Aurora(seed=20,
                          log_dir="tmp",
                          timesteps_per_actorbatch=10,
                          pretrained_model_path=UDR_SMALL_MODEL_PATH,
                          delta_scale=1)

aurora_bo = Aurora(seed=20,
                   log_dir="tmp",
Example No. 9
def main():
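    # Sweep one environment dimension (bandwidth, delay, loss, or queue) over
    # the values in the config file: run Cubic once per setting, then evaluate
    # the last n checkpoints of each Aurora model and plot rewards vs. Cubic.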
    args = parse_args()
    set_seed(args.seed)
    metric = args.dimension
    model_paths = args.model_path
    save_root = args.save_dir
    config_file = args.config_file
    train_config_dir = args.train_config_dir
    print(metric, model_paths, save_root, config_file)

    plt.figure(figsize=(8, 8))
    config = read_json_file(config_file)
    print(config)
    bw_list = config['bandwidth']
    delay_list = config['delay']
    loss_list = config['loss']
    queue_list = config["queue"]

    # run cubic
    cubic_rewards = []
    # cubic_scores = []
    for (bw, delay, loss, queue) in itertools.product(
            bw_list, delay_list, loss_list, queue_list):
        save_dir = f"{save_root}/rand_{metric}/env_{bw}_{delay}_{loss}_{queue}"
        cubic_save_dir = os.path.join(save_dir, "cubic")
        if not args.plot_only:
            t_start = time.time()
            cmd = "python evaluate_cubic.py --bandwidth {} --delay {} " \
                "--loss {} --queue {} --save-dir {} --duration {} --seed {}".format(
                    bw, delay, loss, queue, cubic_save_dir, args.duration, args.seed)
            subprocess.check_output(cmd, shell=True).strip()
            print("run cubic on bw={}Mbps, delay={}ms, loss={}, "
                  "queue={}packets, duration={}s, used {:3f}s".format(
                      bw, delay, loss, queue, args.duration,
                      time.time() - t_start))
        df = pd.read_csv(os.path.join(
            cubic_save_dir, "cubic_test_log.csv"))
        cubic_rewards.append(df['reward'].mean())
        # cubic_scores.append(np.mean(learnability_objective_function(
        #     df['throughput'] * 1500 * 8 / 1e6, df['latency']*1000/2)))

    for model_idx, (model_path, ls, marker, color) in enumerate(
            zip(model_paths, ["-", "--", "-.", "-", "-", "--", "-.", ":"],
                ["x", "s", "v", '*', '+', '^', '>', '1'],
                ["C3", "C3", "C3", "C1", "C1", "C1", "C1", "C1"])):
        model_name = os.path.basename(os.path.dirname(model_path))
        aurora_rewards = []
        # aurora_scores = []
        # detect latest n models here
        last_n_model_paths = get_last_n_models(model_path, args.n_models)
        # construct n Aurora objects and load aurora models here
        last_n_auroras = []
        for tmp_model_path in last_n_model_paths:
            last_n_auroras.append(
                Aurora(seed=args.seed, log_dir="", timesteps_per_actorbatch=10,
                       pretrained_model_path=tmp_model_path,
                       delta_scale=args.delta_scale))

        for (bw, delay, loss, queue) in itertools.product(
                bw_list, delay_list, loss_list, queue_list):
            save_dir = f"{save_root}/rand_{metric}/env_{bw}_{delay}_{loss}_{queue}"
            aurora_save_dir = os.path.join(save_dir, model_name)
            cubic_save_dir = os.path.join(save_dir, "cubic")

            # run aurora
            aurora_rewards.append(
                multiple_runs(last_n_auroras, bw, delay, loss, queue,
                              aurora_save_dir, args.duration, args.plot_only))
        if model_idx == 0:
            plt.plot(config[metric], cubic_rewards, 'o-', c="C0",
                     label="TCP Cubic")
        if train_config_dir is not None:
            train_config = read_json_file(os.path.join(
                train_config_dir, model_name+'.json'))
            env = 'bw=[{}, {}]Mbps, delay=[{}, {}]ms, loss=[{}, {}], queue=[{}, {}]pkt'.format(
                train_config[0]['bandwidth'][0], train_config[0]['bandwidth'][1],
                train_config[0]['delay'][0], train_config[0]['delay'][1],
                train_config[0]['loss'][0], train_config[0]['loss'][1],
                train_config[0]['queue'][0], train_config[0]['queue'][1])
        else:
            env = ""
            # raise RuntimeError
        assert ls in {'', '-', '--', '-.', ':', None}
        plt.plot(config[metric], aurora_rewards, marker=marker,
                 linestyle=ls, c=color, label=model_name + ", " + env)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.2), loc="lower left",
               mode="expand", ncol=1, )
    if metric == "bandwidth":
        unit = "Mbps"
    elif metric == 'delay':
        unit = 'ms'
    elif metric == 'loss':
        unit = ''
    elif metric == 'queue':
        unit = 'packets'
    else:
        raise RuntimeError("unknown metric: {}".format(metric))

    plt.xlabel("{} ({})".format(metric, unit))
    plt.ylabel('Reward')
    # plt.ylabel('log(throughput) - log(delay)')
    plt.tight_layout()
    plt.savefig(os.path.join(
        args.save_dir, "rand_{}_sim.png".format(metric)))
    plt.close()