Example 1
    def init_url_user_auth_data(self):
        user_ids = []
        users = read_json_file(self._user_file_path())['users']

        for user in users:
            roles = []
            for role in user['roles']:
                auth_objects = []
                for auth in role['authorizations']:
                    # Create the authorization and its resource
                    url = URLOperation(**auth['resource'])
                    url.save()
                    auth_obj, created = Authorization.objects.get_or_create(
                        name=auth['name'],
                        resource_id=url.pk,
                        type=auth['type'])
                    auth_objects.append(auth_obj)

                role_obj, created = Role.objects.get_or_create(
                    name=role['name'])
                for auth in auth_objects:
                    role_obj.authorizations.add(auth)
                roles.append(role_obj)
            password = make_password(user['password'], user['salt'])
            user_obj = User(name=user['name'],
                            password=password,
                            salt=user['salt'])
            user_obj.save()
            for role in roles:
                user_obj.roles.add(role)
            user_ids.append(user_obj.pk)
        return user_ids
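
All of the examples on this page call the project's `read_json_file` helper, and several also call `write_json_file`; neither is shown here. A minimal sketch of what they presumably look like, assuming they are thin wrappers around the standard `json` module (the actual helpers in zxxia/PCC-RL may differ in details such as indentation or encoding):

import json
from typing import Any


def read_json_file(filename: str) -> Any:
    # Parse a JSON file and return the resulting Python object.
    with open(filename, 'r') as f:
        return json.load(f)


def write_json_file(filename: str, content: Any) -> None:
    # Serialize a Python object and write it out as JSON.
    with open(filename, 'w') as f:
        json.dump(content, f, indent=4)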
Example 2
def to_csv(config_file):
    bo_log = read_json_file(config_file)
    csv_file = os.path.join(
        os.path.dirname(config_file),
        os.path.splitext(os.path.basename(config_file))[0] + ".csv")
    with open(csv_file, 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        header = ['bandwidth_lower_bound_min', 'bandwidth_lower_bound_max',
                  'bandwidth_upper_bound_min', 'bandwidth_upper_bound_max',
                  'delay_min', 'delay_max', 'queue_min', 'queue_max', 'loss_min',
                  'loss_max', 'T_s_min', 'T_s_max', 'delay_noise_min',
                  'delay_noise_max', 'duration_min', 'duration_max', 'weight']
        writer.writerow(header)
        for config in bo_log:
            writer.writerow([
                config['bandwidth_lower_bound'][0],
                config['bandwidth_lower_bound'][1],
                config['bandwidth_upper_bound'][0],
                config['bandwidth_upper_bound'][1],
                config['delay'][0], config['delay'][1],
                config['queue'][0], config['queue'][1],
                config['loss'][0], config['loss'][1],
                config['T_s'][0], config['T_s'][1],
                config['delay_noise'][0], config['delay_noise'][1],
                config['duration'][0], config['duration'][1],
                config['weight']])
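
For reference, `to_csv` (and `generate_configs` in Example 7 below) expects `read_json_file` to return a list of configuration dictionaries in which each parameter maps to a two-element [min, max] range plus a scalar `weight`. A hypothetical single-entry example, inferred only from the keys accessed above; the numeric values are placeholders rather than values from the project:

bo_log = [{
    'bandwidth_lower_bound': [0.1, 1.0],    # presumably Mbps
    'bandwidth_upper_bound': [1.0, 100.0],  # presumably Mbps
    'delay': [5, 200],                      # presumably ms
    'queue': [2, 30],                       # presumably packets
    'loss': [0.0, 0.05],
    'T_s': [0, 6],
    'delay_noise': [0, 0],
    'duration': [30, 30],                   # presumably seconds
    'weight': 1.0,
}]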
Example 3
    def __init__(self,
                 trace_file,
                 calibrate_timestamps=False,
                 use_cache=True,
                 start_time=None,
                 end_time=None):
        self.use_cache = use_cache
        trace_file_basename = os.path.basename(trace_file)
        trace_file_dirname = os.path.dirname(trace_file)
        cc = extract_cc_name(trace_file)
        summary_path = os.path.join(str(trace_file_dirname),
                                    '{}_conn_summary.json'.format(cc))
        self.cache = {}
        if not self.use_cache or (self.use_cache
                                  and not os.path.exists(summary_path)):
            self.datalink = Flow(trace_file,
                                 start_time=start_time,
                                 end_time=end_time)
            self.acklink = Flow(os.path.join(
                str(trace_file_dirname),
                str(trace_file_basename.replace("datalink", "acklink"))),
                                start_time=start_time,
                                end_time=end_time)
            if calibrate_timestamps:
                self.t_offset = min(self.datalink.throughput_timestamps[0],
                                    self.datalink.sending_rate_timestamps[0])
            else:
                self.t_offset = 0

            self.cache['cc'] = self.cc
            self.cache[
                'link_capacity_timestamps'] = self.link_capacity_timestamps
            self.cache['link_capacity'] = self.link_capacity
            self.cache['avg_link_capacity'] = self.avg_link_capacity
            self.cache['throughput_timestamps'] = self.throughput_timestamps
            self.cache['throughput'] = self.throughput
            self.cache['avg_throughput'] = self.avg_throughput
            self.cache[
                'sending_rate_timestamps'] = self.sending_rate_timestamps
            self.cache['sending_rate'] = self.sending_rate
            self.cache['avg_sending_rate'] = self.avg_sending_rate
            self.cache[
                'datalink_delay_timestamps'] = self.datalink_delay_timestamps
            self.cache['datalink_delay'] = self.datalink_delay
            self.cache[
                'acklink_delay_timestamps'] = self.acklink_delay_timestamps
            self.cache['acklink_delay'] = self.acklink_delay
            self.cache['loss_rate'] = self.datalink.loss_rate
            self.cache['min_one_way_delay'] = self.min_one_way_delay
            self.cache['min_rtt'] = self.min_rtt
            self.cache['rtt_timestamps'] = self.rtt_timestamps
            self.cache['rtt'] = self.rtt
            self.cache['avg_rtt'] = self.avg_rtt
            self.cache['percentile_rtt'] = self.percentile_rtt

            write_json_file(summary_path, self.cache)
        else:
            self.cache = read_json_file(summary_path)
Example 4
def load_from_file(filename: str):
    trace_data = read_json_file(filename)
    tr = Trace(trace_data['timestamps'],
               trace_data['bandwidths'],
               trace_data['delays'],
               trace_data['loss'],
               trace_data['queue'],
               delay_noise=trace_data['delay_noise']
               if 'delay_noise' in trace_data else 0)
    return tr
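
Examples 8 and 9 below call `trace.dump(...)` to write a trace back to disk. A matching counterpart to this `load_from_file` would presumably serialize the same keys; a minimal sketch, assuming the `Trace` object exposes attributes named `timestamps`, `bandwidths`, `delays`, `loss_rate`, `queue_size`, and `delay_noise` (`bandwidths`, `delays`, `loss_rate`, and `queue_size` appear elsewhere on this page, the other names are guesses):

def dump(self, filename: str) -> None:
    # Mirror of load_from_file: write the trace attributes back out as JSON.
    write_json_file(filename, {
        'timestamps': self.timestamps,
        'bandwidths': self.bandwidths,
        'delays': self.delays,
        'loss': self.loss_rate,
        'queue': self.queue_size,
        'delay_noise': self.delay_noise,
    })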
Example 5
def __init__(self, filename: str) -> None:
    self.filename = filename
    if filename and os.path.exists(filename):
        self.rand_ranges = read_json_file(filename)
        assert isinstance(self.rand_ranges, List) and len(
            self.rand_ranges) >= 1, "rand_ranges object should be a list with length at least 1."
        weight_sum = 0
        for rand_range in self.rand_ranges:
            weight_sum += rand_range['weight']
        assert weight_sum == 1.0, "Weight sum should be 1."
        self.parameters = set(self.rand_ranges[0].keys())
        self.parameters.remove('weight')
    else:
        self.rand_ranges = []
        self.parameters = set()
Example 6
    def reset(self):
        self.steps_taken = 0
        self.net.reset()
        # old snippet start
        # self.current_trace = np.random.choice(self.traces)
        # old snippet end

        # Choose a real trace with probability real_trace_prob; otherwise, use a synthetic trace.
        if self.train_flag and self.config_file:
            self.current_trace = generate_traces(self.config_file,
                                                 1,
                                                 duration=30)[0]
            if random.uniform(0, 1) < self.real_trace_prob and self.traces:
                cur_config = read_json_file(self.config_file)[0]
                cur_config['weight'] = 1
                self.current_trace = generate_trace_from_config([cur_config])
        else:
            self.current_trace = np.random.choice(self.traces)

        # if self.train_flag and not self.config_file:
        #     bdp = np.max(self.current_trace.bandwidths) / BYTES_PER_PACKET / \
        #             BITS_PER_BYTE * 1e6 * np.max(self.current_trace.delays) * 2 / 1000
        #     self.current_trace.queue_size = max(2, int(bdp * np.random.uniform(0.2, 3.0))) # hard code this for now
        #     loss_rate_exponent = float(np.random.uniform(np.log10(0+1e-5), np.log10(0.5+1e-5), 1))
        #     if loss_rate_exponent < -4:
        #         loss_rate = 0
        #     else:
        #         loss_rate = 10**loss_rate_exponent
        #     self.current_trace.loss_rate = loss_rate

        self.current_trace.reset()
        self.create_new_links_and_senders()
        self.net = Network(self.senders, self.links, self)
        self.episodes_run += 1

        # old code snippet start
        # if self.train_flag and self.config_file is not None and self.episodes_run % 100 == 0:
        #     self.traces = generate_traces(self.config_file, 10, duration=30)
        # old code snippet end
        self.net.run_for_dur(self.run_dur)
        self.reward_ewma *= 0.99
        self.reward_ewma += 0.01 * self.reward_sum
        self.reward_sum = 0.0
        return self._get_all_sender_obs()
Example 7
def generate_configs(config_file: str, n: int):
    config_range = read_json_file(config_file)[0]
    configs = []

    for _ in range(n):
        min_bw = 10**np.random.uniform(
            np.log10(config_range['bandwidth_lower_bound'][0]),
            np.log10(config_range['bandwidth_lower_bound'][1]), 1)[0]
        max_bw = 10**np.random.uniform(
            np.log10(config_range['bandwidth_upper_bound'][0]),
            np.log10(config_range['bandwidth_upper_bound'][1]), 1)[0]
        delay = np.random.uniform(config_range['delay'][0],
                                  config_range['delay'][1], 1)[0]
        queue = np.random.uniform(config_range['queue'][0],
                                  config_range['queue'][1], 1)[0]
        T_s = np.random.uniform(config_range['T_s'][0], config_range['T_s'][1],
                                1)[0]
        loss_exponent = np.random.uniform(
            np.log10(config_range['loss'][0] + 1e-5),
            np.log10(config_range['loss'][1] + 1e-5), 1)[0]
        loss = 0 if loss_exponent < -4 else 10**loss_exponent
        configs.append([min_bw, max_bw, delay, queue, loss, T_s])

    return configs
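
A hypothetical invocation, assuming a config file in the list-of-ranges format sketched after Example 2 (the file name is a placeholder). Each returned entry is a flat [min_bw, max_bw, delay, queue, loss, T_s] list:

configs = generate_configs('udr_config.json', n=20)
for min_bw, max_bw, delay, queue, loss, T_s in configs:
    print(min_bw, max_bw, delay, queue, loss, T_s)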
Example 8
File: test.py Project: zxxia/PCC-RL
def main():
    bbr = BBR(True)
    cubic = Cubic(True)
    # cubic = Cubic(20)
    genet = Aurora(seed=20, log_dir=RESULT_ROOT,
                   pretrained_model_path=GENET_MODEL_PATH,
                   timesteps_per_actorbatch=10, delta_scale=1)
    udr_small = Aurora(seed=20, log_dir=RESULT_ROOT,
                       pretrained_model_path=UDR_SMALL_MODEL_PATH,
                       timesteps_per_actorbatch=10, delta_scale=1)
    udr_mid = Aurora(seed=20, log_dir=RESULT_ROOT,
                     pretrained_model_path=UDR_MID_MODEL_PATH,
                     timesteps_per_actorbatch=10, delta_scale=1)
    udr_large = Aurora(seed=20, log_dir=RESULT_ROOT,
                       pretrained_model_path=UDR_LARGE_MODEL_PATH,
                       timesteps_per_actorbatch=10, delta_scale=1)

    # for _ in range(10):
    #     DEFAULT_CONFIGS.append(
    #         {
    #          "bandwidth": 10 ** np.random.uniform(np.log10(1), np.log10(10), 1).item(),
    #          "delay": np.random.uniform(5, 200, 1).item(),
    #          "loss": np.random.uniform(0, 0.0, 1).item(),
    #          "queue": 10 ** np.random.uniform(np.log10(2), np.log10(30), 1).item(),
    #          "T_s": np.random.randint(0, 6, 1).item(),
    #          "delay_noise": np.random.uniform(0, 0, 1).item()})
    # print(DEFAULT_CONFIGS)
    udr_train_config = read_json_file(UDR_TRAIN_CONFIG_FILE)[0]
    # print(udr_train_config)
    for dim, unit in zip(DIMS_TO_VARY, DIM_UNITS):
        print(dim, udr_train_config[dim])
        if dim == 'bandwidth':
            vals_to_test = 10**np.linspace(
                np.log10(1), np.log10(udr_train_config[dim][1]), 10)
        elif dim == 'queue':
            vals_to_test = 10**np.linspace(
                np.log10(udr_train_config[dim][0]), np.log10(udr_train_config[dim][1]), 10)
        elif dim == 'loss':
            vals_to_test = np.linspace(0, 0.005, 10)
        else:
            vals_to_test = np.linspace(
                udr_train_config[dim][0], udr_train_config[dim][1], 10)
        print(vals_to_test)
        bbr_avg_rewards = []
        bbr_reward_errs = []
        cubic_avg_rewards = []
        cubic_reward_errs = []
        udr_small_avg_rewards = []
        udr_small_reward_errs = []
        udr_mid_avg_rewards = []
        udr_mid_reward_errs = []
        udr_large_avg_rewards = []
        udr_large_reward_errs = []
        genet_avg_rewards = []
        genet_reward_errs = []
        for val_idx, val in enumerate(vals_to_test):
            if dim == 'bandwidth':
                max_bw = val
            else:
                max_bw = DEFAULT_VALUES['bandwidth']

            if dim == 'delay':
                min_delay, max_delay = val, val
            else:
                min_delay, max_delay = DEFAULT_VALUES['delay'], DEFAULT_VALUES['delay']
            if dim == 'loss':
                min_loss, max_loss = val, val
            else:
                min_loss, max_loss = DEFAULT_VALUES['loss'], DEFAULT_VALUES['loss']
            if dim == 'queue':
                min_queue, max_queue = val, val
            else:
                min_queue, max_queue = DEFAULT_VALUES['queue'], DEFAULT_VALUES['queue']
            if dim == 'T_s':
                min_T_s, max_T_s = val, val
            else:
                min_T_s, max_T_s = DEFAULT_VALUES['T_s'], DEFAULT_VALUES['T_s']

            # generate n=10 traces for each config
            traces = [generate_trace(duration_range=(30, 30),
                                     bandwidth_range=(1, max_bw),
                                     delay_range=(min_delay, max_delay),
                                     loss_rate_range=(min_loss, max_loss),
                                     queue_size_range=(min_queue, max_queue),
                                     T_s_range=(min_T_s, max_T_s),
                                     delay_noise_range=(0, 0),
                                     constant_bw=False, seed=i) for i in range(10)]
            bbr_rewards = []
            cubic_rewards = []
            udr_small_rewards = []
            udr_mid_rewards = []
            udr_large_rewards = []
            genet_rewards = []
            for i, trace in enumerate(tqdm(traces)):
                save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                    dim), "val_{}".format(val_idx), "trace_{}".format(i))
                os.makedirs(save_dir, exist_ok=True)
                trace_file = os.path.join(save_dir, "trace_{}.json".format(i))
                trace.dump(trace_file)

                # bbr
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "bbr")
                # os.makedirs(save_dir, exist_ok=True)
                #
                # if os.path.exists(os.path.join(save_dir, "bbr_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "bbr_packet_log.csv"))
                #     pkt_level_reward = pkt_log.get_reward("", trace)
                # else:
                #     test_reward, pkt_level_reward = bbr.test(trace, save_dir)
                #
                # # bbr_rewards.append(test_reward)
                # bbr_rewards.append(pkt_level_reward)

                # cubic
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "cubic")
                # os.makedirs(save_dir, exist_ok=True)
                #
                # if os.path.exists(os.path.join(save_dir, "cubic_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "cubic_packet_log.csv"))
                #     pkt_level_reward = pkt_log.get_reward("", trace)
                # else:
                #     test_reward, pkt_level_reward = cubic.test(trace, save_dir)
                # # cubic_rewards.append(test_reward)
                # cubic_rewards.append(pkt_level_reward)

                # cmd = "python ../plot_scripts/plot_packet_log.py --log-file {} " \
                #     "--save-dir {} --trace-file {}".format(
                #         os.path.join(save_dir, "cubic_packet_log.csv"),
                #         save_dir, trace_file)
                # subprocess.check_output(cmd, shell=True).strip()
                # cmd = "python ../plot_scripts/plot_time_series.py --log-file {} " \
                #     "--save-dir {} --trace-file {}".format(
                #         os.path.join(save_dir, "cubic_simulation_log.csv"),
                #         save_dir, trace_file)
                # subprocess.check_output(cmd, shell=True).strip()

                # genet
                save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                    dim), "val_{}".format(val_idx), "trace_{}".format(i), "genet")
                if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                    pkt_log = PacketLog.from_log_file(
                        os.path.join(save_dir, "aurora_packet_log.csv"))
                else:
                    _, reward_list, _, _, _, _, _, _, _, pkt_log = genet.test(
                        trace, save_dir)
                    pkt_log = PacketLog.from_log(pkt_log)
                genet_rewards.append(pkt_log.get_reward("", trace))

                # udr_small
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "udr_small")
                # if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "aurora_packet_log.csv"))
                # else:
                #     _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_small.test(
                #         trace, save_dir)
                #     pkt_log = PacketLog.from_log(pkt_log)
                # udr_small_rewards.append(pkt_log.get_reward("", trace))
                #
                # # udr_mid
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "udr_mid")
                # if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "aurora_packet_log.csv"))
                # else:
                #     _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_mid.test(
                #         trace, save_dir)
                #     pkt_log = PacketLog.from_log(pkt_log)
                # udr_mid_rewards.append(pkt_log.get_reward("", trace))
                # _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_mid.test(
                #     trace, save_dir)
                # # test_reward = np.mean(reward_list)
                # # udr_mid_rewards.append(test_reward)
                #
                # # udr_large
                # save_dir = os.path.join(RESULT_ROOT, EXP_NAME, "vary_{}".format(
                #     dim), "val_{}".format(val_idx), "trace_{}".format(i), "udr_large")
                # os.makedirs(save_dir, exist_ok=True)
                # if os.path.exists(os.path.join(save_dir, "aurora_packet_log.csv")):
                #     pkt_log = PacketLog.from_log_file(
                #         os.path.join(save_dir, "aurora_packet_log.csv"))
                # else:
                #     _, reward_list, _, _, _, _, _, _, _, pkt_log = udr_large.test(
                #         trace, save_dir)
                #     pkt_log = PacketLog.from_log(pkt_log)
                # # test_reward = np.mean(reward_list)
                # # udr_large_rewards.append(test_reward)
                # udr_large_rewards.append(pkt_log.get_reward("", trace))
                # # cmd = "python ../plot_scripts/plot_packet_log.py --log-file {} " \
                # #     "--save-dir {} --trace-file {}".format(
                # #         os.path.join(save_dir, "aurora_packet_log.csv"),
                # #         save_dir, trace_file)
                # # subprocess.check_output(cmd, shell=True).strip()
                # # cmd = "python ../plot_scripts/plot_time_series.py --log-file {} " \
                # #     "--save-dir {} --trace-file {}".format(
                # #         os.path.join(save_dir, "aurora_simulation_log.csv"),
                # #         save_dir, trace_file)
                # # subprocess.check_output(cmd, shell=True).strip()
                #
                # # # genet_model.test(trace)
            print(len(cubic_avg_rewards), len(udr_large_avg_rewards))
            bbr_avg_rewards.append(np.mean(bbr_rewards))
            bbr_reward_errs.append(
                np.std(bbr_rewards) / np.sqrt(len(bbr_rewards)))
            cubic_avg_rewards.append(np.mean(cubic_rewards))
            cubic_reward_errs.append(
                np.std(cubic_rewards) / np.sqrt(len(cubic_rewards)))
            udr_small_avg_rewards.append(np.mean(udr_small_rewards))
            udr_small_reward_errs.append(
                np.std(udr_small_rewards) / np.sqrt(len(udr_small_rewards)))
            udr_mid_avg_rewards.append(np.mean(udr_mid_rewards))
            udr_mid_reward_errs.append(
                np.std(udr_mid_rewards) / np.sqrt(len(udr_mid_rewards)))
            udr_large_avg_rewards.append(np.mean(udr_large_rewards))
            udr_large_reward_errs.append(
                np.std(udr_large_rewards) / np.sqrt(len(udr_large_rewards)))

            genet_avg_rewards.append(np.mean(genet_rewards))
            genet_reward_errs.append(
                np.std(genet_rewards) / np.sqrt(len(genet_rewards)))
        plt.figure()
        ax = plt.gca()

        ax.plot(vals_to_test, genet_avg_rewards, color='C2',
                linewidth=4, alpha=1, linestyle='-', label="GENET")
        genet_low_bnd = np.array(genet_avg_rewards) - \
            np.array(genet_reward_errs)
        genet_up_bnd = np.array(genet_avg_rewards) + \
            np.array(genet_reward_errs)
        ax.fill_between(vals_to_test, genet_low_bnd,
                        genet_up_bnd, color='C2', alpha=0.1)

        ax.plot(vals_to_test, bbr_avg_rewards, color='C0',
                linestyle='-.', linewidth=4, alpha=1, label="BBR")
        bbr_low_bnd = np.array(bbr_avg_rewards) - \
            np.array(bbr_reward_errs)
        bbr_up_bnd = np.array(bbr_avg_rewards) + \
            np.array(bbr_reward_errs)
        ax.fill_between(vals_to_test, bbr_low_bnd,
                        bbr_up_bnd, color='C0', alpha=0.1)

        ax.plot(vals_to_test, cubic_avg_rewards, color='C0',
                linestyle='-', linewidth=4, alpha=1, label="TCP Cubic")
        cubic_low_bnd = np.array(cubic_avg_rewards) - \
            np.array(cubic_reward_errs)
        cubic_up_bnd = np.array(cubic_avg_rewards) + \
            np.array(cubic_reward_errs)
        ax.fill_between(vals_to_test, cubic_low_bnd,
                        cubic_up_bnd, color='C0', alpha=0.1)

        ax.plot(vals_to_test, udr_small_avg_rewards, color='grey',
                linewidth=4, linestyle=':', label="UDR-1")
        udr_small_low_bnd = np.array(
            udr_small_avg_rewards) - np.array(udr_small_reward_errs)
        udr_small_up_bnd = np.array(
            udr_small_avg_rewards) + np.array(udr_small_reward_errs)
        ax.fill_between(vals_to_test, udr_small_low_bnd,
                        udr_small_up_bnd, color='grey', alpha=0.1)

        ax.plot(vals_to_test, udr_mid_avg_rewards, color='grey',
                linewidth=4, linestyle='--', label="UDR-2")
        udr_mid_low_bnd = np.array(
            udr_mid_avg_rewards) - np.array(udr_mid_reward_errs)
        udr_mid_up_bnd = np.array(udr_mid_avg_rewards) + \
            np.array(udr_mid_reward_errs)
        ax.fill_between(vals_to_test, udr_mid_low_bnd,
                        udr_mid_up_bnd, color='grey', alpha=0.1)

        ax.plot(vals_to_test, udr_large_avg_rewards, color='grey',
                linewidth=4, linestyle='-.', label="UDR-3")
        udr_large_low_bnd = np.array(
            udr_large_avg_rewards) - np.array(udr_large_reward_errs)
        udr_large_up_bnd = np.array(
            udr_large_avg_rewards) + np.array(udr_large_reward_errs)
        ax.fill_between(vals_to_test, udr_large_low_bnd,
                        udr_large_up_bnd, color='grey', alpha=0.1)
        ax.set_xlabel("{}({})".format(dim, unit))
        ax.set_ylabel("Reward")
        ax.legend()
        plt.tight_layout()
        with open(os.path.join(RESULT_ROOT, EXP_NAME, "sim_eval_vary_{}_bbr_with_cubic.csv".format(dim)), 'w') as f:
            writer = csv.writer(f)
            writer.writerow([dim, 'genet_avg_rewards', 'genet_low_bnd', 'genet_up_bnd',
                'bbr_avg_rewards', 'bbr_low_bnd', 'bbr_up_bnd',
                'cubic_avg_rewards', 'cubic_low_bnd', 'cubic_up_bnd',
                'udr_small_avg_rewards', 'udr_small_low_bnd', 'udr_small_up_bnd',
                'udr_mid_avg_rewards', 'udr_mid_low_bnd', 'udr_mid_up_bnd',
                'udr_large_avg_rewards', 'udr_large_low_bnd', 'udr_large_up_bnd'])
            writer.writerows(zip(vals_to_test,
                    genet_avg_rewards, genet_low_bnd, genet_up_bnd,
                    bbr_avg_rewards, bbr_low_bnd, bbr_up_bnd,
                    cubic_avg_rewards, cubic_low_bnd, cubic_up_bnd,
                    udr_small_avg_rewards, udr_small_low_bnd, udr_small_up_bnd,
                    udr_mid_avg_rewards, udr_mid_low_bnd, udr_mid_up_bnd,
                    udr_large_avg_rewards, udr_large_low_bnd, udr_large_up_bnd))
        save_dir = os.path.join(RESULT_ROOT, EXP_NAME,
                                "sim_eval_vary_{}_bbr_with_cubic.pdf".format(dim))
        plt.savefig(save_dir)
Example 9
def main():
    args = parse_args()
    set_seed(args.seed)
    # tokens = os.path.basename(os.path.dirname(os.path.dirname(args.save_dir))).split('_')
    # config0_dim0_idx = int(tokens[1])
    # config0_dim1_idx = int(tokens[2])
    # config1_dim0_idx = int(tokens[4])
    # config1_dim1_idx = int(tokens[5])

    dim0, dim1 = args.dims
    config = read_json_file(args.config_file)[0]
    assert dim0 in config and dim1 in config

    # dim0_vals = np.linspace(config[dim0][0], config[dim0][1], 10)
    # dim1_vals = np.linspace(config[dim1][0], config[dim1][1], 10)
    dim0_vals = get_dim_vals(dim0)
    dim1_vals = get_dim_vals(dim1)
    print(dim0_vals)
    print(dim1_vals)
    traces = []
    save_dirs = []
    with open('heatmap_trace_cnt_ratio.npy', 'rb') as f:
        cnt_ratio = np.load(f)
    for dim0_idx, dim0_val in enumerate(dim0_vals):
        for dim1_idx, dim1_val in enumerate(dim1_vals):
            dim_vals = copy.copy(DEFAULT_VALUES)
            dim_vals[dim0] = dim0_val
            dim_vals[dim1] = dim1_val
            # print(i, dim0_val, dim1_val, dim_vals)
            cnt = 10
            # if cnt_ratio[dim0_idx, dim1_idx] > 1:
            #     cnt *= int(cnt_ratio[dim0_idx, dim1_idx])
            # print(cnt)
            for trace_idx in range(cnt):
                trace = generate_trace(
                    duration_range=(dim_vals['duration'],
                                    dim_vals['duration']),
                    bandwidth_lower_bound_range=(
                        dim_vals['bandwidth_lower_bound'],
                        dim_vals['bandwidth_lower_bound']),
                    bandwidth_upper_bound_range=(
                        dim_vals['bandwidth_upper_bound'],
                        dim_vals['bandwidth_upper_bound']),
                    delay_range=(dim_vals['delay'], dim_vals['delay']),
                    loss_rate_range=(dim_vals['loss'], dim_vals['loss']),
                    queue_size_range=(dim_vals['queue'], dim_vals['queue']),
                    T_s_range=(dim_vals['T_s'], dim_vals['T_s']),
                    delay_noise_range=(dim_vals['delay_noise'],
                                       dim_vals['delay_noise']))
                traces.append(trace)
                save_dir = os.path.join(
                    args.save_dir, 'pair_{}_{}'.format(dim0_idx, dim1_idx),
                    'trace_{}'.format(trace_idx))
                save_dirs.append(save_dir)
                os.makedirs(save_dir, exist_ok=True)
                trace.dump(
                    os.path.join(save_dir, 'trace_{}.json'.format(trace_idx)))
    if args.cc == 'genet_bbr' or args.cc == 'genet_cubic' or args.cc == 'genet_bbr_old':
        genet_seed = ''
        for s in args.models_path.split('/'):
            if 'seed' in s:
                genet_seed = s
        for bo in range(0, 30, 3):
            # for bo_dir in natural_sort(glob.glob(os.path.join(args.models_path, "bo_*/"))):
            bo_dir = os.path.join(args.models_path, "bo_{}".format(bo))
            step = 64800
            model_path = os.path.join(bo_dir,
                                      'model_step_{}.ckpt'.format(step))
            if not os.path.exists(model_path + '.meta'):
                print(model_path, 'does not exist')
                continue
            print(model_path)
            genet_save_dirs = [
                os.path.join(save_dir, args.cc, genet_seed, "bo_{}".format(bo),
                             "step_{}".format(step)) for save_dir in save_dirs
            ]
            t_start = time.time()
            test_on_traces(model_path, traces, genet_save_dirs, args.nproc, 42,
                           False, False)
            print('bo {}: {:.3f}'.format(bo, time.time() - t_start))
    elif args.cc == 'pretrained':
        pretrained_save_dirs = [
            os.path.join(save_dir, args.cc) for save_dir in save_dirs
        ]
        t_start = time.time()
        test_on_traces(args.models_path, traces, pretrained_save_dirs,
                       args.nproc, 42, False, False)
        print('pretrained: {:.3f}'.format(time.time() - t_start))
    elif args.cc == 'overfit_config':
        overfit_config_save_dirs = [
            os.path.join(save_dir, args.cc) for save_dir in save_dirs
        ]
        t_start = time.time()
        test_on_traces(args.models_path, traces, overfit_config_save_dirs,
                       args.nproc, 42, False, False)
        print('overfit_config: {:.3f}'.format(time.time() - t_start))
    else:
        if args.cc == 'bbr':
            cc = BBR(False)
        elif args.cc == 'cubic':
            cc = Cubic(False)
        elif args.cc == 'bbr_old':
            cc = BBR_old(False)
        else:
            raise NotImplementedError
        heuristic_save_dirs = [
            os.path.join(save_dir, cc.cc_name) for save_dir in save_dirs
        ]
        t_start = time.time()
        cc.test_on_traces(traces, heuristic_save_dirs, False, args.nproc)
        print('{}: {:.3f}'.format(args.cc, time.time() - t_start))
Example 10
def load_from_file(filename: str):
    trace_data = read_json_file(filename)
    tr = AbrTrace(trace_data['timestamps'], trace_data['bandwidths'],
                  trace_data['link_rtt'], trace_data['buffer_thresh'],
                  trace_data['name'])
    return tr
Example 11
def generate_trace_from_config_file(config_file: str):
    config = read_json_file(config_file)
    return generate_trace_from_config(config)
Example 12
def generate_trace_from_config_file(config_file: str,
                                    duration: int = 30) -> Trace:
    config = read_json_file(config_file)
    return generate_trace_from_config(config, duration)
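
A hypothetical call, again assuming a config file in the format sketched after Example 2 (both paths are placeholders); `trace.dump`, as used in Examples 8 and 9, writes the generated trace back out as JSON:

trace = generate_trace_from_config_file('udr_config.json', duration=30)
trace.dump('trace_0.json')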
Example 13
def main():
    args = parse_args()
    udr_large = read_json_file(args.config_file)[0]

    for i in range(10, 110, 10):
        set_seed(i)
        bw_upper_bnd_min, bw_upper_bnd_max = udr_large['bandwidth_upper_bound']
        new_bw_upper_bnd_min, new_bw_upper_bnd_max = gen_random_range(
            'bandwidth_upper_bound', bw_upper_bnd_min, bw_upper_bnd_max, True)

        bw_lower_bnd_min, bw_lower_bnd_max = udr_large['bandwidth_lower_bound']
        new_bw_lower_bnd_min, new_bw_lower_bnd_max = gen_random_range(
            'bandwidth_lower_bound', bw_lower_bnd_min, new_bw_upper_bnd_max,
            True)
        while new_bw_lower_bnd_min > new_bw_upper_bnd_min:
            new_bw_lower_bnd_min, new_bw_lower_bnd_max = gen_random_range(
                'bandwidth_lower_bound', bw_lower_bnd_min,
                new_bw_upper_bnd_max, True)

        delay_min, delay_max = udr_large['delay']
        new_delay_min, new_delay_max = gen_random_range(
            'delay', delay_min, delay_max)
        loss_min, loss_max = udr_large['loss']
        new_loss_min, new_loss_max = gen_random_range('loss', loss_min,
                                                      loss_max, True)
        queue_min, queue_max = udr_large['queue']
        new_queue_min, new_queue_max = gen_random_range(
            'queue', queue_min, queue_max)
        T_s_min, T_s_max = udr_large['T_s']
        new_T_s_min, new_T_s_max = gen_random_range('T_s', T_s_min, T_s_max)
        delay_noise_min, delay_noise_max = udr_large['delay_noise']
        new_delay_noise_min, new_delay_noise_max = gen_random_range(
            'delay_noise', delay_noise_min, delay_noise_max)

        udr_mid = copy.deepcopy(udr_large)
        udr_mid['bandwidth_lower_bound'][0] = new_bw_lower_bnd_min
        udr_mid['bandwidth_lower_bound'][1] = new_bw_lower_bnd_max
        udr_mid['bandwidth_upper_bound'][0] = new_bw_upper_bnd_min
        udr_mid['bandwidth_upper_bound'][1] = new_bw_upper_bnd_max
        udr_mid['delay'][0] = new_delay_min
        udr_mid['delay'][1] = new_delay_max
        udr_mid['loss'][0] = new_loss_min
        udr_mid['loss'][1] = new_loss_max
        udr_mid['queue'][0] = new_queue_min
        udr_mid['queue'][1] = new_queue_max
        udr_mid['T_s'][0] = new_T_s_min
        udr_mid['T_s'][1] = new_T_s_max
        udr_mid['delay_noise'][0] = new_delay_noise_min
        udr_mid['delay_noise'][1] = new_delay_noise_max

        write_json_file(
            os.path.join(args.save_dir, 'udr_mid_seed_{}.json'.format(i)),
            [udr_mid])

        set_seed(i)
        bw_upper_bnd_min, bw_upper_bnd_max = udr_large['bandwidth_upper_bound']
        new_bw_upper_bnd_min, new_bw_upper_bnd_max = gen_random_range(
            'bandwidth_upper_bound',
            bw_upper_bnd_min,
            bw_upper_bnd_max,
            True,
            single_point=True)
        new_bw_lower_bnd_min, new_bw_lower_bnd_max = new_bw_upper_bnd_min, new_bw_upper_bnd_max

        delay_min, delay_max = udr_large['delay']
        new_delay_min, new_delay_max = gen_random_range('delay',
                                                        delay_min,
                                                        delay_max,
                                                        False,
                                                        1 / 9,
                                                        single_point=True)
        loss_min, loss_max = udr_large['loss']
        new_loss_min, new_loss_max = gen_random_range('loss',
                                                      loss_min,
                                                      loss_max,
                                                      True,
                                                      1 / 9,
                                                      single_point=True)
        queue_min, queue_max = udr_large['queue']
        new_queue_min, new_queue_max = gen_random_range('queue',
                                                        queue_min,
                                                        queue_max,
                                                        False,
                                                        1 / 9,
                                                        single_point=True)
        T_s_min, T_s_max = udr_large['T_s']
        new_T_s_min, new_T_s_max = gen_random_range('T_s',
                                                    T_s_min,
                                                    T_s_max,
                                                    False,
                                                    1 / 9,
                                                    single_point=True)
        delay_noise_min, delay_noise_max = udr_large['delay_noise']
        new_delay_noise_min, new_delay_noise_max = gen_random_range(
            'delay_noise',
            delay_noise_min,
            delay_noise_max,
            False,
            1 / 9,
            single_point=True)

        udr_small = copy.deepcopy(udr_large)
        udr_small['bandwidth_lower_bound'][0] = new_bw_lower_bnd_min
        udr_small['bandwidth_lower_bound'][1] = new_bw_lower_bnd_max
        udr_small['bandwidth_upper_bound'][0] = new_bw_upper_bnd_min
        udr_small['bandwidth_upper_bound'][1] = new_bw_upper_bnd_max
        udr_small['delay'][0] = new_delay_min
        udr_small['delay'][1] = new_delay_max
        udr_small['loss'][0] = new_loss_min
        udr_small['loss'][1] = new_loss_max
        udr_small['queue'][0] = new_queue_min
        udr_small['queue'][1] = new_queue_max
        udr_small['T_s'][0] = new_T_s_min
        udr_small['T_s'][1] = new_T_s_max
        udr_small['delay_noise'][0] = new_delay_noise_min
        udr_small['delay_noise'][1] = new_delay_noise_max

        write_json_file(
            os.path.join(args.save_dir, 'udr_small_seed_{}.json'.format(i)),
            [udr_small])
Example 14
def main():
    args = parse_args()
    set_seed(args.seed)
    metric = args.dimension
    model_paths = args.model_path
    save_root = args.save_dir
    config_file = args.config_file
    train_config_dir = args.train_config_dir
    print(metric, model_paths, save_root, config_file)

    plt.figure(figsize=(8, 8))
    config = read_json_file(config_file)
    print(config)
    bw_list = config['bandwidth']
    delay_list = config['delay']
    loss_list = config['loss']
    queue_list = config["queue"]

    # run cubic
    cubic_rewards = []
    # cubic_scores = []
    for (bw, delay, loss, queue) in itertools.product(
            bw_list, delay_list, loss_list, queue_list):
        save_dir = f"{save_root}/rand_{metric}/env_{bw}_{delay}_{loss}_{queue}"
        cubic_save_dir = os.path.join(save_dir, "cubic")
        if not args.plot_only:
            t_start = time.time()
            cmd = "python evaluate_cubic.py --bandwidth {} --delay {} " \
                "--loss {} --queue {} --save-dir {} --duration {} --seed {}".format(
                    bw, delay, loss, queue, cubic_save_dir, args.duration, args.seed)
            subprocess.check_output(cmd, shell=True).strip()
            print("run cubic on bw={}Mbps, delay={}ms, loss={}, "
                  "queue={}packets, duration={}s, used {:3f}s".format(
                      bw, delay, loss, queue, args.duration,
                      time.time() - t_start))
        df = pd.read_csv(os.path.join(
            cubic_save_dir, "cubic_test_log.csv"))
        cubic_rewards.append(df['reward'].mean())
        # cubic_scores.append(np.mean(learnability_objective_function(
        #     df['throughput'] * 1500 * 8 / 1e6, df['latency']*1000/2)))

    for model_idx, (model_path, ls, marker, color) in enumerate(
            zip(model_paths, ["-", "--", "-.", "-", "-", "--", "-.", ":"],
                ["x", "s", "v", '*', '+', '^', '>', '1'],
                ["C3", "C3", "C3", "C1", "C1", "C1", "C1", "C1"])):
        model_name = os.path.basename(os.path.dirname(model_path))
        aurora_rewards = []
        # aurora_scores = []
        # detect latest n models here
        last_n_model_paths = get_last_n_models(model_path, args.n_models)
        # construct n Aurora objects and load aurora models here
        last_n_auroras = []
        for tmp_model_path in last_n_model_paths:
            last_n_auroras.append(
                Aurora(seed=args.seed, log_dir="", timesteps_per_actorbatch=10,
                       pretrained_model_path=tmp_model_path,
                       delta_scale=args.delta_scale))

        for (bw, delay, loss, queue) in itertools.product(
                bw_list, delay_list, loss_list, queue_list):
            save_dir = f"{save_root}/rand_{metric}/env_{bw}_{delay}_{loss}_{queue}"
            aurora_save_dir = os.path.join(save_dir, model_name)
            cubic_save_dir = os.path.join(save_dir, "cubic")

            # run aurora
            aurora_rewards.append(
                multiple_runs(last_n_auroras, bw, delay, loss, queue,
                              aurora_save_dir, args.duration, args.plot_only))
        if model_idx == 0:
            plt.plot(config[metric], cubic_rewards, 'o-', c="C0",
                     label="TCP Cubic")
        if train_config_dir is not None:
            train_config = read_json_file(os.path.join(
                train_config_dir, model_name+'.json'))
            env = 'bw=[{}, {}]Mbps, delay=[{}, {}]ms, loss=[{}, {}], queue=[{}, {}]pkt'.format(
                train_config[0]['bandwidth'][0], train_config[0]['bandwidth'][1],
                train_config[0]['delay'][0], train_config[0]['delay'][1],
                train_config[0]['loss'][0], train_config[0]['loss'][1],
                train_config[0]['queue'][0], train_config[0]['queue'][1])
        else:
            env = ""
            # raise RuntimeError
        assert ls in {'', '-', '--', '-.', ':', None}
        plt.plot(config[metric], aurora_rewards, marker=marker,
                 linestyle=ls, c=color, label=model_name + ", " + env)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.2), loc="lower left",
               mode="expand", ncol=1, )
    if metric == "bandwidth":
        unit = "Mbps"
    elif metric == 'delay':
        unit = 'ms'
    elif metric == 'loss':
        unit = ''
    elif metric == 'queue':
        unit = 'packets'
    else:
        raise RuntimeError

    plt.xlabel("{} ({})".format(metric, unit))
    plt.ylabel('Reward')
    # plt.ylabel('log(throughput) - log(delay)')
    plt.tight_layout()
    plt.savefig(os.path.join(
        args.save_dir, "rand_{}_sim.png".format(metric)))
    plt.close()
Example 15
def main():
    args = parse_args()
    dim0_vals, dim0_ticks, dim0_ticklabels, dim0_axlabel = get_dim_vals(
        args.dims[0])
    dim1_vals, dim1_ticks, dim1_ticklabels, dim1_axlabel = get_dim_vals(
        args.dims[1])
    fig, axes = plt.subplots(2, 5, figsize=(12, 10))
    max_gap = -np.inf
    min_gap = np.inf

    # tokens = os.path.basename(os.path.dirname(args.root)).split('_')
    # overfit_config0_dim0_idx = int(tokens[1])
    # overfit_config0_dim1_idx = int(tokens[2])
    # overfit_config1_dim0_idx = int(tokens[4])
    # overfit_config1_dim1_idx = int(tokens[5])
    gap_matrices = []
    with open('heatmap_trace_cnt_ratio.npy', 'rb') as f:
        cnt_ratio = np.load(f)
    bo_range = range(0, 30, 3)
    for bo in bo_range:
        results = []
        std_mat = np.zeros((len(dim0_vals), len(dim1_vals)))
        for i in range(len(dim0_vals)):
            row = []
            for j in range(len(dim1_vals)):
                # if (i != overfit_config0_dim0_idx and j != overfit_config0_dim1_idx) or (i != overfit_config1_dim0_idx and j != overfit_config1_dim1_idx):
                #     continue
                gaps = []
                cnt = 10
                # if cnt_ratio[i, j] > 1:
                #     cnt *= int(cnt_ratio[i, j])
                for k in range(cnt):
                    trace_dir = os.path.join(
                        args.root, "{}_vs_{}/pair_{}_{}/trace_{}".format(
                            args.dims[0], args.dims[1], i, j, k))
                    # if os.path.exists(os.path.join(trace_dir, args.heuristic, '{}_summary.csv'.format(args.heuristic))):
                    # if (i == overfit_config0_dim0_idx and j == overfit_config0_dim1_idx):
                    df = load_summary(
                        os.path.join(trace_dir, args.heuristic,
                                     '{}_summary.csv'.format(args.heuristic)))
                    # else:
                    #     df = {'{}_level_reward'.format(
                    # args.reward_level): 0}
                    heuristic_reward = df['{}_level_reward'.format(
                        args.reward_level)]
                    if args.rl == 'pretrained':
                        df = load_summary(
                            os.path.join(trace_dir, 'pretrained',
                                         'aurora_summary.csv'))
                    elif args.rl == 'overfit_config':
                        if bo == 0:
                            # if os.path.exists(os.path.join(
                            #     trace_dir, 'overfit_config', 'aurora_summary.csv')):
                            # if (i == overfit_config0_dim0_idx and j == overfit_config0_dim1_idx):
                            df = load_summary(
                                os.path.join(trace_dir, 'overfit_config',
                                             'aurora_summary.csv'))
                            # else:
                            #     df = {'{}_level_reward'.format(
                            # args.reward_level): 0}
                        # elif bo == 3:
                        #     df = load_summary(os.path.join(
                        #         trace_dir, 'overfit_config', 'aurora_summary.csv'))
                        else:
                            continue
                    else:
                        df = load_summary(
                            os.path.join(trace_dir, args.rl,
                                         'seed_{}'.format(args.seed),
                                         "bo_{}".format(bo), 'step_64800',
                                         'aurora_summary.csv'))
                    genet_reward = df['{}_level_reward'.format(
                        args.reward_level)]
                    gaps.append(genet_reward - heuristic_reward)
                row.append(np.mean(gaps))
                if np.mean(gaps) < 0:
                    # std_mat[i, j] = int((compute_std_of_mean(gaps) / 12.5)**2)
                    std_mat[i, j] = compute_std_of_mean(gaps)
                max_gap = max(max_gap, np.mean(gaps))
                min_gap = min(min_gap, np.mean(gaps))
            results.append(row)
        results = np.array(results)
        # with open('heatmap_trace_cnt_ratio.npy', 'wb') as f:
        #     np.save(f, std_mat)
        gap_matrices.append(results)

    for subplot_idx, (gap_matrix, bo, ax) in enumerate(
            zip(gap_matrices, bo_range, axes.flatten())):
        im = ax.imshow(gap_matrix)
        im.set_clim(vmax=0, vmin=-200)

        if args.rl != 'pretrained' and args.rl != 'overfit_config':
            selected_configs = read_json_file(
                os.path.join(args.models_path, 'bo_{}.json'.format(bo)))

            selected_dim1_idxs = []
            selected_dim0_idxs = []
            for selected_config in selected_configs[1:]:
                selected_dim1_idxs.append(
                    find_idx(selected_config[args.dims[1]][0], dim1_vals))
                selected_dim0_idxs.append(
                    find_idx(selected_config[args.dims[0]][0], dim0_vals))

            ax.scatter(selected_dim1_idxs,
                       selected_dim0_idxs,
                       marker='o',
                       c='r')
        if subplot_idx == 0 or subplot_idx == 5:
            ax.set_yticks(dim0_ticks)
            ax.set_yticklabels(dim0_ticklabels)
            ax.set_ylabel(dim0_axlabel)
        else:
            ax.set_yticks([])
        ax.set_xticks(dim1_ticks)
        ax.set_xticklabels(dim1_ticklabels)
        if subplot_idx == 2 or subplot_idx == 7:
            ax.set_xlabel(dim1_axlabel)

        if args.rl == 'pretrained':
            ax.set_title("pretrained")
            # plt.savefig(os.path.join(args.root, '{}_vs_{}'.format(args.dims[0], args.dims[1]),
            #                          '{}_{}_{}_level_reward_heatmap.jpg'.format(args.rl, args.heuristic, args.reward_level)))
            break
        elif args.rl == 'overfit_config':
            tokens = os.path.basename(os.path.dirname(args.root)).split('_')
            overfit_config0_dim0_idx = int(tokens[1])
            overfit_config0_dim1_idx = int(tokens[2])
            # overfit_config1_dim0_idx = int(tokens[4])
            # overfit_config1_dim1_idx = int(tokens[5])
            ax.scatter(overfit_config0_dim1_idx,
                       overfit_config0_dim0_idx,
                       marker='.',
                       c='r',
                       s=2)
            # ax.scatter(overfit_config1_dim1_idx, overfit_config1_dim0_idx, marker='x', c='r', s=2)
        else:
            ax.set_title("BO {}".format(bo))
            # plt.savefig(os.path.join(args.root, '{}_vs_{}'.format(args.dims[0], args.dims[1]),
            #                          '{}_{}_bo_{}_{}_level_reward_heatmap.jpg'.format(args.rl, args.heuristic, bo, args.reward_level)))
    cbar = fig.colorbar(im, ax=axes, location='bottom')
    cbar.ax.set_xlabel("{} - {}".format(args.rl, args.heuristic), rotation=0)
    # fig.tight_layout()
    plt.savefig(
        os.path.join(
            args.root, '{}_vs_{}'.format(args.dims[0], args.dims[1]),
            '{}_{}_{}_level_reward_seed_{}_heatmap.jpg'.format(
                args.rl, args.heuristic, args.reward_level, args.seed)))
    plt.close()