# Example 1
def asymptotic_real():
    """Plot asymptotic test rewards on real traces.

    Draws a bar chart comparing UDR models trained with increasing
    fractions of real traces (5%-100%) against Genet trained on
    synthetic+real traces, then saves the figure as SVG and converts
    it to a cropped PDF via inkscape/pdfcrop.

    Relies on module-level globals: plt, np, HATCHES, SAVE_ROOT,
    compute_std_of_mean, udr3_real_*_ethernet_rewards, real_reward,
    real_reward_err.
    """
    # Local import: only needed for the SVG->PDF conversion step below.
    import subprocess

    plt.rcParams['font.size'] = 34
    plt.rcParams['axes.labelsize'] = 34
    plt.rcParams['axes.titlesize'] = 34
    plt.rcParams['legend.fontsize'] = 34
    fig, ax = plt.subplots(figsize=(9.5, 5))

    # Hard-coded point estimates and std-of-mean errors measured offline.
    bbr_reward = 192.81  #32.94   368.93  0.03
    udr_real_synthetic_reward = 171.16  #    23.67   194.00  0.02
    udr_real_synthetic_reward_err = 24.22
    genet_real_synthetic_reward = 239.39  # 30.93   208.04  0.02
    genet_real_synthetic_reward_err = 7.34
    cubic_reward = 97.16  #  33.99   802.69  0.02

    # UDR bars: 5% (hard-coded), 10%, 20%, 50%, 100% real traces.
    ax.bar(
        [1, 2.2, 3.4, 4.6, 5.8],
        [
            udr_real_synthetic_reward,  # 1%
            np.mean(udr3_real_10percent_ethernet_rewards),
            np.mean(udr3_real_20percent_ethernet_rewards),
            np.mean(udr3_real_50percent_ethernet_rewards),
            real_reward
        ],
        yerr=[
            udr_real_synthetic_reward_err,
            compute_std_of_mean(udr3_real_10percent_ethernet_rewards),
            compute_std_of_mean(udr3_real_20percent_ethernet_rewards),
            compute_std_of_mean(udr3_real_50percent_ethernet_rewards),
            real_reward_err
        ],
        hatch=HATCHES[:5],
        capsize=8)

    # Genet bar, placed apart from the UDR group.
    ax.bar([7.3], [genet_real_synthetic_reward],
           yerr=[genet_real_synthetic_reward_err],
           capsize=8,
           color='C2')

    ax.set_xticks([1, 2.2, 3.4, 4.6, 5.8, 7.3])
    ax.set_xticklabels(
        ['5%', '10%', '20%', '50%', '100%', 'Genet\n(synthetic+real)'], )
    ax.annotate("RL (synthetic + real)", (0.9, 209))
    ax.set_ylabel('Test reward')
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.tick_params(
        axis='x',  # changes apply to the x-axis
        which='both',  # both major and minor ticks are affected
        bottom=False,  # ticks along the bottom edge are off
        top=False)  # ticks along the top edge are off

    svg_file = os.path.join(SAVE_ROOT, 'evaluation_asymptotic_real_new.svg')
    pdf_file = os.path.join(SAVE_ROOT, 'evaluation_asymptotic_real_new.pdf')
    fig.savefig(svg_file, bbox_inches='tight')
    # Use subprocess with argument lists instead of os.system string
    # formatting: no shell involved, paths with spaces are safe, and the
    # commands remain best-effort (check=False) like os.system was.
    subprocess.run(
        ['inkscape', svg_file, '--export-pdf={}'.format(pdf_file)],
        check=False)
    subprocess.run(['pdfcrop', '--margins', '1', pdf_file, pdf_file],
                   check=False)
# Example 2
def main():
    """Scatter-plot fine-tuning improvement vs. the BBR-Aurora reward gap.

    One subplot per (SAVE_ROOT, CONFIG_ROOT) pair.  For each config, loads
    rewards measured before/after fine-tuning and for BBR, computes the
    gap (BBR - before) and the improvement (after - before), filters out
    a few hand-picked outliers, and scatters the remaining points.
    """
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))

    for fig_idx, (ax, save_root,
                  config_root) in enumerate(zip(axes, SAVE_ROOTS,
                                                CONFIG_ROOTS)):
        seed = 10
        gaps = []
        improvements = []
        for config_id in range(150):
            # One result directory per test trace (10 traces per config).
            trace_dirs = [
                os.path.join(save_root, 'seed_{}'.format(seed),
                             'config_{:02d}'.format(config_id),
                             'trace_{:05d}'.format(i)) for i in range(10)
            ]

            reward_before, reward_std_before, rewards_before = load_results(
                [os.path.join(d, 'before') for d in trace_dirs])
            reward_after, reward_std_after, rewards_after = load_results(
                [os.path.join(d, 'after_best_pkt_level_reward')
                 for d in trace_dirs])
            bbr_old_reward, reward_std_bbr_old, bbr_rewards = load_results(
                [os.path.join(d, 'bbr_old') for d in trace_dirs], 'bbr_old')

            improvement = compute_improve(fig_idx, config_id, reward_before,
                                          reward_after)
            gap = bbr_old_reward - reward_before

            improv_std = compute_std_of_mean(rewards_after - rewards_before)
            gap_std = compute_std_of_mean(bbr_rewards - rewards_before)

            # Drop configs where BBR already loses, plus one hand-picked
            # outlier (config 14).
            if gap < 0 or config_id == 14:
                continue
            # Drop near-zero gaps paired with implausibly large improvements.
            if gap > 0 and gap < 10 and improvement > 75:
                continue

            gaps.append(gap)
            improvements.append(improvement)
            print("config_id: {}, gap={:.2f}, improv={:.2f}".format(
                config_id, gap, improvement))

        print(len(gaps))
        ax.scatter(gaps, improvements)
        ax.axhline(y=0, c='k', ls='--')
        ax.set_xlabel('Gap(BBR - Aurora reward)')
        ax.set_ylabel('Improvement(after training - before training)')
    fig.set_tight_layout(True)
    plt.show()
# Example 3
def new_main():
    """Single-panel gap-vs-improvement scatter plot, with CSV export.

    For each config (0-109, minus hand-excluded ones), loads rewards
    before/after fine-tuning and for BBR, takes the best of the three
    "after" checkpoints as the post-training reward, computes the gap
    (BBR - before) and improvement (after - before), scatters the
    points, and writes them to a CSV for the paper figure.
    """
    fig, axes = plt.subplots(1, 1, figsize=(8, 6))

    for fig_idx, (ax, save_root, config_root) in enumerate(
            zip([axes], SAVE_ROOTS, CONFIG_ROOTS)):
        gaps = []
        improvements = []
        seed = 10
        for config_id in range(0, 110):
            # Hand-excluded configs with known-bad measurements.
            if config_id == 90 or config_id == 93 or config_id == 97:
                continue
            # One result directory per test trace (50 traces per config).
            save_dirs = [
                os.path.join(save_root, 'seed_{}'.format(seed),
                             'config_{:02d}'.format(config_id),
                             'trace_{:05d}'.format(i)) for i in range(50)
            ]
            before_save_dirs = [
                os.path.join(save_dir, 'before') for save_dir in save_dirs
            ]
            after_save_dirs = [
                os.path.join(save_dir, 'after_best_pkt_level_reward')
                for save_dir in save_dirs
            ]
            bbr_save_dirs = [
                os.path.join(save_dir, 'bbr_old') for save_dir in save_dirs
            ]
            reward_before, reward_std_before, rewards_before = load_results(
                before_save_dirs)
            reward_after, reward_std_after, rewards_after = load_results(
                after_save_dirs)
            bbr_old_reward, reward_std_bbr_old, bbr_rewards = load_results(
                bbr_save_dirs, 'bbr_old')

            # Post-training reward is the best across the three saved
            # checkpoints: best-pkt-level-reward, final ('after'), and
            # 'after_best'.
            end_after_save_dirs = [
                os.path.join(save_dir, 'after') for save_dir in save_dirs
            ]
            end_reward_after, end_reward_std_after, end_rewards_after = load_results(
                end_after_save_dirs)
            reward_after = max(reward_after, end_reward_after)

            best_after_save_dirs = [
                os.path.join(save_dir, 'after_best') for save_dir in save_dirs
            ]
            best_reward_after, best_reward_std_after, best_rewards_after = load_results(
                best_after_save_dirs)
            reward_after = max(reward_after, best_reward_after)

            improvement = compute_improve(fig_idx, config_id, reward_before,
                                          reward_after)
            gap = bbr_old_reward - reward_before

            try:
                improv_std = compute_std_of_mean(rewards_after -
                                                 rewards_before)
                gap_std = compute_std_of_mean(bbr_rewards - rewards_before)
            except Exception:
                # Best-effort: skip configs whose per-trace reward arrays
                # are missing/mismatched.  (Was a bare `except:`, which
                # also swallowed KeyboardInterrupt/SystemExit.)
                continue

            # Drop configs where BBR already loses to the pretrained model.
            if gap < 0:  # or config_id == 14:
                continue

            gaps.append(gap)
            improvements.append(improvement)
            print(
                "config_id: {}, gap={:.2f}, improv={:.2f}, bbr={:.2f}, before={:.2f}"
                .format(config_id, gap, improvement, bbr_old_reward,
                        reward_before))

        print(len(gaps))
        ax.scatter(gaps, improvements)
        ax.axhline(y=0, c='k', ls='--')
        ax.set_xlabel('Gap(BBR - Aurora reward)')
        ax.set_ylabel('Improvement(after training - before training)')
        # Export the scatter points for the camera-ready figure.
        with open('../../figs_sigcomm22/genet_gap_improvement.csv', 'w') as f:
            writer = csv.writer(f, lineterminator='\n')
            writer.writerow(['genet_metric', 'improvement'])
            writer.writerows(zip(gaps, improvements))
        break
    fig.set_tight_layout(True)
    plt.show()
# Example 4
# Single-seed point estimates of test rewards on cellular traces.
udr2_cellular_rewards = [187.61]
udr3_cellular_rewards = [203.96]
real_default_cellular_rewards = [195.42]

plt.figure()

# Baseline heuristic: BBR on the Ethernet trace set.
plt.bar([1], [np.mean(bbr_ethernet_rewards)])
# UDR-1/2/3, real-default, and UDR-3 with 5% real traces,
# each with std-of-mean error bars.
plt.bar([2.5, 3.5, 4.5, 5.5, 6.5], [
    np.mean(udr1_ethernet_rewards),
    np.mean(udr2_ethernet_rewards),
    np.mean(udr3_ethernet_rewards),
    np.mean(real_default_ethernet_rewards),
    np.mean(udr3_real_5percent_ethernet_rewards)
],
        yerr=[
            compute_std_of_mean(udr1_ethernet_rewards),
            compute_std_of_mean(udr2_ethernet_rewards),
            compute_std_of_mean(udr3_ethernet_rewards),
            compute_std_of_mean(real_default_ethernet_rewards),
            compute_std_of_mean(udr3_real_5percent_ethernet_rewards)
        ])

# Curriculum-learning baselines (CL1, CL2).
plt.bar([8, 9], [np.mean(cl1_ethernet_rewards),
                 np.mean(cl2_ethernet_rewards)],
        yerr=[
            compute_std_of_mean(cl1_ethernet_rewards),
            compute_std_of_mean(cl2_ethernet_rewards)
        ])
# Genet.
plt.bar([10.5], [np.mean(genet_ethernet_rewards)],
        yerr=[compute_std_of_mean(genet_ethernet_rewards)])
plt.bar([12, 13, 14, 15, 16, 17], [
# Example 5
def main():
    """Render per-BO-iteration heatmaps of (RL reward - heuristic reward).

    Sweeps a 2-D grid of trace configurations (args.dims), averages the
    reward gap between the selected RL model and the heuristic over up
    to 10 traces per cell, and draws one heatmap per BO iteration
    (0, 3, ..., 27) on a 2x5 subplot grid.  For GENET-style runs the BO
    points chosen so far are overlaid as red markers.  Saves the figure
    as a JPG under args.root.
    """
    args = parse_args()
    # Axis values / ticks / labels for the two swept trace dimensions.
    dim0_vals, dim0_ticks, dim0_ticklabels, dim0_axlabel = get_dim_vals(
        args.dims[0])
    dim1_vals, dim1_ticks, dim1_ticklabels, dim1_axlabel = get_dim_vals(
        args.dims[1])
    fig, axes = plt.subplots(2, 5, figsize=(12, 10))
    # Track the global gap range across all BO iterations.
    # (-np.inf instead of np.NINF: NINF was removed in NumPy 2.0.)
    max_gap = -np.inf
    min_gap = np.inf

    gap_matrices = []
    # Per-cell trace-count ratio produced by an earlier run (unused below
    # except as a saved artifact; kept for reproducibility).
    with open('heatmap_trace_cnt_ratio.npy', 'rb') as f:
        cnt_ratio = np.load(f)
    bo_range = range(0, 30, 3)
    for bo in bo_range:
        results = []
        std_mat = np.zeros((len(dim0_vals), len(dim1_vals)))
        for i in range(len(dim0_vals)):
            row = []
            for j in range(len(dim1_vals)):
                gaps = []
                cnt = 10  # traces per (i, j) grid cell
                for k in range(cnt):
                    trace_dir = os.path.join(
                        args.root, "{}_vs_{}/pair_{}_{}/trace_{}".format(
                            args.dims[0], args.dims[1], i, j, k))
                    df = load_summary(
                        os.path.join(trace_dir, args.heuristic,
                                     '{}_summary.csv'.format(args.heuristic)))
                    heuristic_reward = df['{}_level_reward'.format(
                        args.reward_level)]
                    # Pick the RL summary matching the requested model kind.
                    if args.rl == 'pretrained':
                        df = load_summary(
                            os.path.join(trace_dir, 'pretrained',
                                         'aurora_summary.csv'))
                    elif args.rl == 'overfit_config':
                        if bo == 0:
                            df = load_summary(
                                os.path.join(trace_dir, 'overfit_config',
                                             'aurora_summary.csv'))
                        else:
                            # overfit_config has no per-BO checkpoints.
                            continue
                    else:
                        df = load_summary(
                            os.path.join(trace_dir, args.rl,
                                         'seed_{}'.format(args.seed),
                                         "bo_{}".format(bo), 'step_64800',
                                         'aurora_summary.csv'))
                    genet_reward = df['{}_level_reward'.format(
                        args.reward_level)]
                    gaps.append(genet_reward - heuristic_reward)
                row.append(np.mean(gaps))
                # Record dispersion only where the RL model loses on average.
                if np.mean(gaps) < 0:
                    std_mat[i, j] = compute_std_of_mean(gaps)
                max_gap = max(max_gap, np.mean(gaps))
                min_gap = min(min_gap, np.mean(gaps))
            results.append(row)
        results = np.array(results)
        gap_matrices.append(results)

    for subplot_idx, (gap_matrix, bo, ax) in enumerate(
            zip(gap_matrices, bo_range, axes.flatten())):
        im = ax.imshow(gap_matrix)
        im.set_clim(vmax=0, vmin=-200)

        if args.rl != 'pretrained' and args.rl != 'overfit_config':
            # Overlay the BO-selected configurations up to this iteration.
            selected_configs = read_json_file(
                os.path.join(args.models_path, 'bo_{}.json'.format(bo)))

            selected_dim1_idxs = []
            selected_dim0_idxs = []
            for selected_config in selected_configs[1:]:
                selected_dim1_idxs.append(
                    find_idx(selected_config[args.dims[1]][0], dim1_vals))
                selected_dim0_idxs.append(
                    find_idx(selected_config[args.dims[0]][0], dim0_vals))

            ax.scatter(selected_dim1_idxs,
                       selected_dim0_idxs,
                       marker='o',
                       c='r')
        # y-axis labels only on the leftmost column of the 2x5 grid.
        if subplot_idx == 0 or subplot_idx == 5:
            ax.set_yticks(dim0_ticks)
            ax.set_yticklabels(dim0_ticklabels)
            ax.set_ylabel(dim0_axlabel)
        else:
            ax.set_yticks([])
        ax.set_xticks(dim1_ticks)
        ax.set_xticklabels(dim1_ticklabels)
        # x-axis label only on the middle column of each row.
        if subplot_idx == 2 or subplot_idx == 7:
            ax.set_xlabel(dim1_axlabel)

        if args.rl == 'pretrained':
            # Pretrained model has no BO progression: one panel is enough.
            ax.set_title("pretrained")
            break
        elif args.rl == 'overfit_config':
            # Mark the single overfit target cell, parsed from the dir name.
            tokens = os.path.basename(os.path.dirname(args.root)).split('_')
            overfit_config0_dim0_idx = int(tokens[1])
            overfit_config0_dim1_idx = int(tokens[2])
            ax.scatter(overfit_config0_dim1_idx,
                       overfit_config0_dim0_idx,
                       marker='.',
                       c='r',
                       s=2)
        else:
            ax.set_title("BO {}".format(bo))
    cbar = fig.colorbar(im, ax=axes, location='bottom')
    cbar.ax.set_xlabel("{} - {}".format(args.rl, args.heuristic), rotation=0)
    plt.savefig(
        os.path.join(
            args.root, '{}_vs_{}'.format(args.dims[0], args.dims[1]),
            '{}_{}_{}_level_reward_seed_{}_heatmap.jpg'.format(
                args.rl, args.heuristic, args.reward_level, args.seed)))
    plt.close()