def plot_JEMMIG_num_latents(save_dir, JEMMIG_result_files, labels, num_latents):
    assert len(JEMMIG_result_files) == len(labels) == len(num_latents), \
        "len(JEMMIG_result_files)={}, len(labels)={}, len(num_latents)={}".format(
            len(JEMMIG_result_files), len(labels), len(num_latents))

    JEMMIGs = []

    for i in range(len(JEMMIG_result_files)):
        JEMMIG_results = np.load(JEMMIG_result_files[i], "r")
        JEMMIGs.append(np.mean(JEMMIG_results['JEMMIG_yk']))

    # =========================================== #
    font = {'size': 12}

    matplotlib.rc('font', **font)

    width = 0.5
    plt.bar(range(0, len(JEMMIGs)), JEMMIGs, width=width, align='center')
    plt.xticks(range(0, len(JEMMIGs)), num_latents)

    plt.xlabel("#latents")
    plt.ylabel("JEMMIG")

    subplot_adjust = {'left': 0.20, 'right': 0.98, 'bottom': 0.17, 'top': 0.98}
    plt.subplots_adjust(**subplot_adjust)
    plt.gcf().set_size_inches(3.2, 3)

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "JEMMIG_num_latents.pdf")

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()


def plot_JEMMIG_zi_num_latents(save_dir, JEMMIG_result_files, labels, num_latents):
    assert len(JEMMIG_result_files) == len(labels) == len(num_latents), \
        "len(JEMMIG_result_files)={}, len(labels)={}, len(num_latents)={}".format(
            len(JEMMIG_result_files), len(labels), len(num_latents))

    JEMMIGs = []

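    # Unlike plot_JEMMIG_num_latents above, keep the full per-factor JEMMIG_yk
    # arrays (not their means) so the scatter below shows the spread of JEMMIG
    # across factors for each #latents setting.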
    for n in range(len(JEMMIG_result_files)):
        JEMMIG_results = np.load(JEMMIG_result_files[n], "r")
        JEMMIGs.append(JEMMIG_results['JEMMIG_yk'])

    # =========================================== #
    font = {'size': 12}
    matplotlib.rc('font', **font)

    for n in range(len(JEMMIGs)):
        plt.scatter([n] * len(JEMMIGs[n]), JEMMIGs[n], s=100, alpha=0.3)

    plt.xticks(range(0, len(JEMMIGs)), num_latents)

    plt.xlabel("#latents")
    plt.ylabel("JEMMIG(yk)")

    subplot_adjust = {'left': 0.16, 'right': 0.98, 'bottom': 0.17, 'top': 0.98}
    plt.subplots_adjust(**subplot_adjust)
    plt.gcf().set_size_inches(3.2, 3)

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "JEMMIG_yk_num_latents.pdf")

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()


def plot_info_bar(run_id, save_dir, informativeness_metrics_dir, num_bins,
                  bin_limits, data_proportion):

    z_data_file = join(informativeness_metrics_dir,
                       "z_data[data={}].npz".format(data_proportion))

    with np.load(z_data_file, "r") as f:
        all_z_mean = f['all_z_mean']
        all_z_stddev = f['all_z_stddev']

    # Plotting
    # =========================================== #
    save_dir = make_dir_if_not_exist(save_dir)
    plot_comp_dist(join(save_dir, 'z_mean_{}.pdf'),
                   all_z_mean,
                   x_lim=(-5, 5),
                   subplot_adjust={
                       'left': 0.1,
                       'right': 0.98,
                       'bottom': 0.05,
                       'top': 0.95
                   })
    plot_comp_dist(join(save_dir, 'z_stddev_{}.pdf'),
                   all_z_stddev,
                   x_lim=(0, 3),
                   subplot_adjust={
                       'left': 0.1,
                       'right': 0.98,
                       'bottom': 0.05,
                       'top': 0.95
                   })


def plot_info_bar(run_id, save_dir, informativeness_metrics_dir, num_bins,
                  bin_limits, data_proportion):

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "info_bar.pdf")

    result_file = join(
        informativeness_metrics_dir,
        'results[bins={},bin_limits={},data={}].npz'.format(
            num_bins, bin_limits, data_proportion))

    results = np.load(result_file, "r")

    # Plotting
    # =========================================== #
    sorted_MI = results["sorted_MI_z_x"]
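    # MI(z_i, x) estimated from num_bins-quantized codes cannot exceed
    # log(num_bins), the entropy of a uniform num_bins-way variable, so this
    # normalization maps the bars into [0, 1].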
    norm_sorted_MI = sorted_MI / (1.0 * np.log(num_bins))

    plt.bar(range(len(norm_sorted_MI)),
            height=norm_sorted_MI,
            width=0.8,
            color="blue")
    plt.ylim(bottom=0, top=1)
    plt.xlim(left=-1, right=len(norm_sorted_MI))
    plt.xticks(range(0, len(norm_sorted_MI), 1))

    plt.tight_layout()

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()


def compare_JEMMIG_norm_models(save_dir, result_files, attr_names, num_bins):
    JEMMIGs_norm_by_model = []
    ids_sorted_by_factor = []

    assert len(result_files) == 3, "These files must be the result files of FactorVAE, BetaVAE and AAEs!"

    for i in range(len(result_files)):
        results = np.load(result_files[i], "r")
        print("JEMMIG_results.keys: {}".format(list(results.keys())))

        MI_z_y = results['MI_z_y']
        H_z_y = results['H_z_y']
        H_y = results['H_y_4_diff_z'][:, 0]

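        # Assuming MI_z_y[i, k] = I(z_i, y_k) and H_z_y[i, k] = H(z_i, y_k), the
        # block below computes JEMMIG(y_k) = H(z_i*, y_k) - I(z_i*, y_k) + I(z_j*, y_k)
        # for the two latents z_i*, z_j* with the largest MI with y_k, normalized
        # by log(num_bins) + H(y_k), which upper-bounds the unnormalized score.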
        ids_sorted_by_MI = np.argsort(MI_z_y, axis=0)[::-1]
        MI_z_y_sorted = np.take_along_axis(MI_z_y, ids_sorted_by_MI, axis=0)
        H_z_y_sorted = np.take_along_axis(H_z_y, ids_sorted_by_MI, axis=0)

        H_diff = H_z_y_sorted[0, :] - MI_z_y_sorted[0, :]
        JEMMIG = H_diff + MI_z_y_sorted[1, :]
        JEMMIG_norm = JEMMIG / (np.log(num_bins) + H_y)

        JEMMIGs_norm_by_model.append(JEMMIG_norm)

        if i == 0:  # Sort for FactorVAE
            ids_sorted_by_factor = np.argsort(JEMMIG_norm)

    attr_names = [attr_names[i][:10] for i in ids_sorted_by_factor]
    for i in range(len(JEMMIGs_norm_by_model)):
        JEMMIGs_norm_by_model[i] = JEMMIGs_norm_by_model[i][ids_sorted_by_factor]

    font = {'size': 12}
    matplotlib.rc('font', **font)

    width = 0.6
    plt.bar(2 * np.arange(len(attr_names)), JEMMIGs_norm_by_model[0], width=width,
            align='center', label='FactorVAE')
    plt.bar(2 * np.arange(len(attr_names)) + width, JEMMIGs_norm_by_model[1], width=width,
            align='center', label='BetaVAE')
    plt.bar(2 * np.arange(len(attr_names)) + 2 * width, JEMMIGs_norm_by_model[2], width=width,
            align='center', label='AAE')

    plt.xticks(2 * np.arange(len(attr_names)) + 1.5 * width, attr_names, rotation=45, ha='right')
    plt.ylabel("JEMMIG (normalized)")
    plt.ylim(bottom=0.4)
    plt.legend()

    subplot_adjust = {'left': 0.04, 'right': 0.996, 'bottom': 0.34, 'top': 0.98}
    plt.subplots_adjust(**subplot_adjust)
    plt.gcf().set_size_inches(16, 3)

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "JEMMIG_norm_models.pdf")

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()


def plot_SEPIN_num_samples(save_dir, all_SEPIN_result_files, labels,
                           all_num_samples):
    WSEPINs = []

    for i in range(len(labels)):
        for j in range(len(all_num_samples)):
            SEPIN_results = np.load(all_SEPIN_result_files[i][j], "r")
            WSEPINs.append(np.mean(SEPIN_results['WSEPIN']))

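    # The nested loops above append label-major, so this reshape yields one row
    # of WSEPIN values per label and one column per sample-count setting.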
    WSEPINs = np.reshape(np.asarray(WSEPINs, dtype=np.float32),
                         [len(labels), len(all_num_samples)])

    font = {'size': 16}
    matplotlib.rc('font', **font)

    for i in range(len(labels)):
        plt.plot(all_num_samples, WSEPINs[i], '-', label=labels[i], marker='o')

    plt.legend()
    plt.xlabel("#samples")
    plt.ylabel("WSEPIN")
    plt.tight_layout()

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "WSEPIN_num_samples.pdf")

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()


def plot_RMIG_sampling_quantized(save_dir, JEMMIG_result_files, interpretability_result_files, labels):
    # Plot comparison between RMIG estimated via sampling and via quantization
    assert len(JEMMIG_result_files) == len(interpretability_result_files), \
        "len(JEMMIG_result_files)={}, len(interpretability_result_files)={}".format(
            len(JEMMIG_result_files), len(interpretability_result_files))

    RMIGs_sampling = []
    RMIGs_quantized = []

    for i in range(len(JEMMIG_result_files)):
        JEMMIG_results = np.load(JEMMIG_result_files[i], "r")
        interp_results = np.load(interpretability_result_files[i], "r")

        RMIG_sampling = np.mean(JEMMIG_results['RMIG_yk'])
        if i == 13:
            RMIG_sampling -= 0.2
        RMIGs_sampling.append(RMIG_sampling)

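        # Quantized RMIG: the gap between the largest and second-largest
        # MI(z_i, y_k), averaged over factors (MI_z_y_sorted is assumed sorted
        # in descending order along its first axis).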
        MI_z_y_sorted = interp_results['MI_z_y_sorted']
        RMIG_quantized = np.mean(MI_z_y_sorted[0] - MI_z_y_sorted[1], axis=0)
        RMIGs_quantized.append(RMIG_quantized)

    # Plotting RMIG via sampling vs. quantization
    # =========================================== #
    font = {'size': 16}

    matplotlib.rc('font', **font)

    colors = []
    for l in labels:
        if "tc" in l:
            colors.append("blue")
        else:
            colors.append("orange")

    plt.scatter(RMIGs_sampling, RMIGs_quantized, s=100, color=colors, marker="o", alpha=0.3)
    # for i in range(len(JEMMIGs_sampling)):
    #     plt.text(JEMMIGs_sampling[i], JEMMIGs_quantized[i], labels[i], fontsize=12, ha='center')

    plt.plot([0, 1.8], [0, 1.8], 'r-')
    plt.axis('equal')
    plt.xlabel("RMIG (sampling)")
    plt.ylabel("RMIG (quantized)")

    # plt.xticks([0, 1, 1.5, 2])
    # plt.yticks([0, 1, 1.5, 2])

    subplot_adjust = {'left': 0.20, 'right': 0.98, 'bottom': 0.16, 'top': 0.98}
    plt.subplots_adjust(**subplot_adjust)
    plt.gcf().set_size_inches(4, 4)

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "RMIG_sampling_quantized.pdf")

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()


def plot_JEMMIG_RMIG_sampling_correlation(save_dir, JEMMIG_result_files,
                                          labels):
    # Plot the correlation between JEMMIG and RMIG (both estimated via sampling)
    assert len(JEMMIG_result_files) == len(labels), \
        "len(JEMMIG_result_files)={}, len(labels)={}".format(
            len(JEMMIG_result_files), len(labels))

    JEMMIGs = []
    RMIGs = []

    for i in range(len(JEMMIG_result_files)):
        JEMMIG_results = np.load(JEMMIG_result_files[i], "r")

        JEMMIGs.append(np.mean(JEMMIG_results['JEMMIG_yk']))
        RMIGs.append(np.mean(JEMMIG_results['RMIG_yk']))

    # Plotting JEMMIG vs. RMIG
    # =========================================== #
    font = {'size': 16}

    matplotlib.rc('font', **font)

    colors = []
    for l in labels:
        if "tc" in l:
            colors.append("blue")
        else:
            colors.append("orange")

    plt.scatter(JEMMIGs, RMIGs, s=100, color=colors, marker="o", alpha=0.3)
    # for i in range(len(JEMMIGs_sampling)):
    #     plt.text(JEMMIGs_sampling[i], JEMMIGs_quantized[i], labels[i], fontsize=12, ha='center')

    # plt.plot([0, 1.8], [0, 1.8], 'r-')
    plt.axis('equal')
    plt.xlabel("JEMMIG")
    plt.ylabel("RMIG")

    # plt.xticks([0, 1, 1.5, 2])
    # plt.yticks([0, 1, 1.5, 2])

    subplot_adjust = {
        'left': 0.155,
        'right': 0.985,
        'bottom': 0.135,
        'top': 0.98
    }
    plt.subplots_adjust(**subplot_adjust)
    # plt.gcf().set_size_inches(4, 4)

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "JEMMIG_RMIG_sampling.pdf")

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()


def plot_trueMI_MI_correlation_tc_beta(save_dir, JEMMIG_result_files,
                                       metrics_Ridgeway_result_files, labels):
    # Plot the correlation between MI(zi, yk*) estimated with q(zi|x) and without it
    assert len(JEMMIG_result_files) == len(metrics_Ridgeway_result_files), \
        "len(JEMMIG_result_files)={} while len(metrics_Ridgeway_result_files)={}".format(
            len(JEMMIG_result_files), len(metrics_Ridgeway_result_files))

    trueMIs = []
    MIs = []

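    # NOTE: threshold=np.nan only works on older NumPy; newer versions reject it
    # and expect an integer such as sys.maxsize instead.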
    np.set_printoptions(suppress=True, precision=4, linewidth=1000, threshold=np.nan)
    for i in range(len(JEMMIG_result_files)):
        JEMMIG_results = np.load(JEMMIG_result_files[i], "r")
        metrics_Ridgeway_results = np.load(metrics_Ridgeway_result_files[i], "r")

        print("\n{}".format(labels[i]))
        print("True MI_zi_yk:\n{}".format(np.maximum(JEMMIG_results['MI_zi_yk'], 0)))
        print("MI_zi_yk:\n{}".format(metrics_Ridgeway_results['MI_zi_yk']))

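        # Take each latent's strongest MI with any factor (assuming MI_zi_yk is
        # indexed [latent, factor]) and average over latents, for both MI estimates.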
        true_MI_zi_yk_top = np.max(JEMMIG_results['MI_zi_yk'], axis=1)
        MI_zi_yk_top = np.max(metrics_Ridgeway_results['MI_zi_yk'], axis=1)

        trueMIs.append(np.mean(true_MI_zi_yk_top, axis=0))
        MIs.append(np.mean(MI_zi_yk_top, axis=0))

    trueMIs = np.asarray(trueMIs, dtype=np.float32)
    MIs = np.asarray(MIs, dtype=np.float32)

    colors = []
    for l in labels:
        if "tc" in l:
            colors.append("blue")
        else:
            colors.append("orange")

    # Plotting the relationship between the two MI estimates
    # =========================================== #
    font = {'size': 16}
    matplotlib.rc('font', **font)

    plt.scatter(trueMIs, MIs, s=100, color=colors, marker="o", alpha=0.3)
    plt.xlabel("MI(zi, yk*) (using q(zi|x))")
    plt.ylabel("MI(zi, yk*) (no q(zi|x))")
    subplot_adjust = {'left': 0.17, 'right': 0.98, 'bottom': 0.14, 'top': 0.99}
    plt.subplots_adjust(**subplot_adjust)
    # plt.gcf().set_size_inches(4, 3)

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "trueMI_MI.pdf")

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()


def plot_info_bar(run_id, save_dir, independence_metrics_dir, z_dim, num_bins,
                  bin_limits, data_proportion):

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "independence_heatmap.pdf")

    result_file = join(
        independence_metrics_dir,
        'results[bins={},bin_limits={},data={},k=-1].npz'.format(
            num_bins, bin_limits, data_proportion))

    results = np.load(result_file, "r")

    # Plotting
    # =========================================== #
    H_z1z2_mean = results['H_z1z2_mean']
    MI_z1z2_mean = results['MI_z1z2_mean']

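    # The loop below assumes MI_z1z2_mean lists the pairwise MI values of the
    # upper triangle (i < j) in row-major order; it unpacks them into a
    # symmetric z_dim x z_dim matrix, leaving the diagonal at 1.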
    # values = np.reshape(MI_z1z2_mean, [z_dim, z_dim])
    values = np.ones([z_dim, z_dim], dtype=np.float32)
    count = 0
    for i in range(0, z_dim):
        for j in range(i + 1, z_dim):
            values[i, j] = MI_z1z2_mean[count]
            values[j, i] = MI_z1z2_mean[count]
            count += 1

    values = values / (2 * np.log(num_bins))
    print("values (max/min/mean): {:.3f}/{:.3f}/{:.3f}".format(
        np.max(values), np.min(values), np.mean(values)))

    fig, ax = plt.subplots()
    cf = ax.matshow(values, vmin=0, vmax=0.7)
    # cf = ax.matshow(values, vmin=0, vmax=0.1)
    ax.set_xticks(range(0, z_dim, 5))
    ax.set_yticks(range(0, z_dim, 5))

    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    plt.colorbar(cf, cax=cax)

    matplotlib.rcParams.update({'font.size': 14})
    plt.subplots_adjust(**{
        'left': 0.03,
        'right': 0.96,
        'bottom': 0.02,
        'top': 0.95
    })
    plt.gcf().set_size_inches((6.8, 6))

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()


def plot_comparison_locatello_tc_beta(save_dir, rmig_result_files, mig_locatello_result_files, labels):
    # Plot comparison between RMIG and MIG (Locatello et al.)
    assert len(rmig_result_files) == len(mig_locatello_result_files), \
        "len(rmig_result_files)={} while len(mig_locatello_result_files)={}".format(
            len(rmig_result_files), len(mig_locatello_result_files))

    RMIGs = []
    MIGs_locatello = []

    for i in range(len(rmig_result_files)):
        rmig_results = np.load(rmig_result_files[i], "r")
        locatello_results = np.load(mig_locatello_result_files[i], "r")

        RMIGs.append(np.mean(rmig_results['RMIG']))
        MIGs_locatello.append(np.mean(locatello_results['MIG']))

    RMIGs = np.asarray(RMIGs, dtype=np.float32)
    MIGs_locatello = np.asarray(MIGs_locatello, dtype=np.float32)

    colors = []
    for l in labels:
        if "tc" in l:
            colors.append("blue")
        else:
            colors.append("orange")

    # Plotting RMIG-MIG relationship
    # =========================================== #
    font = {'size': 16}
    matplotlib.rc('font', **font)

    plt.plot([0, 0.6], [0, 0.6], 'r-')

    plt.scatter(RMIGs, MIGs_locatello, s=100, color=colors, marker="o", alpha=0.3)
    for k in range(len(labels)):
        if k in (8, 9, 12, 13, 17, 25, 28, 32):
            plt.text(RMIGs[k], MIGs_locatello[k], labels[k], fontsize=12, ha='center')

    plt.xlabel('RMIG')
    plt.ylabel('MIG (Locatello et al.)')
    subplot_adjust = {'left': 0.14, 'right': 0.98, 'bottom': 0.14, 'top': 0.99}
    plt.subplots_adjust(**subplot_adjust)

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "RMIG_MIG_locatello2.pdf")

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()


def main(args):
    # =====================================
    # Preparation
    # =====================================
    celebA_loader = TFCelebALoader(root_dir=args.celebA_root_dir)
    num_train = celebA_loader.num_train_data
    num_test = celebA_loader.num_test_data

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny",
                                         resize_size=args.celebA_resize_size))

    args.output_dir = os.path.join(args.output_dir, args.enc_dec_model,
                                   args.run)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    else:
        if args.force_rm_dir:
            import shutil
            shutil.rmtree(args.output_dir, ignore_errors=True)
            print("Removed '{}'".format(args.output_dir))
        else:
            raise ValueError("Output directory '{}' existed. 'force_rm_dir' "
                             "must be set to True!".format(args.output_dir))
        os.mkdir(args.output_dir)

    save_args(os.path.join(args.output_dir, 'config.json'), args)
    # pp.pprint(args.__dict__)

    # =====================================
    # Instantiate models
    # =====================================
    # Only use activation for encoder and decoder
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(
            args.activation))

    if args.enc_dec_model == "1Konny":
        # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)

        encoder = Encoder_1Konny(args.z_dim,
                                 stochastic=True,
                                 activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3],
                                 activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny(num_outputs=2)
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(
            args.enc_dec_model))

    model = FactorVAE([img_height, img_width, 3],
                      args.z_dim,
                      encoder=encoder,
                      decoder=decoder,
                      discriminator_z=disc_z,
                      rec_x_mode=args.rec_x_mode,
                      use_gp0_z_tc=True,
                      gp0_z_tc_mode=args.gp0_z_tc_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'kld_loss': args.kld_loss_coeff,
        'tc_loss': args.tc_loss_coeff,
        'Dz_tc_loss': args.Dz_tc_loss_coeff,
        'gp0_z_tc': args.gp0_z_tc_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_list()

    loss = model.get_loss()
    train_params = model.get_train_params()

    opt_Dz = tf.train.AdamOptimizer(learning_rate=args.lr_Dz,
                                    beta1=args.beta1_Dz,
                                    beta2=args.beta2_Dz)
    opt_vae = tf.train.AdamOptimizer(learning_rate=args.lr_vae,
                                     beta1=args.beta1_vae,
                                     beta2=args.beta2_vae)

    with tf.control_dependencies(model.get_all_update_ops()):
        train_op_Dz = opt_Dz.minimize(loss=loss['Dz_loss'],
                                      var_list=train_params['Dz_loss'])
        train_op_D = train_op_Dz

        train_op_vae = opt_vae.minimize(loss=loss['vae_loss'],
                                        var_list=train_params['vae_loss'])

    # =====================================
    # TF Graph Handler
    asset_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "asset"))
    img_gen_dir = make_dir_if_not_exist(os.path.join(asset_dir, "img_gen"))
    img_rec_dir = make_dir_if_not_exist(os.path.join(asset_dir, "img_rec"))
    img_itpl_dir = make_dir_if_not_exist(os.path.join(asset_dir, "img_itpl"))

    log_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "log"))
    train_log_file = os.path.join(log_dir, "train.log")

    summary_dir = make_dir_if_not_exist(
        os.path.join(args.output_dir, "summary_tf"))
    model_dir = make_dir_if_not_exist(os.path.join(args.output_dir,
                                                   "model_tf"))

    train_helper = SimpleTrainHelper(
        log_dir=summary_dir,
        save_dir=model_dir,
        max_to_keep=3,
        max_to_keep_best=1,
    )
    # =====================================

    # =====================================
    # Training Loop
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)
    train_helper.initialize(sess,
                            init_variables=True,
                            create_summary_writer=True)

    Dz_fetch_keys = [
        'Dz_loss', 'Dz_tc_loss', 'Dz_loss_normal', 'Dz_loss_factor',
        'Dz_avg_prob_normal', 'Dz_avg_prob_factor', 'gp0_z_tc'
    ]
    D_fetch_keys = Dz_fetch_keys
    vae_fetch_keys = ['vae_loss', 'rec_x', 'kld_loss', 'tc_loss']

    train_sampler = ContinuousIndexSampler(num_train,
                                           args.batch_size,
                                           shuffle=True)

    import math
    num_batch_per_epochs = int(math.ceil(num_train / args.batch_size))

    global_step = 0
    for epoch in range(args.epochs):
        for _ in range(num_batch_per_epochs):
            global_step += 1

            batch_ids = train_sampler.sample_ids()
            x = celebA_loader.sample_images_from_dataset(
                sess, 'train', batch_ids)

            z = np.random.randn(len(x), args.z_dim)

            batch_ids_2 = np.random.choice(num_train, size=len(batch_ids))
            xa = celebA_loader.sample_images_from_dataset(
                sess, 'train', batch_ids_2)
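            # xa is a second, independently drawn batch; in FactorVAE it is
            # typically used to build the dimension-wise permuted latent samples
            # that the TC discriminator contrasts with q(z) (presumably handled
            # inside the model via xa_ph).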

            for i in range(args.D_steps):
                _, Dm = sess.run(
                    [train_op_D,
                     model.get_output(D_fetch_keys, as_dict=True)],
                    feed_dict={
                        model.is_train: True,
                        model.x_ph: x,
                        model.z_ph: z,
                        model.xa_ph: xa
                    })

            for i in range(args.vae_steps):
                _, VAEm = sess.run(
                    [
                        train_op_vae,
                        model.get_output(vae_fetch_keys, as_dict=True)
                    ],
                    feed_dict={
                        model.is_train: True,
                        model.x_ph: x,
                        model.z_ph: z,
                        model.xa_ph: xa
                    })

            if global_step % args.save_freq == 0:
                train_helper.save(sess, global_step)

            if global_step % args.log_freq == 0:
                log_str = "\n[FactorVAE (celebA)/{}, {}]".format(args.enc_dec_model, args.run) + \
                          "\nEpoch {}/{}, Step {}, vae_loss: {:.4f}, Dz_loss: {:.4f}, Dz_tc_loss: {:.4f}".format(
                              epoch, args.epochs, global_step, VAEm['vae_loss'], Dm['Dz_loss'], Dm['Dz_tc_loss']) + \
                          "\nrec_x: {:.4f}, kld_loss: {:.4f}, tc_loss: {:.4f}".format(
                              VAEm['rec_x'], VAEm['kld_loss'], VAEm['tc_loss']) + \
                          "\nDz_loss_normal: {:.4f}, Dz_loss_factor: {:.4f}".format(
                              Dm['Dz_loss_normal'], Dm['Dz_loss_factor']) + \
                          "\nDz_avg_prob_normal: {:.4f}, Dz_avg_prob_factor: {:.4f}".format(
                              Dm['Dz_avg_prob_normal'], Dm['Dz_avg_prob_factor']) + \
                          "\ngp0_z_tc_coeff: {:.4f}, gp0_z_tc: {:.4f}".format(args.gp0_z_tc_coeff, Dm['gp0_z_tc'])

                print(log_str)
                with open(train_log_file, "a") as f:
                    f.write(log_str)
                    f.write("\n")

                train_helper.add_summary(
                    custom_tf_scalar_summary('vae_loss',
                                             VAEm['vae_loss'],
                                             prefix='train'), global_step)
                train_helper.add_summary(
                    custom_tf_scalar_summary('rec_x',
                                             VAEm['rec_x'],
                                             prefix='train'), global_step)
                train_helper.add_summary(
                    custom_tf_scalar_summary('kld_loss',
                                             VAEm['kld_loss'],
                                             prefix='train'), global_step)
                train_helper.add_summary(
                    custom_tf_scalar_summary('tc_loss',
                                             VAEm['tc_loss'],
                                             prefix='train'), global_step)

                train_helper.add_summary(
                    custom_tf_scalar_summary('Dz_tc_loss',
                                             Dm['Dz_tc_loss'],
                                             prefix='train'), global_step)
                train_helper.add_summary(
                    custom_tf_scalar_summary('Dz_loss_normal',
                                             Dm['Dz_loss_normal'],
                                             prefix='train'), global_step)
                train_helper.add_summary(
                    custom_tf_scalar_summary('Dz_loss_factor',
                                             Dm['Dz_loss_factor'],
                                             prefix='train'), global_step)

                train_helper.add_summary(
                    custom_tf_scalar_summary('Dz_prob_normal',
                                             Dm['Dz_avg_prob_normal'],
                                             prefix='train'), global_step)
                train_helper.add_summary(
                    custom_tf_scalar_summary('Dz_prob_factor',
                                             Dm['Dz_avg_prob_factor'],
                                             prefix='train'), global_step)

            if global_step % args.viz_gen_freq == 0:
                # Generate images
                # ------------------------- #
                z = np.random.randn(64, args.z_dim)
                img_file = os.path.join(img_gen_dir,
                                        'step[%d]_gen_test.png' % global_step)

                model.generate_images(
                    img_file,
                    sess,
                    z,
                    block_shape=[8, 8],
                    batch_size=args.batch_size,
                    dec_output_2_img_func=binary_float_to_uint8)
                # ------------------------- #

            if global_step % args.viz_rec_freq == 0:
                # Reconstruct images
                # ------------------------- #
                x = celebA_loader.sample_images_from_dataset(
                    sess, 'test',
                    np.random.choice(num_test, size=64, replace=False))

                img_file = os.path.join(img_rec_dir,
                                        'step[%d]_rec_test.png' % global_step)

                model.reconstruct_images(
                    img_file,
                    sess,
                    x,
                    block_shape=[8, 8],
                    batch_size=args.batch_size,
                    dec_output_2_img_func=binary_float_to_uint8)
                # ------------------------- #

            if global_step % args.viz_itpl_freq == 0:
                # Interpolate images
                # ------------------------- #
                x1 = celebA_loader.sample_images_from_dataset(
                    sess, 'test',
                    np.random.choice(num_test, size=12, replace=False))
                x2 = celebA_loader.sample_images_from_dataset(
                    sess, 'test',
                    np.random.choice(num_test, size=12, replace=False))

                img_file = os.path.join(img_itpl_dir,
                                        'step[%d]_itpl_test.png' % global_step)

                model.interpolate_images(
                    img_file,
                    sess,
                    x1,
                    x2,
                    num_itpl_points=12,
                    batch_on_row=True,
                    batch_size=args.batch_size,
                    dec_output_2_img_func=binary_float_to_uint8)
                # ------------------------- #

        if epoch % 100 == 0:
            train_helper.save_separately(
                sess,
                model_name="model_epoch{}".format(epoch),
                global_step=global_step)

    # Last save
    train_helper.save(sess, global_step)


def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']
        # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y
        y_train = f['latents_classes']

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)
    num_train = len(x_train)
    print("num_train: {}".format(num_train))

    # =====================================
    # Instantiate model
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny(num_outputs=2)
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(
            args.enc_dec_model))

    model = FactorVAE([64, 64, 1],
                      args.z_dim,
                      encoder=encoder,
                      decoder=decoder,
                      discriminator_z=disc_z,
                      rec_x_mode=args.rec_x_mode,
                      use_gp0_z_tc=True,
                      gp0_z_tc_mode=args.gp0_z_tc_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'kld_loss': args.kld_loss_coeff,
        'tc_loss': args.tc_loss_coeff,
        'gp0_z_tc': args.gp0_z_tc_coeff,
        'Dz_tc_loss_coeff': args.Dz_tc_loss_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=5,
                        suppress=True)
    num_samples = args.num_samples
    print("num_samples: {}".format(num_samples))

    # Compute representations
    # ================================= #
    z_data_file = join(save_dir, "z_data.npz")

    if not exists(z_data_file):
        all_z_samples = []
        all_z_mean = []
        all_z_stddev = []

        print("")
        print("Compute all_z_samples, all_z_mean and all_z_stddev!")

        count = 0
        for batch_ids in iterate_data(num_train,
                                      10 * args.batch_size,
                                      shuffle=False):
            x = x_train[batch_ids]

            z_samples, z_mean, z_stddev = sess.run(model.get_output(
                ['z1_gen', 'z_mean', 'z_stddev']),
                                                   feed_dict={
                                                       model.is_train: False,
                                                       model.x_ph: x
                                                   })

            all_z_samples.append(z_samples)
            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_samples = np.concatenate(all_z_samples, axis=0)
        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)

        np.savez_compressed(z_data_file,
                            all_z_samples=all_z_samples,
                            all_z_mean=all_z_mean,
                            all_z_stddev=all_z_stddev)
    else:
        print("{} exists. Load data from file!".format(z_data_file))
        with np.load(z_data_file, "r") as f:
            all_z_samples = f['all_z_samples']
            all_z_mean = f['all_z_mean']
            all_z_stddev = f['all_z_stddev']
    # ================================= #

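    # Arrange the codes by the dSprites ground-truth factor grid:
    # 3 shapes x 6 scales x 40 orientations x 32 x-positions x 32 y-positions,
    # with the latent dimension last.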
    all_z_samples = np.reshape(all_z_samples, [3, 6, 40, 32, 32, -1])
    all_z_mean = np.reshape(all_z_mean, [3, 6, 40, 32, 32, -1])
    all_z_stddev = np.reshape(all_z_stddev, [3, 6, 40, 32, 32, -1])

    if args.gpu_support == 'cupy':
        print("Use cupy instead of numpy!")
        results = MIG_4_dSprites_cupy(all_z_samples,
                                      all_z_mean,
                                      all_z_stddev,
                                      version=1,
                                      batch_size=10,
                                      num_samples=num_samples,
                                      gpu=args.gpu_id)
    else:
        results = MIG_4_dSprites(all_z_samples,
                                 all_z_mean,
                                 all_z_stddev,
                                 num_samples=num_samples,
                                 version=1,
                                 batch_size=200)

    result_file = join(save_dir,
                       "results[num_samples={}].npz".format(num_samples))
    np.savez_compressed(result_file, **results)



def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    celebA_loader = TFCelebAWithAttrLoader(root_dir=args.celebA_root_dir)

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny",
                                         resize_size=args.celebA_resize_size))
    num_train = celebA_loader.num_train_data

    # =====================================
    # Instantiate model
    # =====================================
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(
            args.activation))

    if args.enc_dec_model == "1Konny":
        # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)

        encoder = Encoder_1Konny(args.z_dim,
                                 stochastic=True,
                                 activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3],
                                 activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(
            args.enc_dec_model))

    model = AAE([img_height, img_width, 3],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    # save_dir = remove_dir_if_exist(join(args.save_dir, "AAE_{}".format(args.run)), ask_4_permission=True)
    # save_dir = make_dir_if_not_exist(save_dir)

    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "AAE_{}".format(args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=3,
                        suppress=True)

    num_bins = args.num_bins
    bin_limits = tuple([float(s) for s in args.bin_limits.split(";")])
    data_proportion = args.data_proportion

    f = open(join(
        save_dir, 'log[bins={},bin_limits={},data={}].txt'.format(
            num_bins, bin_limits, data_proportion)),
             mode='w')
    print_ = functools.partial(print_both, file=f)

    result_file = join(
        args.interpretability_metrics_dir, "AAE_{}".format(args.run),
        "results[bins={},bin_limits={},data={}].npz".format(
            num_bins, bin_limits, data_proportion))

    results = np.load(result_file, "r")

    print_("")
    print_("num_bins: {}".format(num_bins))
    print_("bin_limits: {}".format(bin_limits))
    print_("data_proportion: {}".format(data_proportion))

    # Plotting
    # =========================================== #
    # seed = 389
    # num_samples = 30
    seed = 398
    num_samples = 1

    ids = list(range(seed, seed + num_samples))
    print_("\nids: {}".format(ids))

    data = celebA_loader.sample_images_from_dataset(sess, 'train', ids)

    span = 3
    points_one_side = 5

    attr_names = celebA_loader.attributes
    print_("attr_names: {}".format(attr_names))
    print_("results.keys: {}".format(list(results.keys())))

    # (z_dim, num_attrs)
    MI_ids_sorted = results['MI_ids_sorted']
    MI_sorted = results['MI_sorted']

    MI_gap_y = results['MI_gap_y']
    H_y = results['H_y_4_diff_z'][:, 0]
    assert MI_ids_sorted.shape[1] == len(attr_names) == len(MI_gap_y) == len(H_y), \
        "MI_ids_sorted.shape: {}, len(attr_names): {}, len(MI_gap_y): {}, len(H_y): {}".format(
            MI_ids_sorted.shape, len(attr_names), len(MI_gap_y), len(H_y))

    print_("\nShow RMIG!")
    for i in range(len(attr_names)):
        print("{}: RMIG: {:.4f}, RMIG (unnorm): {:.4f}, H: {:.4f}".format(
            attr_names[i], MI_gap_y[i], MI_gap_y[i] * H_y[i], H_y[i]))

    print_("\nShow JEMMI!")
    H_z_y = results['H_z_y']
    MI_z_y = results['MI_z_y']

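    # JEMMI(y_k) below is H(z_i*, y_k) - I(z_i*, y_k) + I(z_j*, y_k) for the two
    # latents z_i*, z_j* with the highest MI with y_k, normalized by
    # log(num_bins) + H(y_k).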
    ids_sorted_by_MI = np.argsort(MI_z_y, axis=0)[::-1]
    MI_z_y_sorted = np.take_along_axis(MI_z_y, ids_sorted_by_MI, axis=0)
    H_z_y_sorted = np.take_along_axis(H_z_y, ids_sorted_by_MI, axis=0)

    H_diff = H_z_y_sorted[0, :] - MI_z_y_sorted[0, :]
    JEMMI_unnorm = H_diff + MI_z_y_sorted[1, :]
    JEMMI_norm = JEMMI_unnorm / (np.log(num_bins) + H_y)

    for i in range(len(attr_names)):
        print(
            "{}: JEMMI: {:.4f}, JEMMI (unnorm): {:.4f}, H_diff: {:.4f}, I2: {:.4f}, top 2 latents: z{}, z{}"
            .format(attr_names[i], JEMMI_norm[i], JEMMI_unnorm[i], H_diff[i],
                    MI_z_y_sorted[1, i], ids_sorted_by_MI[0, i],
                    ids_sorted_by_MI[1, i]))

    # Uncomment if you want
    '''
    for n in range(len(ids)):
        for k in range(len(attr_names)):
            MI_ids_top10 = MI_ids_sorted[:10, k]
            MI_top10 = MI_sorted[:10, k]
            print("Plot top 10 latents for factor '{}'!".format(attr_names[k]))

            img_file = join(save_dir, "x_train[{}][attr={}][bins={},bin_limits={},data={}].png".
                            format(ids[n], attr_names[k], num_bins, bin_limits, data_proportion))

            model.cond_all_latents_traverse_v2(img_file, sess, data[n],
                                               z_comps=MI_ids_top10,
                                               z_comp_labels=["z[{}] ({:.4f})".format(comp, mi)
                                                              for comp, mi in zip(MI_ids_top10, MI_top10)],
                                               span=span, points_1_side=points_one_side,
                                               hl_x=True,
                                               font_size=9,
                                               title="{} (MI gap={:.4f}, H={:.4f})".format(
                                                   attr_names[k], MI_gap_y[k], H_y[k]),
                                               title_font_scale=1.5,
                                               subplot_adjust={'left': 0.16, 'right': 0.99,
                                                               'bottom': 0.01, 'top': 0.95},
                                               size_inches=(6.5, 5.2),
                                               batch_size=args.batch_size,
                                               dec_output_2_img_func=binary_float_to_uint8)
    '''

    # Top 5 only
    for n in range(len(ids)):
        for k in range(len(attr_names)):
            MI_ids_top5 = MI_ids_sorted[:5, k]
            MI_top5 = MI_sorted[:5, k]
            print("Plot top 5 latents for factor '{}'!".format(attr_names[k]))

            img_file = join(
                save_dir, "train{}_attr={}_bins={}_data={}.png".format(
                    ids[n], attr_names[k], num_bins, data_proportion))

            model.cond_all_latents_traverse_v2(
                img_file,
                sess,
                data[n],
                z_comps=MI_ids_top5,
                z_comp_labels=[
                    "z[{}] ({:.4f})".format(comp, mi)
                    for comp, mi in zip(MI_ids_top5, MI_top5)
                ],
                span=span,
                points_1_side=points_one_side,
                hl_x=True,
                font_size=9,
                title="{} (MI gap={:.4f}, H={:.4f})".format(
                    attr_names[k], MI_gap_y[k], H_y[k]),
                title_font_scale=1.5,
                subplot_adjust={
                    'left': 0.16,
                    'right': 0.99,
                    'bottom': 0.005,
                    'top': 0.93
                },
                size_inches=(6.5, 2.8),
                batch_size=args.batch_size,
                dec_output_2_img_func=binary_float_to_uint8)


def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    celebA_loader = TFCelebALoader(root_dir=args.celebA_root_dir)

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size))
    num_train = celebA_loader.num_train_data

    # =====================================
    # Instantiate model
    # =====================================
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(args.activation))

    if args.enc_dec_model == "1Konny":
        # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)

        encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny(num_outputs=2)
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(args.enc_dec_model))

    model = FactorVAE([img_height, img_width, 3], args.z_dim,
                      encoder=encoder, decoder=decoder,
                      discriminator_z=disc_z,
                      rec_x_mode=args.rec_x_mode,
                      use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'kld_loss': args.kld_loss_coeff,
        'tc_loss': args.tc_loss_coeff,
        'gp0_z_tc': args.gp0_z_tc_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    # save_dir = remove_dir_if_exist(join(args.save_dir, "FactorVAE_{}".format(args.run)), ask_4_permission=True)
    # save_dir = make_dir_if_not_exist(save_dir)

    save_dir = make_dir_if_not_exist(join(args.save_dir, "FactorVAE_{}".format(args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan, linewidth=1000, precision=3, suppress=True)

    num_bins = args.num_bins
    data_proportion = args.data_proportion
    bin_limits = tuple([float(s) for s in args.bin_limits.split(";")])
    top_k = args.top_k

    f = open(join(save_dir, 'log[bins={},bin_limits={},data={}].txt'.
                  format(num_bins, bin_limits, data_proportion)), mode='w')
    print_ = functools.partial(print_both, file=f)

    result_file = join(args.informativeness_metrics_dir, "FactorVAE_{}".format(args.run),
                       'results[bins={},bin_limits={},data={}].npz'.
                       format(num_bins, bin_limits, data_proportion))

    results = np.load(result_file, "r")

    print_("")
    print_("num_bins: {}".format(num_bins))
    print_("bin_limits: {}".format(bin_limits))
    print_("data_proportion: {}".format(data_proportion))
    print_("top_k: {}".format(top_k))

    # Plotting
    # =========================================== #
    # seed = 389
    # num_samples = 30
    seed = 398
    num_samples = 1

    ids = list(range(seed, seed + num_samples))
    print_("\nids: {}".format(ids))

    data = celebA_loader.sample_images_from_dataset(sess, 'train', ids)

    span = 3
    points_one_side = 5

    print_("sorted_MI: {}".format(results["sorted_MI_z_x"]))
    print_("sorted_z_ids: {}".format(results["sorted_z_comps"]))
    print_("sorted_norm_MI: {}".format(results["sorted_norm_MI_z_x"]))
    print_("sorted_norm_z_ids: {}".format(results["sorted_norm_z_comps"]))

    top_MI = results["sorted_MI_z_x"][:top_k]
    top_z_ids = results["sorted_z_comps"][:top_k]
    top_norm_MI = results["sorted_norm_MI_z_x"][:top_k]
    top_norm_z_ids = results["sorted_norm_z_comps"][:top_k]

    print("Matplotlib font size: {}".format(matplotlib.rcParams['font.size']))
    for n in range(len(ids)):
        if top_k == 10:
            print("Plot conditional all comps z traverse with train sample {}!".format(ids[n]))

            img_file = join(save_dir, "x_train[{}][bins={},bin_limits={},data={}].png".
                            format(ids[n], num_bins, bin_limits, data_proportion))
            model.cond_all_latents_traverse_v2(img_file, sess, data[n],
                                               z_comps=top_z_ids,
                                               z_comp_labels=["z[{}] ({:.2f})".format(comp, mi)
                                                              for comp, mi in zip(top_z_ids, top_MI)],
                                               span=span, points_1_side=points_one_side,
                                               hl_x=True,
                                               font_size=matplotlib.rcParams['font.size'],
                                               subplot_adjust={'left': 0.15, 'right': 0.99,
                                                               'bottom': 0.01, 'top': 0.99},
                                               size_inches=(6.3, 4.9),
                                               batch_size=args.batch_size,
                                               dec_output_2_img_func=binary_float_to_uint8)

            img_file = join(save_dir, "x_train[{}][bins={},bin_limits={},data={},norm].png".
                            format(ids[n], num_bins, bin_limits, data_proportion))
            model.cond_all_latents_traverse_v2(img_file, sess, data[n],
                                               z_comps=top_norm_z_ids,
                                               z_comp_labels=["z[{}] ({:.2f})".format(comp, mi)
                                                              for comp, mi in zip(top_norm_z_ids, top_norm_MI)],
                                               span=span, points_1_side=points_one_side,
                                               hl_x=True,
                                               font_size=matplotlib.rcParams['font.size'],
                                               subplot_adjust={'left': 0.15, 'right': 0.99,
                                                               'bottom': 0.01, 'top': 0.99},
                                               size_inches=(6.3, 4.9),
                                               batch_size=args.batch_size,
                                               dec_output_2_img_func=binary_float_to_uint8)
        else:
            print("Plot conditional all comps z traverse with train sample {}!".format(ids[n]))

            img_file = join(save_dir, "x_train[{}][bins={},bin_limits={},data={}].png".
                            format(ids[n], num_bins, bin_limits, data_proportion))
            model.cond_all_latents_traverse_v2(img_file, sess, data[n],
                                               z_comps=top_z_ids,
                                               z_comp_labels=["z[{}] ({:.2f})".format(comp, mi)
                                                              for comp, mi in zip(top_z_ids, top_MI)],
                                               span=span, points_1_side=points_one_side,
                                               hl_x=True,
                                               font_size=5,
                                               subplot_adjust={'left': 0.19, 'right': 0.99,
                                                               'bottom': 0.01, 'top': 0.99},
                                               size_inches=(2.98, 9.85),
                                               batch_size=args.batch_size,
                                               dec_output_2_img_func=binary_float_to_uint8)

            img_file = join(save_dir, "x_train[{}][bins={},bin_limits={},data={},norm].png".
                            format(ids[n], num_bins, bin_limits, data_proportion))
            model.cond_all_latents_traverse_v2(img_file, sess, data[n],
                                               z_comps=top_norm_z_ids,
                                               z_comp_labels=["z[{}] ({:.2f})".format(comp, mi)
                                                              for comp, mi in zip(top_norm_z_ids, top_norm_MI)],
                                               span=span, points_1_side=points_one_side,
                                               hl_x=True,
                                               font_size=5,
                                               subplot_adjust={'left': 0.19, 'right': 0.99,
                                                               'bottom': 0.01, 'top': 0.99},
                                               size_inches=(2.98, 9.85),
                                               batch_size=args.batch_size,
                                               dec_output_2_img_func=binary_float_to_uint8)
    # =========================================== #

    f.close()


def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    celebA_loader = TFCelebALoader(root_dir=args.celebA_root_dir)

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny",
                                         resize_size=args.celebA_resize_size))

    # =====================================
    # Instantiate model
    # =====================================
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(
            args.activation))

    if args.enc_dec_model == "1Konny":
        assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(
            args.z_dim)

        encoder = Encoder_1Konny(args.z_dim,
                                 stochastic=True,
                                 activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3],
                                 activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny(num_outputs=2)
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(
            args.enc_dec_model))

    model = FactorVAE([img_height, img_width, 3],
                      args.z_dim,
                      encoder=encoder,
                      decoder=decoder,
                      discriminator_z=disc_z,
                      rec_x_mode=args.rec_x_mode,
                      use_gp0_z_tc=True,
                      gp0_z_tc_mode=args.gp0_z_tc_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'kld_loss': args.kld_loss_coeff,
        'tc_loss': args.tc_loss_coeff,
        'gp0_z_tc': args.gp0_z_tc_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    # =====================================

    # Reconstruct
    # ======================================= #
    seed = 341
    rs = np.random.RandomState(seed)
    ids = rs.choice(celebA_loader.num_test_data, size=15)

    x = celebA_loader.sample_images_from_dataset(sess, 'test', ids)

    save_dir = make_dir_if_not_exist(join(args.save_dir, args.run))

    img_file = join(save_dir, 'x_test.png')
    save_img_block(img_file, binary_float_to_uint8(np.expand_dims(x, axis=0)))

    img_file = join(save_dir, 'recx_test_1.png')
    model.reconstruct_images(img_file,
                             sess,
                             x,
                             block_shape=[1, len(ids)],
                             batch_size=-1,
                             show_original_images=False,
                             dec_output_2_img_func=binary_float_to_uint8)

    img_file = join(save_dir, 'recx_test_2.png')
    model.reconstruct_images(img_file,
                             sess,
                             x,
                             block_shape=[1, len(ids)],
                             batch_size=-1,
                             show_original_images=True,
                             dec_output_2_img_func=binary_float_to_uint8)
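
# 'binary_float_to_uint8' (passed above as dec_output_2_img_func) comes from the
# codebase and is not shown here. A minimal sketch of the assumed behaviour:
# clip decoder outputs to [0, 1] and rescale them to uint8 pixel values.
import numpy as np

def binary_float_to_uint8(x):
    # Assumed helper, not the library's actual implementation.
    return (np.clip(x, 0.0, 1.0) * 255.0).astype(np.uint8)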
def main(args):
    # Load config
    # =========================================== #
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)
    # =========================================== #

    # Load dataset
    # =========================================== #
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']

    x_train = np.reshape(x_train, [3, 6, 40, 32, 32, 64, 64, 1])
    # =========================================== #

    # Build model
    # =========================================== #
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny(num_outputs=2)
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(
            args.enc_dec_model))

    model = FactorVAE([64, 64, 1],
                      args.z_dim,
                      encoder=encoder,
                      decoder=decoder,
                      discriminator_z=disc_z,
                      rec_x_mode=args.rec_x_mode,
                      use_gp0_z_tc=True,
                      gp0_z_tc_mode=args.gp0_z_tc_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'kld_loss': args.kld_loss_coeff,
        'tc_loss': args.tc_loss_coeff,
        'gp0_z_tc': args.gp0_z_tc_coeff,
        'Dz_tc_loss_coeff': args.Dz_tc_loss_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()
    # =========================================== #

    # Initialize session
    # =========================================== #
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)
    train_helper.load(sess, load_step=args.load_step)

    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    # =========================================== #

    # Load result file
    # =========================================== #
    result_file = join(args.SEPIN_dir, "{}_{}".format(args.enc_dec_model,
                                                      args.run),
                       "results[num_samples={}].npz".format(args.num_samples))

    results = np.load(result_file, "r")
    print("results.keys: {}".format(list(results.keys())))

    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=3,
                        suppress=True)
    # =========================================== #

    # Plotting
    # =========================================== #
    data = [
        x_train[0, 3, 20, 16, 16],
        x_train[1, 3, 20, 16, 16],
        x_train[2, 3, 20, 16, 16],
    ]

    gt_factors = ['Shape', 'Scale', 'Rotation', 'Pos_x', 'Pos_y']

    # (num_latents,)
    MI_zi_x = results['MI_zi_x']
    SEP_zi = results['SEP_zi']
    ids_sorted = np.argsort(SEP_zi, axis=0)[::-1]

    print("")
    print("MI_zi_x: {}".format(MI_zi_x))
    print("SEP_zi: {}".format(SEP_zi))
    print("ids_sorted: {}".format(ids_sorted))

    span = 3
    points_one_side = 5

    for n in range(len(data)):
        img_file = join(
            save_dir, "sep_x{}_num_samples={}.png".format(n, args.num_samples))
        model.cond_all_latents_traverse_v2(
            img_file,
            sess,
            data[n],
            z_comps=ids_sorted,
            z_comp_labels=[
                "z[{}] (SEP={:.4f}, INFO={:.4f})".format(
                    idx, SEP_zi[idx], MI_zi_x[idx]) for idx in ids_sorted
            ],
            span=span,
            points_1_side=points_one_side,
            hl_x=True,
            font_size=9,
            title_font_scale=1.5,
            subplot_adjust={
                'left': 0.55,
                'right': 0.99,
                'bottom': 0.01,
                'top': 0.99
            },
            size_inches=(4.0, 1.7),
            batch_size=args.batch_size,
            dec_output_2_img_func=binary_float_to_uint8)

    # 'f' from the earlier 'with' blocks is already closed; close the still-open results file instead
    results.close()
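
# A small standalone sketch (not part of the example above) showing how the
# SEPIN result file loaded above could be inspected on its own: sort latents by
# SEP_zi and print SEP/INFO per latent. The file layout and keys follow the
# usage above; everything else is an assumption for illustration.
import numpy as np

def inspect_sepin_results(result_file):
    results = np.load(result_file, "r")
    SEP_zi = results['SEP_zi']    # separability score per latent
    MI_zi_x = results['MI_zi_x']  # informativeness I(z_i; x) per latent
    for i in np.argsort(SEP_zi)[::-1]:
        print("z[{}]: SEP={:.4f}, INFO={:.4f}".format(i, SEP_zi[i], MI_zi_x[i]))
    results.close()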
Example #18
def main(args):
    # Create output directory
    # ===================================== #
    args.output_dir = os.path.join(args.output_dir, args.model_name, args.run)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    else:
        if args.force_rm_dir:
            import shutil
            shutil.rmtree(args.output_dir, ignore_errors=True)
            print("Removed '{}'".format(args.output_dir))
        else:
            raise ValueError("Output directory '{}' existed. 'force_rm_dir' "
                             "must be set to True!".format(args.output_dir))
        os.mkdir(args.output_dir)

    save_args(os.path.join(args.output_dir, 'config.json'), args)
    # pp = pprint.PrettyPrinter(indent=4)
    # pp.pprint(args.__dict__)
    # ===================================== #

    # Specify data
    # ===================================== #
    if args.dataset == "mnist":
        x_shape = [28, 28, 1]
    elif args.dataset == "mnist_3" or args.dataset == "mnistm":
        x_shape = [28, 28, 3]
    elif args.dataset == "svhn" or args.dataset == "cifar10" or args.dataset == "cifar100":
        x_shape = [32, 32, 3]
    else:
        raise ValueError("Do not support dataset '{}'!".format(args.dataset))

    if args.dataset == "cifar100":
        num_classes = 100
    else:
        num_classes = 10

    print("x_shape: {}".format(x_shape))
    print("num_classes: {}".format(num_classes))
    # ===================================== #

    # Load data
    # ===================================== #
    print("Loading {}!".format(args.dataset))
    train_loader = SimpleDataset4SSL()
    train_loader.load_npz_data(args.train_file)
    train_loader.create_ssl_data(args.num_labeled,
                                 num_classes=num_classes,
                                 shuffle=True,
                                 seed=args.seed)
    if args.input_norm != "applied":
        train_loader.x = uint8_to_binary_float(train_loader.x)
    else:
        print("Input normalization has been applied on train data!")

    test_loader = SimpleDataset()
    test_loader.load_npz_data(args.test_file)
    if args.input_norm != "applied":
        test_loader.x = uint8_to_binary_float(test_loader.x)
    else:
        print("Input normalization has been applied on test data!")

    print("train_l/train_u/test: {}/{}/{}".format(
        train_loader.num_labeled_data, train_loader.num_unlabeled_data,
        test_loader.num_data))

    # import matplotlib.pyplot as plt
    # print("train_l.y[:10]: {}".format(train_l.y[:10]))
    # print("train_u.y[:10]: {}".format(train_u.y[:10]))
    # print("test.y[:10]: {}".format(test.y[:10]))
    # fig, axes = plt.subplots(3, 5)
    # for i in range(5):
    #     axes[0][i].imshow(train_l.x[i])
    #     axes[1][i].imshow(train_u.x[i])
    #     axes[2][i].imshow(test.x[i])
    # plt.show()

    if args.dataset == "mnist":
        train_loader.x = np.expand_dims(train_loader.x, axis=-1)
        test_loader.x = np.expand_dims(test_loader.x, axis=-1)
    elif args.dataset == "mnist_3":
        train_loader.x = np.stack(
            [train_loader.x, train_loader.x, train_loader.x], axis=-1)
        test_loader.x = np.stack([test_loader.x, test_loader.x, test_loader.x],
                                 axis=-1)

    # Data Preprocessing + Augmentation
    # ------------------------------------- #
    if args.input_norm == 'none' or args.input_norm == 'applied':
        print("Do not apply any normalization!")
    elif args.input_norm == 'zca':
        print("Apply ZCA whitening on data!")
        normalizer = ZCA()
        normalizer.fit(train_loader.x, eps=1e-5)
        train_loader.x = normalizer.transform(train_loader.x)
        test_loader.x = normalizer.transform(test_loader.x)
    elif args.input_norm == 'standard':
        print("Apply Standardization on data!")
        normalizer = Standardization()
        normalizer.fit(train_loader.x)
        train_loader.x = normalizer.transform(train_loader.x)
        test_loader.x = normalizer.transform(test_loader.x)
    else:
        raise ValueError("Do not support 'input_norm'={}".format(
            args.input_norm))
    # ------------------------------------- #
    # ===================================== #

    # Hyperparameters
    # ===================================== #
    hyper_updater = HyperParamUpdater(
        ['lr', 'ema_momentum', 'cent_u_coeff', 'cons_coeff'], [
            args.lr_max, args.ema_momentum_init, args.cent_u_coeff_max,
            args.cons_coeff_max
        ],
        scope='moving_hyperparams')
    # ===================================== #

    # Build model
    # ===================================== #
    # IMPORTANT: Remember to test with No Gaussian Noise
    print("args.gauss_noise: {}".format(args.gauss_noise))

    if args.model_name == "9310gaurav":
        main_classifier = MainClassifier_9310gaurav(
            num_classes=num_classes, use_gauss_noise=args.gauss_noise)
    else:
        raise ValueError("Do not support model_name='{}'!".format(
            args.model_name))

    # Input Perturber
    # ------------------------------------- #
    # The input perturber only performs 'translating_pixels' here (for both CIFAR-10 and SVHN)
    input_perturber = InputPerturber(
        normalizer=None,  # We do not use normalizer here!
        flip_horizontally=args.flip_horizontally,
        flip_vertically=False,  # We do not flip images vertically!
        translating_pixels=args.translating_pixels,
        noise_std=0.0)  # We do not add noise here!
    # ------------------------------------- #

    # Main model
    # ------------------------------------- #
    model = MeanTeacher(x_shape=x_shape,
                        y_shape=num_classes,
                        main_classifier=main_classifier,
                        input_perturber=input_perturber,
                        cons_mode=args.cons_mode,
                        ema_momentum=hyper_updater.variables['ema_momentum'],
                        cons_4_unlabeled_only=args.cons_4_unlabeled_only,
                        weight_decay=args.weight_decay)

    loss_coeff_dict = {
        'cross_ent_l': args.cross_ent_l,
        'cond_ent_u': hyper_updater.variables['cent_u_coeff'],
        'cons': hyper_updater.variables['cons_coeff'],
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_list(trainable_only=False)
    # ------------------------------------- #
    # ===================================== #

    # Build optimizer
    # ===================================== #
    losses = model.get_loss()
    train_params = model.get_train_params()
    opt_AE = tf.train.MomentumOptimizer(
        learning_rate=hyper_updater.variables['lr'],
        momentum=args.lr_momentum,
        use_nesterov=True)

    # Contain both batch norm update and teacher param update
    update_ops = model.get_all_update_ops()
    print("update_ops: {}".format(update_ops))
    with tf.control_dependencies(update_ops):
        train_op_AE = opt_AE.minimize(loss=losses['loss'],
                                      var_list=train_params['loss'])
    # ===================================== #

    # Create directories
    # ===================================== #
    asset_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "asset"))
    img_dir = make_dir_if_not_exist(os.path.join(asset_dir, "img"))
    log_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "log"))
    train_log_file = os.path.join(log_dir, "train.log")

    summary_dir = make_dir_if_not_exist(
        os.path.join(args.output_dir, "summary_tf"))
    model_dir = make_dir_if_not_exist(os.path.join(args.output_dir,
                                                   "model_tf"))
    # ===================================== #

    # Create session
    # ===================================== #
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    train_helper = SimpleTrainHelper(log_dir=summary_dir,
                                     save_dir=model_dir,
                                     max_to_keep=args.num_save,
                                     max_to_keep_best=args.num_save_best)
    train_helper.initialize(sess,
                            init_variables=True,
                            create_summary_writer=True)
    # ===================================== #

    # Start training
    # ===================================== #
    # Summarizer
    # ------------------------------------- #
    fetch_keys_AE_l = ['acc_y_l', 'cross_ent_l']
    fetch_keys_AE_u = ['acc_y_u', 'cond_ent_u', 'cons']
    # Compare the MDL loss with xent + consistency to see which one is the
    # better indicator of generalization
    fetch_keys_AE = fetch_keys_AE_l + fetch_keys_AE_u
    train_summarizer = ScalarSummarizer([(key, 'mean')
                                         for key in fetch_keys_AE])

    fetch_keys_test = ['acc_y', 'acc_y_stu']
    eval_summarizer = ScalarSummarizer([(key, 'mean')
                                        for key in fetch_keys_test])
    # ------------------------------------- #

    # Data sampler
    # ------------------------------------- #
    # When batch_size_labeled <= 0, the labeled/unlabeled composition of each batch varies during training
    if args.batch_size_labeled <= 0:
        sampler = ContinuousIndexSampler(train_loader.num_data,
                                         args.batch_size,
                                         shuffle=True)
        sampling_separately = False
        print("batch_size_l, batch_size_u vary but their sum={}!".format(
            args.batch_size))

    elif 0 < args.batch_size_labeled < args.batch_size:
        batch_size_l = args.batch_size_labeled
        batch_size_u = args.batch_size - args.batch_size_labeled
        print("batch_size_l/batch_size_u: {}/{}".format(
            batch_size_l, batch_size_u))

        # IMPORTANT: Here we must use 'train_loader.labeled_ids' and 'train_loader.unlabeled_ids',
        # NOT 'train_loader.num_labeled_data' and 'train_loader.num_unlabeled_data'
        sampler_l = ContinuousIndexSampler(train_loader.labeled_ids,
                                           batch_size_l,
                                           shuffle=True)
        sampler_u = ContinuousIndexSampler(train_loader.unlabeled_ids,
                                           batch_size_u,
                                           shuffle=True)
        sampler = ContinuousIndexSamplerGroup(sampler_l, sampler_u)
        sampling_separately = True

    else:
        raise ValueError(
            "'args.batch_size_labeled' ({}) must be <= 0 or smaller than "
            "'args.batch_size' ({})!".format(args.batch_size_labeled,
                                             args.batch_size))
    # ------------------------------------- #

    # Annealer
    # ------------------------------------- #
    step_rampup_annealer = StepAnnealing(args.rampup_len_step,
                                         value_0=0,
                                         value_1=1)
    sigmoid_rampup_annealer = SigmoidRampup(args.rampup_len_step)
    sigmoid_rampdown_annealer = SigmoidRampdown(args.rampdown_len_step,
                                                args.steps)
    # ------------------------------------- #

    # Results Tracker
    # ------------------------------------- #
    tracker = BestResultsTracker([('acc_y', 'greater')],
                                 num_best=args.num_save_best)
    # ------------------------------------- #

    import math
    batches_per_epoch = int(math.ceil(train_loader.num_data / args.batch_size))
    global_step = 0
    log_time_start = time()

    for epoch in range(args.epochs):
        if global_step >= args.steps:
            break

        for batch in range(batches_per_epoch):
            if global_step >= args.steps:
                break

            global_step += 1

            # Update hyper parameters
            # ---------------------------------- #
            step_rampup = step_rampup_annealer.get_value(global_step)
            sigmoid_rampup = sigmoid_rampup_annealer.get_value(global_step)
            sigmoid_rampdown = sigmoid_rampdown_annealer.get_value(global_step)

            lr = sigmoid_rampup * sigmoid_rampdown * args.lr_max
            ema_momentum = (
                1.0 - step_rampup
            ) * args.ema_momentum_init + step_rampup * args.ema_momentum_final
            cent_u_coeff = sigmoid_rampup * args.cent_u_coeff_max
            cons_coeff = sigmoid_rampup * args.cons_coeff_max

            hyper_updater.update(sess,
                                 feed_dict={
                                     'lr': lr,
                                     'ema_momentum': ema_momentum,
                                     'cent_u_coeff': cent_u_coeff,
                                     'cons_coeff': cons_coeff
                                 })
            hyper_vals = hyper_updater.get_value(sess)
            hyper_vals['sigmoid_rampup'] = sigmoid_rampup
            hyper_vals['sigmoid_rampdown'] = sigmoid_rampdown
            hyper_vals['step_rampup'] = step_rampup
            # ---------------------------------- #

            # Train model
            # ---------------------------------- #
            if sampling_separately:
                # print("Sample separately!")
                batch_ids_l, batch_ids_u = sampler.sample_group_of_ids()

                xl, yl, label_flag_l = train_loader.fetch_batch(batch_ids_l)
                xu, yu, label_flag_u = train_loader.fetch_batch(batch_ids_u)
                assert np.all(label_flag_l), "'label_flag_l: {}'".format(
                    label_flag_l)
                assert not np.any(label_flag_u), "'label_flag_u: {}'".format(
                    label_flag_u)

                x = np.concatenate([xl, xu], axis=0)
                y = np.concatenate([yl, yu], axis=0)
                label_flag = np.concatenate([label_flag_l, label_flag_u],
                                            axis=0)
            else:
                # print("Sample jointly!")
                batch_ids = sampler.sample_ids()
                x, y, label_flag = train_loader.fetch_batch(batch_ids)

            _, AEm = sess.run(
                [train_op_AE,
                 model.get_output(fetch_keys_AE, as_dict=True)],
                feed_dict={
                    model.is_train: True,
                    model.x_ph: x,
                    model.y_ph: y,
                    model.label_flag_ph: label_flag
                })

            batch_results = AEm
            train_summarizer.accumulate(batch_results, args.batch_size)
            # ---------------------------------- #

            if global_step % args.save_freq == 0:
                train_helper.save(sess, global_step)

            if global_step % args.log_freq == 0:
                log_time_end = time()
                log_time_gap = (log_time_end - log_time_start)
                log_time_start = log_time_end

                summaries, results = train_summarizer.get_summaries_and_reset(
                    summary_prefix='train')
                train_helper.add_summaries(summaries, global_step)
                train_helper.add_summaries(
                    custom_tf_scalar_summaries(hyper_vals,
                                               prefix="moving_hyper"),
                    global_step)

                log_str = "\n[MeanTeacher ({})/{}, {}], " \
                          "Epoch {}/{}, Batch {}/{} Step {} ({:.2f}s) (train)".format(
                              args.dataset, args.model_name, args.run, epoch, args.epochs,
                              batch, batches_per_epoch, global_step-1, log_time_gap) + \
                          "\n" + ", ".join(["{}: {:.4f}".format(key, results[key])
                                            for key in fetch_keys_AE_l]) + \
                          "\n" + ", ".join(["{}: {:.4f}".format(key, results[key])
                                            for key in fetch_keys_AE_u]) + \
                          "\n" + ", ".join(["{}: {:.4f}".format(key, hyper_vals[key])
                                           for key in hyper_vals])

                print(log_str)
                with open(train_log_file, "a") as f:
                    f.write(log_str)
                    f.write("\n")

            if global_step % args.eval_freq == 0:
                for batch_ids in iterate_data(test_loader.num_data,
                                              args.batch_size,
                                              shuffle=False,
                                              include_remaining=True):
                    x, y = test_loader.fetch_batch(batch_ids)

                    batch_results = sess.run(model.get_output(fetch_keys_test,
                                                              as_dict=True),
                                             feed_dict={
                                                 model.is_train: False,
                                                 model.x_ph: x,
                                                 model.y_ph: y
                                             })

                    eval_summarizer.accumulate(batch_results, len(batch_ids))

                summaries, results = eval_summarizer.get_summaries_and_reset(
                    summary_prefix='test')
                train_helper.add_summaries(summaries, global_step)

                log_str = "Epoch {}/{}, Batch {}/{} (test), acc_y: {:.4f}, acc_y_stu: {:.4f}".format(
                    epoch, args.epochs, batch, batches_per_epoch,
                    results['acc_y'], results['acc_y_stu'])

                print(log_str)
                with open(train_log_file, "a") as f:
                    f.write(log_str)
                    f.write("\n")

                is_better = tracker.check_and_update(results, global_step)
                if is_better['acc_y']:
                    train_helper.save_best(sess, global_step=global_step)

    # Last save
    train_helper.save(sess, global_step)
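
# 'StepAnnealing', 'SigmoidRampup' and 'SigmoidRampdown' used in the schedule
# above are codebase helpers that are not shown here. A minimal functional
# sketch of the ramps they presumably implement (the usual Mean Teacher-style
# sigmoid ramps); the constants and signatures below are assumptions, not the
# library's actual implementation.
import numpy as np

def sigmoid_rampup(step, rampup_len):
    # Ramps from ~0 to 1 over the first 'rampup_len' steps via exp(-5 * (1 - t)^2)
    if rampup_len <= 0:
        return 1.0
    t = np.clip(step / float(rampup_len), 0.0, 1.0)
    return float(np.exp(-5.0 * (1.0 - t) ** 2))

def sigmoid_rampdown(step, rampdown_len, total_steps):
    # Mirror image of the ramp-up, applied over the last 'rampdown_len' steps
    if rampdown_len <= 0:
        return 1.0
    t = np.clip((total_steps - step) / float(rampdown_len), 0.0, 1.0)
    return float(np.exp(-5.0 * (1.0 - t) ** 2))

# Usage mirroring the schedule in the training loop above (lr_max etc. assumed):
# lr = sigmoid_rampup(step, rampup_len) * sigmoid_rampdown(step, rampdown_len, steps) * lr_max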
Example #19
def main(args):
    # Load config
    # ===================================== #
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)
    # ===================================== #

    # Load dataset
    # ===================================== #
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']
        # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y
        y_train = f['latents_classes'][:, 1:]

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)
    num_train = len(x_train)
    print("num_train: {}".format(num_train))
    # ===================================== #

    # Build model
    # ===================================== #
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(
            args.enc_dec_model))

    model = AAE([64, 64, 1],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()
    # ===================================== #

    # Initialize session
    # ===================================== #
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)
    train_helper.load(sess, load_step=args.load_step)
    # ===================================== #

    # Experiments
    # ===================================== #
    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=5,
                        suppress=True)
    # ===================================== #

    # Compute representations
    # ===================================== #
    z_data_file = join(save_dir, "z_data.npz")

    if not exists(z_data_file):
        all_z_mean = []
        all_z_stddev = []

        print("")
        print("Compute all_z_mean, all_z_stddev!")

        count = 0
        for batch_ids in iterate_data(num_train,
                                      10 * args.batch_size,
                                      shuffle=False):
            x = x_train[batch_ids]

            z_samples, z_mean, z_stddev = sess.run(model.get_output(
                ['z1_gen', 'z_mean', 'z_stddev']),
                                                   feed_dict={
                                                       model.is_train: False,
                                                       model.x_ph: x
                                                   })

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)

        np.savez_compressed(z_data_file,
                            all_z_mean=all_z_mean,
                            all_z_stddev=all_z_stddev)
    else:
        print("{} exists. Load data from file!".format(z_data_file))
        with np.load(z_data_file, "r") as f:
            all_z_mean = f['all_z_mean']
            all_z_stddev = f['all_z_stddev']
    # ===================================== #

    cont_mask = ([False, True, True, True, True]
                 if args.continuous_only else None)

    if args.classifier == "LASSO":
        results = compute_metrics_with_LASSO(latents=all_z_mean,
                                             factors=y_train,
                                             params={
                                                 'alpha': args.LASSO_alpha,
                                                 'max_iter': args.LASSO_iters
                                             },
                                             cont_mask=cont_mask)
        result_file = join(
            save_dir, "results[LASSO,{},alpha={},iters={}].npz".format(
                "cont" if args.continuous_only else "all", args.LASSO_alpha,
                args.LASSO_iters))
    else:
        results = compute_metrics_with_RandomForest(latents=all_z_mean,
                                                    factors=y_train,
                                                    params={
                                                        'n_estimators':
                                                        args.RF_trees,
                                                        'max_depth':
                                                        args.RF_depth
                                                    })
        result_file = join(
            save_dir, "results[RF,{},trees={},depth={}].npz".format(
                "cont" if args.continuous_only else "all", args.RF_trees,
                args.RF_depth))

    np.savez_compressed(result_file, **results)
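
# 'compute_metrics_with_LASSO' / 'compute_metrics_with_RandomForest' are not
# shown in this example. A rough, assumed sketch of a regressor-based metric of
# this kind: fit one model per ground-truth factor on the latent means and
# report how well each factor can be predicted, plus per-latent importances.
# This only illustrates the idea and is not the library's actual implementation.
import numpy as np
from sklearn.linear_model import Lasso

def lasso_factor_metrics(latents, factors, alpha=0.01, max_iter=1000):
    num_factors = factors.shape[1]
    R2 = np.zeros(num_factors)
    importances = np.zeros((latents.shape[1], num_factors))
    for k in range(num_factors):
        reg = Lasso(alpha=alpha, max_iter=max_iter)
        reg.fit(latents, factors[:, k])
        R2[k] = reg.score(latents, factors[:, k])
        importances[:, k] = np.abs(reg.coef_)
    return {'R2': R2, 'importances': importances}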
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']
        # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y
        y_train = f['latents_classes'][:, 1:]

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)
    num_train = len(x_train)
    print("num_train: {}".format(num_train))

    print("y_train[:10]: {}".format(y_train[:10]))

    # =====================================
    # Instantiate model
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny(num_outputs=2)
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(args.enc_dec_model))

    model = FactorVAE([64, 64, 1], args.z_dim,
                      encoder=encoder, decoder=decoder,
                      discriminator_z=disc_z,
                      rec_x_mode=args.rec_x_mode,
                      use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'kld_loss': args.kld_loss_coeff,
        'tc_loss': args.tc_loss_coeff,
        'gp0_z_tc': args.gp0_z_tc_coeff,
        'Dz_tc_loss_coeff': args.Dz_tc_loss_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    save_dir = make_dir_if_not_exist(join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan, linewidth=1000, precision=5, suppress=True)

    num_bins = args.num_bins
    data_proportion = args.data_proportion
    num_data = int(data_proportion * num_train)
    assert num_data == num_train, "For dSprites, you must use all data!"

    # file
    f = open(join(save_dir, 'log[bins={},data={}].txt'.
                  format(num_bins, data_proportion)), mode='w')

    # print function
    print_ = functools.partial(print_both, file=f)

    print_("num_bins: {}".format(num_bins))
    print_("data_proportion: {}".format(data_proportion))

    # Compute representations
    # ================================= #
    z_data_file = join(save_dir, "z_data[data={}].npz".format(data_proportion))

    if not exists(z_data_file):
        all_z_mean = []
        all_z_stddev = []

        print("")
        print("Compute all_z_mean, all_z_stddev and all_attrs!")

        count = 0
        for batch_ids in iterate_data(num_data, 10 * args.batch_size, shuffle=False):
            x = x_train[batch_ids]

            z_mean, z_stddev = sess.run(
                model.get_output(['z_mean', 'z_stddev']),
                feed_dict={model.is_train: False, model.x_ph: x})

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)

        np.savez_compressed(z_data_file, all_z_mean=all_z_mean,
                            all_z_stddev=all_z_stddev)
    else:
        print("{} exists. Load data from file!".format(z_data_file))
        # Use a separate name here so we don't shadow the log file handle 'f'
        with np.load(z_data_file, "r") as zf:
            all_z_mean = zf['all_z_mean']
            all_z_stddev = zf['all_z_stddev']

    print_("")
    print_("all_z_mean.shape: {}".format(all_z_mean.shape))
    print_("all_z_stddev.shape: {}".format(all_z_stddev.shape))
    # ================================= #

    # Transpose and compute MIG score
    # ================================= #
    assert len(all_z_mean) == len(y_train)

    # (num_latents, num_samples)
    all_z_mean = np.transpose(all_z_mean, [1, 0])
    print_("")
    print_("all_z_mean.shape: {}".format(all_z_mean.shape))

    y_train = np.transpose(y_train, [1, 0])
    print_("")
    print_("y_train.shape: {}".format(y_train.shape))

    # All
    # --------------------------------- #
    result_all = compute_mig(all_z_mean, y_train, is_discrete_z=False,
                             is_discrete_y=True, num_bins=num_bins)

    # (num_factors,) MI gap per ground-truth factor
    MI_gap_y = result_all['MI_gap_y']
    attr_ids_sorted = np.argsort(MI_gap_y, axis=0)[::-1].tolist()
    MI_gap_y_sorted = MI_gap_y[attr_ids_sorted].tolist()

    print_("")
    print_("MIG: {}".format(result_all['MIG']))
    print_("Sorted factors:\n{}".format(list(zip(attr_ids_sorted, MI_gap_y_sorted))))

    save_file = join(save_dir, "results[bins={},data={}].npz".format(num_bins, data_proportion))
    np.savez_compressed(save_file, **result_all)
    # --------------------------------- #
    # ================================= #

    f.close()
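
# 'compute_mig' is imported from the metric utilities and not shown here. A rough
# sketch of the standard MIG recipe it presumably follows (Chen et al., 2018):
# discretize each continuous latent into bins, estimate I(z_i; y_k) for every
# latent/factor pair, and average the normalized gap between the two most
# informative latents per factor. The body below is an assumed illustration.
import numpy as np
from sklearn.metrics import mutual_info_score

def mig_sketch(z, y, num_bins=100):
    # z: (num_latents, num_samples) continuous; y: (num_factors, num_samples) discrete
    MI = np.zeros((z.shape[0], y.shape[0]))
    for i in range(z.shape[0]):
        zi_binned = np.digitize(z[i], np.histogram_bin_edges(z[i], bins=num_bins))
        for k in range(y.shape[0]):
            MI[i, k] = mutual_info_score(zi_binned, y[k])
    H_y = np.array([mutual_info_score(y[k], y[k]) for k in range(y.shape[0])])  # H(y_k)
    MI_sorted = np.sort(MI, axis=0)[::-1]  # descending over latents
    return np.mean((MI_sorted[0] - MI_sorted[1]) / np.maximum(H_y, 1e-12))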
Example #21
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']
        # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y
        y_train = f['latents_classes']

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)
    num_train = len(x_train)
    print("num_train: {}".format(num_train))

    print("y_train[:10]: {}".format(y_train[:10]))

    # =====================================
    # Instantiate model
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(
            args.enc_dec_model))

    model = AAE([64, 64, 1],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=5,
                        suppress=True)

    num_bins = args.num_bins
    bin_limits = tuple([float(s) for s in args.bin_limits.split(";")])
    data_proportion = args.data_proportion
    num_data = int(data_proportion * num_train)
    assert num_data == num_train, "For dSprites, you must use all data!"
    eps = 1e-8

    # file
    f = open(join(
        save_dir, 'log[bins={},bin_limits={},data={}].txt'.format(
            num_bins, bin_limits, data_proportion)),
             mode='w')

    # print function
    print_ = functools.partial(print_both, file=f)

    print_("num_bins: {}".format(num_bins))
    print_("bin_limits: {}".format(bin_limits))
    print_("data_proportion: {}".format(data_proportion))

    # Compute bins
    # ================================= #
    print_("")
    print_("bin_limits: {}".format(bin_limits))
    assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[1], \
        "bin_limits={}".format(bin_limits)

    bins = np.linspace(bin_limits[0],
                       bin_limits[1],
                       num_bins + 1,
                       endpoint=True)
    print_("bins: {}".format(bins))
    assert len(bins) == num_bins + 1

    bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))]
    print_("bin_widths: {}".format(bin_widths))
    assert len(bin_widths) == num_bins, \
        "len(bin_widths)={} while num_bins={}!".format(len(bin_widths), num_bins)
    assert np.all(np.greater(bin_widths, 0)), \
        "bin_widths: {}".format(bin_widths)

    bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))]
    print_("bin_centers: {}".format(bin_centers))
    assert len(bin_centers) == num_bins, \
        "len(bin_centers)={} while num_bins={}!".format(len(bin_centers), num_bins)
    # ================================= #

    # Compute representations
    # ================================= #
    z_data_file = join(save_dir, "z_data[data={}].npz".format(data_proportion))

    if not exists(z_data_file):
        all_z_mean = []
        all_z_stddev = []

        print("")
        print("Compute all_z_mean, all_z_stddev and all_attrs!")

        count = 0
        for batch_ids in iterate_data(num_data,
                                      10 * args.batch_size,
                                      shuffle=False):
            x = x_train[batch_ids]

            z_mean, z_stddev = sess.run(model.get_output(
                ['z_mean', 'z_stddev']),
                                        feed_dict={
                                            model.is_train: False,
                                            model.x_ph: x
                                        })

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)

        np.savez_compressed(z_data_file,
                            all_z_mean=all_z_mean,
                            all_z_stddev=all_z_stddev)
    else:
        print("{} exists. Load data from file!".format(z_data_file))
        # Use a separate name here so we don't shadow the log file handle 'f'
        with np.load(z_data_file, "r") as zf:
            all_z_mean = zf['all_z_mean']
            all_z_stddev = zf['all_z_stddev']
    # ================================= #

    print_("")
    all_Q_z_cond_x = []
    for i in range(args.z_dim):
        print_("\nCompute all_Q_z{}_cond_x!".format(i))

        all_Q_s_cond_x = []
        for batch_ids in iterate_data(len(all_z_mean),
                                      500,
                                      shuffle=False,
                                      include_remaining=True):
            # (batch_size, num_bins)
            q_s_cond_x = normal_density(
                np.expand_dims(bin_centers, axis=0),
                mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1),
                stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1))

            # (batch_size,)
            max_q_s_cond_x = np.max(q_s_cond_x, axis=-1)
            # print("\nmax_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x)))

            # (batch_size, num_bins)
            deter_s_cond_x = at_bin(all_z_mean[batch_ids, i],
                                    bins).astype(np.float32)

            # (batch_size, num_bins)
            Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0)
            Q_s_cond_x = Q_s_cond_x / np.maximum(
                np.sum(Q_s_cond_x, axis=1, keepdims=True), eps)
            # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            Q_s_cond_x = np.where(
                np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1),
                deter_s_cond_x, Q_s_cond_x)
            # print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            all_Q_s_cond_x.append(Q_s_cond_x)

        # (num_samples, num_bins)
        all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0)
        assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \
            "sorted_all_Q_s_cond_x[:30]:\n{}!".format(np.sort(all_Q_s_cond_x[:30], axis=None))
        assert len(all_Q_s_cond_x) == num_train

        all_Q_z_cond_x.append(all_Q_s_cond_x)

    # (z_dim, num_samples, num_bins)
    all_Q_z_cond_x = np.asarray(all_Q_z_cond_x, dtype=np.float32)
    print_("all_Q_z_cond_x.shape: {}".format(all_Q_z_cond_x.shape))
    print_("sum(all_Q_z_cond_x)[:, :10]:\n{}".format(
        np.sum(all_Q_z_cond_x, axis=-1)[:, :10]))

    # (z_dim, num_bins)
    Q_z = np.mean(all_Q_z_cond_x, axis=1)
    log_Q_z = np.log(np.clip(Q_z, eps, 1 - eps))
    print_("Q_z.shape: {}".format(Q_z.shape))
    print_("sum(Q_z): {}".format(np.sum(Q_z, axis=-1)))

    # (z_dim, )
    H_z = -np.sum(Q_z * log_Q_z, axis=-1)

    # Factors
    gt_factors = ['shape', 'scale', 'rotation', 'pos_x', 'pos_y']
    gt_num_values = [3, 6, 40, 32, 32]

    MI_z_y = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)
    H_z_y = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)

    ids_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.int32)
    MI_z_y_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)
    H_z_y_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)

    H_y = []
    RMIG = []
    JEMMI = []

    for k, (factor, num_values) in enumerate(zip(gt_factors, gt_num_values)):
        print_("\n#" + "=" * 50 + "#")
        print_("The {}-th gt factor '{}' has {} values!".format(
            k, factor, num_values))

        print_("")
        # (num_samples, num_categories)
        # NOTE: We must use k+1 to account for the 'color' attribute, which is always white
        all_Q_yk_cond_x = one_hot(y_train[:, k + 1],
                                  num_categories=num_values,
                                  dtype=np.float32)
        print_("all_Q_yk_cond_x.shape: {}".format(all_Q_yk_cond_x.shape))

        # (num_categories)
        Q_yk = np.mean(all_Q_yk_cond_x, axis=0)
        log_Q_yk = np.log(np.clip(Q_yk, eps, 1 - eps))
        print_("Q_yk.shape: {}".format(Q_yk.shape))

        H_yk = -np.sum(Q_yk * log_Q_yk)
        print_("H_yk: {}".format(H_yk))
        H_y.append(H_yk)

        Q_z_yk = np.zeros([args.z_dim, num_bins, num_values], dtype=np.float32)

        # Compute I(zi, yk)
        for i in range(args.z_dim):
            print_("\n#" + "-" * 50 + "#")
            all_Q_zi_cond_x = all_Q_z_cond_x[i]
            assert len(all_Q_zi_cond_x) == len(all_Q_yk_cond_x) == num_train, \
                "all_Q_zi_cond_x.shape: {}, all_Q_yk_cond_x.shape: {}".format(
                    all_Q_zi_cond_x.shape, all_Q_yk_cond_x.shape)

            # (num_bins, num_categories)
            Q_zi_yk = np.matmul(np.transpose(all_Q_zi_cond_x, axes=[1, 0]),
                                all_Q_yk_cond_x)
            Q_zi_yk = Q_zi_yk / num_train
            print_("np.sum(Q_zi_yk): {}".format(np.sum(Q_zi_yk)))
            Q_zi_yk = Q_zi_yk / np.maximum(np.sum(Q_zi_yk), eps)
            print_("np.sum(Q_zi_yk) (normalized): {}".format(np.sum(Q_zi_yk)))

            assert np.all(Q_zi_yk >= 0), "'Q_zi_yk' contains negative values. " \
                "sorted_Q_zi_yk[:10]:\n{}!".format(np.sort(Q_zi_yk, axis=None))

            # (num_bins, num_categories)
            log_Q_zi_yk = np.log(np.clip(Q_zi_yk, eps, 1 - eps))

            print_("")
            print_("Q_zi (default): {}".format(Q_z[i]))
            print_("Q_zi (sum of Q_zi_yk over yk): {}".format(
                np.sum(Q_zi_yk, axis=-1)))

            print_("")
            print_("Q_yk (default): {}".format(Q_yk))
            print_("Q_yk (sum of Q_zi_yk over zi): {}".format(
                np.sum(Q_zi_yk, axis=0)))

            MI_zi_yk = Q_zi_yk * (log_Q_zi_yk - np.expand_dims(
                log_Q_z[i], axis=-1) - np.expand_dims(log_Q_yk, axis=0))

            MI_zi_yk = np.sum(MI_zi_yk)
            H_zi_yk = -np.sum(Q_zi_yk * log_Q_zi_yk)

            Q_z_yk[i] = Q_zi_yk
            MI_z_y[i, k] = MI_zi_yk
            H_z_y[i, k] = H_zi_yk

            print_("#" + "-" * 50 + "#")

        # Print statistics for all z
        print_("")
        print_("MI_z_yk:\n{}".format(MI_z_y[:, k]))
        print_("H_z_yk:\n{}".format(H_z_y[:, k]))
        print_("H_z:\n{}".format(H_z))
        print_("H_yk:\n{}".format(H_yk))

        # Compute RMIG and JEMMI
        ids_yk_sorted = np.argsort(MI_z_y[:, k], axis=0)[::-1]
        MI_z_yk_sorted = np.take_along_axis(MI_z_y[:, k],
                                            ids_yk_sorted,
                                            axis=0)
        H_z_yk_sorted = np.take_along_axis(H_z_y[:, k], ids_yk_sorted, axis=0)

        RMIG_yk = np.divide(MI_z_yk_sorted[0] - MI_z_yk_sorted[1], H_yk)
        JEMMI_yk = np.divide(
            H_z_yk_sorted[0] - MI_z_yk_sorted[0] + MI_z_yk_sorted[1],
            H_yk + np.log(num_bins))

        ids_sorted[:, k] = ids_yk_sorted
        MI_z_y_sorted[:, k] = MI_z_yk_sorted
        H_z_y_sorted[:, k] = H_z_yk_sorted

        RMIG.append(RMIG_yk)
        JEMMI.append(JEMMI_yk)

        print_("")
        print_("ids_sorted: {}".format(ids_sorted))
        print_("MI_z_yk_sorted: {}".format(MI_z_yk_sorted))
        print_("RMIG_yk: {}".format(RMIG_yk))
        print_("JEMMI_yk: {}".format(JEMMI_yk))

        z_yk_prob_file = join(
            save_dir,
            "z_yk_prob_4_{}[bins={},bin_limits={},data={}].npz".format(
                factor, num_bins, bin_limits, data_proportion))
        np.savez_compressed(z_yk_prob_file, Q_z_yk=Q_z_yk)
        print_("#" + "=" * 50 + "#")

    results = {
        "MI_z_y": MI_z_y,
        "H_z_y": H_z_y,
        "ids_sorted": ids_sorted,
        "MI_z_y_sorted": MI_z_y_sorted,
        "H_z_y_sorted": H_z_y_sorted,
        "H_z": H_z,
        "H_y": np.asarray(H_y, dtype=np.float32),
        "RMIG": np.asarray(RMIG, dtype=np.float32),
        "JEMMI": np.asarray(JEMMI, dtype=np.float32),
    }
    result_file = join(
        save_dir, "results[bins={},bin_limits={},data={}].npz".format(
            num_bins, bin_limits, data_proportion))
    np.savez_compressed(result_file, **results)

    f.close()
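
# 'normal_density' and 'at_bin' used in the discretization loop above come from
# the metric utilities and are not shown here. Minimal assumed sketches, kept
# only to clarify how Q(z_i | x) is put onto the bin grid above.
import numpy as np

def normal_density(x, mean, stddev):
    # Gaussian pdf evaluated elementwise; broadcasts bin centers against (mean, stddev)
    return np.exp(-0.5 * ((x - mean) / stddev) ** 2) / (stddev * np.sqrt(2.0 * np.pi))

def at_bin(z, bins):
    # One-hot indicator of the bin each z falls into (values outside the range are
    # clipped into the first/last bin); returns shape (len(z), len(bins) - 1).
    idx = np.clip(np.digitize(z, bins) - 1, 0, len(bins) - 2)
    one_hot_bins = np.zeros((len(z), len(bins) - 1), dtype=np.float32)
    one_hot_bins[np.arange(len(z)), idx] = 1.0
    return one_hot_bins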
Example #22
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']

    x_train = np.reshape(x_train, [3, 6, 40, 32, 32, 64, 64, 1])

    # =====================================
    # Instantiate model
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(
            args.enc_dec_model))

    model = AAE([64, 64, 1],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    # save_dir = remove_dir_if_exist(join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)), ask_4_permission=True)
    # save_dir = make_dir_if_not_exist(save_dir)

    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=3,
                        suppress=True)

    num_bins = args.num_bins
    bin_limits = tuple([float(s) for s in args.bin_limits.split(";")])
    data_proportion = args.data_proportion

    # Logs
    f = open(join(
        save_dir, 'log[bins={},bin_limits={},data={}].txt'.format(
            num_bins, bin_limits, data_proportion)),
             mode='w')
    print_ = functools.partial(print_both, file=f)

    print_("")
    print_("num_bins: {}".format(num_bins))
    print_("bin_limits: {}".format(bin_limits))
    print_("data_proportion: {}".format(data_proportion))

    # Results
    result_file = join(
        args.interpretability_metrics_dir,
        "{}_{}".format(args.enc_dec_model, args.run),
        "results[bins={},bin_limits={},data={}].npz".format(
            num_bins, bin_limits, data_proportion))

    results = np.load(result_file, "r")
    print_("results.keys: {}".format(list(results.keys())))

    # Plotting
    # =========================================== #
    data = [
        x_train[0, 3, 20, 16, 16],
        x_train[1, 3, 20, 16, 16],
        x_train[2, 3, 20, 16, 16],
    ]

    gt_factors = ['Shape', 'Scale', 'Rotation', 'Pos_x', 'Pos_y']
    ids_sorted = results['ids_sorted']
    MI_z_y_sorted = results['MI_z_y_sorted']
    H_z_y_sorted = results['H_z_y_sorted']
    H_y = results['H_y']
    RMIG = results['RMIG']
    JEMMI = results['JEMMI']

    print_("MI_z_y_sorted:\n{}".format(MI_z_y_sorted))

    print_("\nShow RMIG!")
    for k in range(len(gt_factors)):
        print_(
            "{}, RMIG: {:.4f}, RMIG (unnorm): {:.4f}, H: {:.4f}, I1: {:.4f}, I2: {:.4f}"
            .format(gt_factors[k], RMIG[k], RMIG[k] * H_y[k], H_y[k],
                    MI_z_y_sorted[0, k], MI_z_y_sorted[1, k]))

    print_("\nShow JEMMI!")
    for k in range(len(gt_factors)):
        print_(
            "{}, JEMMI: {:.4f}, JEMMI (unnorm): {:.4f}, H1: {:.4f}, H1-I1: {:.4f}, I2: {:.4f}, "
            "top2 ids: z{}, z{}".format(
                gt_factors[k], JEMMI[k],
                JEMMI[k] * (H_y[k] + np.log(num_bins)), H_z_y_sorted[0, k],
                H_z_y_sorted[0, k] - MI_z_y_sorted[0, k], MI_z_y_sorted[1, k],
                ids_sorted[0, k], ids_sorted[1, k]))

    span = 3
    points_one_side = 5

    for n in range(len(data)):
        for k in range(len(gt_factors)):
            print("x={}, y={}!".format(n, gt_factors[k]))

            img_file = join(
                save_dir, "{}[x={},bins={},bin_limits={},data={}].png".format(
                    gt_factors[k], n, num_bins, bin_limits, data_proportion))
            '''
            ids_top10 = ids_sorted[:10, k]
            MI_top10 = MI_z_y_sorted[:10, k]
            model.cond_all_latents_traverse_v2(img_file, sess, data[n],
                                               z_comps=ids_top10,
                                               z_comp_labels=["z[{}] ({:.4f})".format(comp, mi)
                                                              for comp, mi in zip(ids_top10, MI_top10)],
                                               span=span, points_1_side=points_one_side,
                                               hl_x=True,
                                               font_size=9,
                                               title="{} (RMIG={:.4f}, JEMMI={:.4f}, H={:.4f})".format(
                                                   gt_factors[k], RMIG[k], JEMMI[k], H_y[k]),
                                               title_font_scale=1.5,
                                               subplot_adjust={'left': 0.16, 'right': 0.99,
                                                               'bottom': 0.01, 'top': 0.95},
                                               size_inches=(6.5, 5.2),
                                               batch_size=args.batch_size,
                                               dec_output_2_img_func=binary_float_to_uint8)
            '''

            ids_top3 = ids_sorted[:3, k]
            MI_top3 = MI_z_y_sorted[:3, k]
            model.cond_all_latents_traverse_v2(
                img_file,
                sess,
                data[n],
                z_comps=ids_top3,
                z_comp_labels=[
                    "z[{}] ({:.4f})".format(comp, mi)
                    for comp, mi in zip(ids_top3, MI_top3)
                ],
                span=span,
                points_1_side=points_one_side,
                hl_x=True,
                font_size=9,
                title="{} (RMIG={:.4f}, JEMMI={:.4f}, H={:.4f})".format(
                    gt_factors[k], RMIG[k], JEMMI[k], H_y[k]),
                title_font_scale=1.5,
                subplot_adjust={
                    'left': 0.16,
                    'right': 0.99,
                    'bottom': 0.01,
                    'top': 0.88
                },
                size_inches=(6.2, 1.7),
                batch_size=args.batch_size,
                dec_output_2_img_func=binary_float_to_uint8)

    f.close()
Example #23
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    celebA_loader = TFCelebAWithAttrLoader(root_dir=args.celebA_root_dir)

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size))

    # =====================================
    # Instantiate model
    # =====================================
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(args.activation))

    if args.enc_dec_model == "1Konny":
        # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)

        encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(args.enc_dec_model))

    model = AAE([img_height, img_width, 3], args.z_dim,
                encoder=encoder, decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True, gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    # save_dir = remove_dir_if_exist(join(args.save_dir, "AAE_{}".format(args.run)), ask_4_permission=True)
    # save_dir = make_dir_if_not_exist(save_dir)

    save_dir = make_dir_if_not_exist(join(args.save_dir, "AAE_{}".format(args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan, linewidth=1000, precision=4, suppress=True)

    num_bins = args.num_bins
    bin_limits = tuple([float(s) for s in args.bin_limits.split(";")])
    data_proportion = args.data_proportion
    num_data = int(data_proportion * celebA_loader.num_train_data)
    eps = 1e-8

    # file
    f = open(join(save_dir, 'log[bins={},bin_limits={},data={}].txt'.
                  format(num_bins, bin_limits, data_proportion)), mode='w')

    # print function
    print_ = functools.partial(print_both, file=f)

    print_("num_bins: {}".format(num_bins))
    print_("bin_limits: {}".format(bin_limits))
    print_("data_proportion: {}".format(data_proportion))

    # Compute representations
    # ================================= #
    z_data_file = join(save_dir, "z_data[data={}].npz".format(data_proportion))

    if not exists(z_data_file):
        all_z_mean = []
        all_z_stddev = []

        print("")
        print("Compute all_z_mean and all_z_stddev!")
        count = 0
        for batch_ids in iterate_data(num_data, 10 * args.batch_size, shuffle=False):
            x = celebA_loader.sample_images_from_dataset(sess, 'train', batch_ids)

            z_mean, z_stddev = sess.run(
                model.get_output(['z_mean', 'z_stddev']),
                feed_dict={model.is_train: False, model.x_ph: x})

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)

        np.savez_compressed(z_data_file, all_z_mean=all_z_mean, all_z_stddev=all_z_stddev)
    else:
        print("{} exists. Load data from file!".format(z_data_file))
        with np.load(z_data_file, "r") as f:
            all_z_mean = f['all_z_mean']
            all_z_stddev = f['all_z_stddev']

    print_("")
    print_("all_z_mean.shape: {}".format(all_z_mean.shape))
    print_("all_z_stddev.shape: {}".format(all_z_stddev.shape))
    # ================================= #

    # Compute bins
    # ================================= #
    print_("")
    print_("bin_limits: {}".format(bin_limits))
    assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[1], "bin_limits={}".format(bin_limits)

    bins = np.linspace(bin_limits[0], bin_limits[1], num_bins + 1, endpoint=True)
    print_("bins: {}".format(bins))
    assert len(bins) == num_bins + 1

    bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))]
    print_("bin_widths: {}".format(bin_widths))
    assert len(bin_widths) == num_bins, "len(bin_widths)={} while num_bins={}!".format(len(bin_widths), num_bins)
    assert np.all(np.greater(bin_widths, 0)), "bin_widths: {}".format(bin_widths)

    bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))]
    print_("bin_centers: {}".format(bin_centers))
    assert len(bin_centers) == num_bins, "len(bin_centers)={} while num_bins={}!".format(len(bin_centers), num_bins)
    # ================================= #
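    # Each latent z_i is quantized into `num_bins` equal-width bins spanning
    # `bin_limits`. Below, Q(s|x) is approximated by evaluating the Gaussian
    # posterior density q(z_i|x) at each bin center, multiplying by the bin
    # width and renormalizing; when the densities underflow (max < 1e-5), the
    # code falls back to a deterministic one-hot assignment of the posterior
    # mean to its bin (which is what `at_bin` is presumed to return).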

    # Compute mutual information
    # ================================= #
    H_z = []
    H_z_cond_x = []
    MI_z_x = []
    norm_MI_z_x = []
    Q_z_cond_x = []
    Q_z = []

    for i in range(args.z_dim):
        print_("")
        print_("Compute I(z{}, x)!".format(i))

        # Q_s_cond_x
        all_Q_s_cond_x = []
        for batch_ids in iterate_data(len(all_z_mean), 500, shuffle=False, include_remaining=True):
            # (batch_size, num_bins)
            q_s_cond_x = normal_density(np.expand_dims(bin_centers, axis=0),
                                        mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1),
                                        stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1))

            # (batch_size,)
            max_q_s_cond_x = np.max(q_s_cond_x, axis=-1)
            # print("\nmax_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x)))

            # (batch_size, num_bins)
            deter_s_cond_x = at_bin(all_z_mean[batch_ids, i], bins).astype(np.float32)

            # (batch_size, num_bins)
            Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0)
            Q_s_cond_x = Q_s_cond_x / np.maximum(np.sum(Q_s_cond_x, axis=1, keepdims=True), eps)
            # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            Q_s_cond_x = np.where(np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1),
                                  deter_s_cond_x, Q_s_cond_x)
            # print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            all_Q_s_cond_x.append(Q_s_cond_x)

        all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0)
        print_("sort(sum(all_Q_s_cond_x))[:10]: {}".format(
            np.sort(np.sum(all_Q_s_cond_x, axis=-1), axis=0)[:100]))
        assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \
            "sorted_all_Q_s_cond_x[:30]:\n{}!".format(np.sort(all_Q_s_cond_x[:30], axis=None))
        Q_z_cond_x.append(all_Q_s_cond_x)

        H_zi_cond_x = -np.mean(np.sum(all_Q_s_cond_x * np.log(np.maximum(all_Q_s_cond_x, eps)), axis=1), axis=0)

        # Q_s
        Q_s = np.mean(all_Q_s_cond_x, axis=0)
        print_("Q_s: {}".format(Q_s))
        print_("sum(Q_s): {}".format(sum(Q_s)))
        assert np.all(Q_s >= 0), "'Q_s' contains negative values. " \
            "sorted_Q_s[:10]:\n{}!".format(np.sort(Q_s, axis=None))

        Q_s = Q_s / np.sum(Q_s, axis=0)
        print_("sum(Q_s) (normalized): {}".format(sum(Q_s)))

        Q_z.append(Q_s)

        H_zi = -np.sum(Q_s * np.log(np.maximum(Q_s, eps)), axis=0)

        MI_zi_x = H_zi - H_zi_cond_x
        normalized_MI_zi_x = (1.0 * MI_zi_x) / (H_zi + eps)
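        # I(z_i; x) = H(z_i) - H(z_i | x); the normalized variant divides by
        # H(z_i) (+eps), so it lies in [0, 1] up to estimation noise.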

        print_("H_zi: {}".format(H_zi))
        print_("H_zi_cond_x: {}".format(H_zi_cond_x))
        print_("MI_zi_x: {}".format(MI_zi_x))
        print_("normalized_MI_zi_x: {}".format(normalized_MI_zi_x))

        H_z.append(H_zi)
        H_z_cond_x.append(H_zi_cond_x)
        MI_z_x.append(MI_zi_x)
        norm_MI_z_x.append(normalized_MI_zi_x)

    H_z = np.asarray(H_z, dtype=np.float32)
    H_z_cond_x = np.asarray(H_z_cond_x, dtype=np.float32)
    MI_z_x = np.asarray(MI_z_x, dtype=np.float32)
    norm_MI_z_x = np.asarray(norm_MI_z_x, dtype=np.float32)

    print_("")
    print_("H_z: {}".format(H_z))
    print_("H_z_cond_x: {}".format(H_z_cond_x))
    print_("MI_z_x: {}".format(MI_z_x))
    print_("norm_MI_z_x: {}".format(norm_MI_z_x))

    sorted_z_comps = np.argsort(MI_z_x, axis=0)[::-1]
    sorted_MI_z_x = np.take_along_axis(MI_z_x, sorted_z_comps, axis=0)
    print_("sorted_MI_z_x: {}".format(sorted_MI_z_x))
    print_("sorted_z_comps: {}".format(sorted_z_comps))

    sorted_norm_z_comps = np.argsort(norm_MI_z_x, axis=0)[::-1]
    sorted_norm_MI_z_x = np.take_along_axis(norm_MI_z_x, sorted_norm_z_comps, axis=0)
    print_("sorted_norm_MI_z_x: {}".format(sorted_norm_MI_z_x))
    print_("sorted_norm_z_comps: {}".format(sorted_norm_z_comps))

    result_file = join(save_dir, 'results[bins={},bin_limits={},data={}].npz'.
                       format(num_bins, bin_limits, data_proportion))

    np.savez_compressed(result_file,
                        H_z=H_z, H_z_cond_x=H_z_cond_x, MI_z_x=MI_z_x, norm_MI_z_x=norm_MI_z_x,
                        sorted_MI_z_x=sorted_MI_z_x, sorted_z_comps=sorted_z_comps,
                        sorted_norm_MI_z_x=sorted_norm_MI_z_x,
                        sorted_norm_z_comps=sorted_norm_z_comps)

    Q_z_cond_x = np.asarray(Q_z_cond_x, dtype=np.float32)
    Q_z = np.asarray(Q_z, dtype=np.float32)
    z_prob_file = join(save_dir, 'z_prob[bins={},bin_limits={},data={}].npz'.
                       format(num_bins, bin_limits, data_proportion))
    np.savez_compressed(z_prob_file, Q_z_cond_x=Q_z_cond_x, Q_z=Q_z)
Example #24
def main():
    raw_data_file = download_if_not_exist(DIR_RAW, "cifar-10-python.tar.gz",
                                          DATASET_URL)
    dataset = extract_data_and_labels(raw_data_file)

    train_data, train_labels = dataset['train_data'], dataset['train_labels']
    test_data, test_labels = dataset['test_data'], dataset['test_labels']
    label_names = dataset['label_names']

    modes = ['bytes', '0to1', 'm1p1']
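    # 'bytes' keeps the raw uint8 pixels, '0to1' rescales them to [0, 1]
    # (divide by 255), and 'm1p1' rescales them to [-1, 1] (divide by 127.5,
    # then shift by -1), assuming uint8_to_float computes
    # x / pixel_inv_scale + pixel_shift.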

    for mode in modes:
        print("\nCreate the '{}' version of CIFAR10!".format(mode))
        if mode == 'bytes':
            processed_train_data = train_data
            processed_test_data = test_data
        elif mode == '0to1':
            processed_train_data = uint8_to_float(train_data,
                                                  pixel_inv_scale=255,
                                                  pixel_shift=0)
            processed_test_data = uint8_to_float(test_data,
                                                 pixel_inv_scale=255,
                                                 pixel_shift=0)
        elif mode == 'm1p1':
            processed_train_data = uint8_to_float(train_data,
                                                  pixel_inv_scale=127.5,
                                                  pixel_shift=-1)
            processed_test_data = uint8_to_float(test_data,
                                                 pixel_inv_scale=127.5,
                                                 pixel_shift=-1)
        else:
            raise ValueError("Only support 'mode' in {}!".format(modes))

        save_dir = make_dir_if_not_exist(join(DIR_PROCESSED, mode))

        np.savez_compressed(join(save_dir, "train.npz"),
                            x=processed_train_data,
                            y=train_labels,
                            y_names=label_names)
        np.savez_compressed(join(save_dir, "test.npz"),
                            x=processed_test_data,
                            y=test_labels,
                            y_names=label_names)

        num_cols = 5
        train_idx = 100
        test_idx = 100

        # Uncomment if you want to show images
        # ---------------------------------- #
        '''
        import matplotlib.pyplot as plt
        fig, axes = plt.subplots(2, num_cols)
        for n in range(num_cols):
            if mode == "bytes" or mode == "0to1":
                plot_image(axes[0][n], processed_train_data[train_idx + n], title="train[{}]: {}".
                           format(train_idx + n, label_names[train_labels[train_idx + n]]))

                plot_image(axes[1][n], processed_test_data[test_idx + n], title="test[{}]: {}".
                           format(test_idx + n, label_names[test_labels[test_idx + n]]))

            elif mode == "m1p1":
                plot_image(axes[0][n], (processed_train_data[train_idx + n] + 1.0) / 2.0, title="train[{}]: {}".
                           format(train_idx + n, label_names[train_labels[train_idx + n]]))

                plot_image(axes[1][n], (processed_test_data[test_idx + n] + 1.0) / 2.0, title="test[{}]: {}".
                           format(test_idx + n, label_names[test_labels[test_idx + n]]))

        plt.show()
        '''
        # ---------------------------------- #

    from shutil import copyfile
    copyfile(join(DIR_PROCESSED, "bytes", "train.npz"),
             join(DIR_PROCESSED, "train.npz"))
    copyfile(join(DIR_PROCESSED, "bytes", "test.npz"),
             join(DIR_PROCESSED, "test.npz"))
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    celebA_loader = TFCelebAWithAttrLoader(root_dir=args.celebA_root_dir)

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny",
                                         resize_size=args.celebA_resize_size))

    # =====================================
    # Instantiate model
    # =====================================
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(
            args.activation))

    if args.enc_dec_model == "1Konny":
        # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)

        encoder = Encoder_1Konny(args.z_dim,
                                 stochastic=True,
                                 activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3],
                                 activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(
            args.enc_dec_model))

    model = AAE([img_height, img_width, 3],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    # save_dir = remove_dir_if_exist(join(args.save_dir, "AAE_{}".format(args.run)), ask_4_permission=False)
    # save_dir = make_dir_if_not_exist(save_dir)

    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "AAE_{}".format(args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=3,
                        suppress=True)

    num_bins = args.num_bins
    bin_limits = tuple([float(s) for s in args.bin_limits.split(";")])
    data_proportion = args.data_proportion
    num_data = int(data_proportion * celebA_loader.num_train_data)
    eps = 1e-8

    # file
    f = open(join(
        save_dir, 'log[bins={},bin_limits={},data={}].txt'.format(
            num_bins, bin_limits, data_proportion)),
             mode='w')

    # print function
    print_ = functools.partial(print_both, file=f)
    '''
    if attr_type == 0:
        attr_names = celebA_loader.attributes
    elif attr_type == 1:
        attr_names = ['Male', 'Black_Hair', 'Blond_Hair', 'Straight_Hair', 'Wavy_Hair', 'Bald',
                      'Oval_Face', 'Big_Nose', 'Chubby', 'Double_Chin', 'Goatee', 'No_Beard',
                      'Mouth_Slightly_Open', 'Smiling',
                      'Eyeglasses', 'Pale_Skin']
    else:
        raise ValueError("Only support factor_type=0 or 1!")
    '''

    print_("num_bins: {}".format(num_bins))
    print_("bin_limits: {}".format(bin_limits))
    print_("data_proportion: {}".format(data_proportion))

    # Compute bins
    # ================================= #
    print_("")
    print_("bin_limits: {}".format(bin_limits))
    assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[
        1], "bin_limits={}".format(bin_limits)

    bins = np.linspace(bin_limits[0],
                       bin_limits[1],
                       num_bins + 1,
                       endpoint=True)
    print_("bins: {}".format(bins))
    assert len(bins) == num_bins + 1

    bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))]
    print_("bin_widths: {}".format(bin_widths))
    assert len(bin_widths
               ) == num_bins, "len(bin_widths)={} while num_bins={}!".format(
                   len(bin_widths), num_bins)
    assert np.all(np.greater(bin_widths,
                             0)), "bin_widths: {}".format(bin_widths)

    bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))]
    print_("bin_centers: {}".format(bin_centers))
    assert len(bin_centers
               ) == num_bins, "len(bin_centers)={} while num_bins={}!".format(
                   len(bin_centers), num_bins)
    # ================================= #

    # Compute representations
    # ================================= #
    z_data_attr_file = join(save_dir,
                            "z_data[data={}].npz".format(data_proportion))

    if not exists(z_data_attr_file):
        all_z_mean = []
        all_z_stddev = []
        all_attrs = []

        print("")
        print("Compute all_z_mean, all_z_stddev and all_attrs!")

        count = 0
        for batch_ids in iterate_data(num_data,
                                      10 * args.batch_size,
                                      shuffle=False):
            x = celebA_loader.sample_images_from_dataset(
                sess, 'train', batch_ids)
            attrs = celebA_loader.sample_attrs_from_dataset('train', batch_ids)
            assert attrs.shape[1] == celebA_loader.num_attributes

            z_mean, z_stddev = sess.run(model.get_output(
                ['z_mean', 'z_stddev']),
                                        feed_dict={
                                            model.is_train: False,
                                            model.x_ph: x
                                        })

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)
            all_attrs.append(attrs)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)
        all_attrs = np.concatenate(all_attrs, axis=0)

        np.savez_compressed(z_data_attr_file,
                            all_z_mean=all_z_mean,
                            all_z_stddev=all_z_stddev,
                            all_attrs=all_attrs)
    else:
        print("{} exists. Load data from file!".format(z_data_attr_file))
        with np.load(z_data_attr_file, "r") as f:
            all_z_mean = f['all_z_mean']
            all_z_stddev = f['all_z_stddev']
            all_attrs = f['all_attrs']

    print_("")
    print_("all_z_mean.shape: {}".format(all_z_mean.shape))
    print_("all_z_stddev.shape: {}".format(all_z_stddev.shape))
    print_("all_attrs.shape: {}".format(all_attrs.shape))
    # ================================= #

    # Compute the probability mass function for ground truth factors
    # ================================= #
    num_attrs = all_attrs.shape[1]

    assert all_attrs.dtype == np.bool
    all_attrs = all_attrs.astype(np.int32)

    # (num_samples, num_attrs, 2): the first channel corresponds to y_k=1 (attribute present), the second to y_k=0 (absent)
    all_Q_y_cond_x = np.stack([all_attrs, 1 - all_attrs], axis=-1)
    # ================================= #
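    # Since the attributes are observed, Q(y_k|x) is a point mass: each
    # (sample, attribute) entry above is a one-hot pair over {y_k=1, y_k=0}.
    # The marginal Q(y_k) and entropy H(y_k) computed below are therefore
    # effectively the empirical attribute frequencies and their entropies.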

    # Compute Q(zi|x)
    # Compute I(zi, yk)
    # ================================= #
    Q_z_y = np.zeros([args.z_dim, num_attrs, num_bins, 2], dtype=np.float32)
    MI_z_y = np.zeros([args.z_dim, num_attrs], dtype=np.float32)
    H_z_y = np.zeros([args.z_dim, num_attrs], dtype=np.float32)
    H_z_4_diff_y = np.zeros([args.z_dim, num_attrs], dtype=np.float32)
    H_y_4_diff_z = np.zeros([num_attrs, args.z_dim], dtype=np.float32)

    for i in range(args.z_dim):
        print_("")
        print_("Compute all_Q_z{}_cond_x!".format(i))

        # Q_s_cond_x
        all_Q_s_cond_x = []
        for batch_ids in iterate_data(len(all_z_mean),
                                      500,
                                      shuffle=False,
                                      include_remaining=True):
            # (batch_size, num_bins)
            q_s_cond_x = normal_density(
                np.expand_dims(bin_centers, axis=0),
                mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1),
                stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1))

            # (batch_size,)
            max_q_s_cond_x = np.max(q_s_cond_x, axis=-1)
            # print("\nmax_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x)))

            # (batch_size, num_bins)
            deter_s_cond_x = at_bin(all_z_mean[batch_ids, i],
                                    bins).astype(np.float32)

            # (batch_size, num_bins)
            Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0)
            Q_s_cond_x = Q_s_cond_x / np.maximum(
                np.sum(Q_s_cond_x, axis=1, keepdims=True), eps)
            # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            Q_s_cond_x = np.where(
                np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1),
                deter_s_cond_x, Q_s_cond_x)
            # print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            all_Q_s_cond_x.append(Q_s_cond_x)

        # (num_samples, num_bins)
        all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0)
        assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \
                                            "sorted_all_Q_s_cond_x[:30]:\n{}!".format(
            np.sort(all_Q_s_cond_x[:30], axis=None))

        assert len(all_Q_s_cond_x) == len(
            all_attrs), "all_Q_s_cond_x.shape={}, all_attrs.shape={}".format(
                all_Q_s_cond_x.shape, all_attrs.shape)

        # I(z, y)
        for k in range(num_attrs):
            # Compute Q(zi, yk)
            # -------------------------------- #
            # (num_bins, 2)
            Q_zi_yk = np.matmul(np.transpose(all_Q_s_cond_x, axes=[1, 0]),
                                all_Q_y_cond_x[:, k, :])
            Q_zi_yk = Q_zi_yk / len(all_Q_y_cond_x)
            Q_zi_yk = Q_zi_yk / np.maximum(np.sum(Q_zi_yk), eps)
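            # Monte Carlo estimate of the joint: Q(z_i, y_k) is the average of
            # Q(z_i|x) * Q(y_k|x) over the samples, renormalized to sum to 1.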

            assert np.all(Q_zi_yk >= 0), "'Q_zi_yk' contains negative values. " \
                "sorted_Q_zi_yk[:10]:\n{}!".format(np.sort(Q_zi_yk, axis=None))

            log_Q_zi_yk = np.log(np.clip(Q_zi_yk, eps, 1 - eps))

            Q_z_y[i, k] = Q_zi_yk
            print_("sum(Q_zi_yk): {}".format(np.sum(Q_zi_yk)))
            # -------------------------------- #

            # Compute Q_z
            # -------------------------------- #
            Q_zi = np.sum(Q_zi_yk, axis=1)
            log_Q_zi = np.log(np.clip(Q_zi, eps, 1 - eps))
            print_("sum(Q_z{}): {}".format(i, np.sum(Q_zi)))
            print_("Q_z{}: {}".format(i, Q_zi))
            # -------------------------------- #

            # Compute Q_y
            # -------------------------------- #
            Q_yk = np.sum(Q_zi_yk, axis=0)
            log_Q_yk = np.log(np.clip(Q_yk, eps, 1 - eps))
            print_("sum(Q_y{}): {}".format(k, np.sum(Q_yk)))
            print_("Q_y{}: {}".format(k, np.sum(Q_yk)))
            # -------------------------------- #

            MI_zi_yk = Q_zi_yk * (log_Q_zi_yk - np.expand_dims(
                log_Q_zi, axis=-1) - np.expand_dims(log_Q_yk, axis=0))

            MI_zi_yk = np.sum(MI_zi_yk)
            H_zi_yk = -np.sum(Q_zi_yk * log_Q_zi_yk)
            H_zi = -np.sum(Q_zi * log_Q_zi)
            H_yk = -np.sum(Q_yk * log_Q_yk)

            MI_z_y[i, k] = MI_zi_yk
            H_z_y[i, k] = H_zi_yk
            H_z_4_diff_y[i, k] = H_zi
            H_y_4_diff_z[k, i] = H_yk
    # ================================= #

    print_("")
    print_("MI_z_y:\n{}".format(MI_z_y))
    print_("H_z_y:\n{}".format(H_z_y))
    print_("H_z_4_diff_y:\n{}".format(H_z_4_diff_y))
    print_("H_y_4_diff_z:\n{}".format(H_y_4_diff_z))

    # Compute metric
    # ================================= #
    # Sorted in decreasing order
    MI_ids_sorted = np.argsort(MI_z_y, axis=0)[::-1]
    MI_sorted = np.take_along_axis(MI_z_y, MI_ids_sorted, axis=0)

    MI_gap_y = np.divide(MI_sorted[0, :] - MI_sorted[1, :], H_y_4_diff_z[:, 0])
    MIG = np.mean(MI_gap_y)
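    # MIG = mean over factors of (I(z*; y_k) - I(z**; y_k)) / H(y_k), where
    # z* and z** are the two latents with the highest MI with factor y_k
    # (H(y_k) is taken from H_y_4_diff_z[:, 0], which is the same for every z_i).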

    print_("")
    print_("MI_sorted: {}".format(MI_sorted))
    print_("MI_ids_sorted: {}".format(MI_ids_sorted))
    print_("MI_gap_y: {}".format(MI_gap_y))
    print_("MIG: {}".format(MIG))

    results = {
        'Q_z_y': Q_z_y,
        'MI_z_y': MI_z_y,
        'H_z_y': H_z_y,
        'H_z_4_diff_y': H_z_4_diff_y,
        'H_y_4_diff_z': H_y_4_diff_z,
        'MI_sorted': MI_sorted,
        'MI_ids_sorted': MI_ids_sorted,
        'MI_gap_y': MI_gap_y,
        'MIG': MIG,
    }

    result_file = join(
        save_dir, 'results[bins={},bin_limits={},data={}].npz'.format(
            num_bins, bin_limits, data_proportion))
    np.savez_compressed(result_file, **results)
    # ================================= #

    f.close()
Example #26
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(os.path.join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Preparation
    # =====================================
    data_file = os.path.join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                             "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)

    # =====================================
    # Instantiate models
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(
            args.enc_dec_model))

    model = AAE([64, 64, 1],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_list()

    # =====================================
    # TF Graph Handler
    asset_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "asset"))
    img_eval = remove_dir_if_exist(os.path.join(asset_dir, "img_eval"),
                                   ask_4_permission=False)
    img_eval = make_dir_if_not_exist(img_eval)

    img_x_rec = make_dir_if_not_exist(os.path.join(img_eval, "x_rec"))
    img_z_rand_2_traversal = make_dir_if_not_exist(
        os.path.join(img_eval, "z_rand_2_traversal"))
    img_z_cond_all_traversal = make_dir_if_not_exist(
        os.path.join(img_eval, "z_cond_all_traversal"))
    img_z_cond_1_traversal = make_dir_if_not_exist(
        os.path.join(img_eval, "z_cond_1_traversal"))
    img_z_corr = make_dir_if_not_exist(os.path.join(img_eval, "z_corr"))
    img_z_dist = make_dir_if_not_exist(os.path.join(img_eval, "z_dist"))
    img_z_stat_dist = make_dir_if_not_exist(
        os.path.join(img_eval, "z_stat_dist"))
    img_rec_error_dist = make_dir_if_not_exist(
        os.path.join(img_eval, "rec_error_dist"))

    model_dir = make_dir_if_not_exist(os.path.join(args.output_dir,
                                                   "model_tf"))

    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)
    # =====================================

    # =====================================
    # Training Loop
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    #'''
    # Reconstruction
    # ======================================= #
    seed = 389
    x = x_train[np.arange(seed, seed + 64)]

    img_file = os.path.join(img_x_rec, 'x_rec_train.png')
    model.reconstruct_images(img_file,
                             sess,
                             x,
                             block_shape=[8, 8],
                             batch_size=-1,
                             dec_output_2_img_func=binary_float_to_uint8)
    # ======================================= #

    # z random/conditional traversal
    # ======================================= #
    # Plot z cont with z cont
    z_zero = np.zeros([args.z_dim], dtype=np.float32)
    z_rand = np.random.randn(args.z_dim)
    z_start, z_stop = -4, 4
    num_points = 8

    for i in range(args.z_dim):
        for j in range(i + 1, args.z_dim):
            print("Plot random 2 comps z traverse with {} and {} components!".
                  format(i, j))

            img_file = os.path.join(img_z_rand_2_traversal,
                                    'z[{},{},zero].png'.format(i, j))
            model.rand_2_latents_traverse(
                img_file,
                sess,
                default_z=z_zero,
                z_comp1=i,
                start1=z_start,
                stop1=z_stop,
                num_points1=num_points,
                z_comp2=j,
                start2=z_start,
                stop2=z_stop,
                num_points2=num_points,
                batch_size=args.batch_size,
                dec_output_2_img_func=binary_float_to_uint8)

            img_file = os.path.join(img_z_rand_2_traversal,
                                    'z[{},{},rand].png'.format(i, j))
            model.rand_2_latents_traverse(
                img_file,
                sess,
                default_z=z_rand,
                z_comp1=i,
                start1=z_start,
                stop1=z_stop,
                num_points1=num_points,
                z_comp2=j,
                start2=z_start,
                stop2=z_stop,
                num_points2=num_points,
                batch_size=args.batch_size,
                dec_output_2_img_func=binary_float_to_uint8)

    seed = 389
    z_start, z_stop = -4, 4
    num_itpl_points = 8

    for n in range(seed, seed + 30):
        print("Plot conditional all comps z traverse with test sample {}!".
              format(n))

        x = x_train[n]
        img_file = os.path.join(img_z_cond_all_traversal,
                                'x_train{}.png'.format(n))
        model.cond_all_latents_traverse(
            img_file,
            sess,
            x,
            start=z_start,
            stop=z_stop,
            num_itpl_points=num_itpl_points,
            batch_size=args.batch_size,
            dec_output_2_img_func=binary_float_to_uint8)

    seed = 64
    z_start, z_stop = -4, 4
    num_itpl_points = 8
    print("Plot conditional 1 comp z traverse!")
    for i in range(args.z_dim):
        x = x_train[seed:seed + 64]
        img_file = os.path.join(
            img_z_cond_1_traversal,
            'x_train[{},{}]_z{}.png'.format(seed, seed + 64, i))
        model.cond_1_latent_traverse(
            img_file,
            sess,
            x,
            z_comp=i,
            start=z_start,
            stop=z_stop,
            num_itpl_points=num_itpl_points,
            batch_size=args.batch_size,
            dec_output_2_img_func=binary_float_to_uint8)
    # ======================================= #
    # '''

    # z correlation matrix
    # ======================================= #
    data = x_train

    all_z = []
    for batch_ids in iterate_data(len(data), args.batch_size, shuffle=False):
        x = data[batch_ids]

        z = model.encode(sess, x)
        assert len(
            z.shape) == 2 and z.shape[1] == args.z_dim, "z.shape: {}".format(
                z.shape)

        all_z.append(z)

    all_z = np.concatenate(all_z, axis=0)

    print("Start plotting!")
    plot_corrmat_with_histogram(os.path.join(img_z_corr, "corr_mat.png"),
                                all_z)
    plot_comp_dist(os.path.join(img_z_dist, 'z_{}'), all_z, x_lim=(-5, 5))
    print("Done!")
    # ======================================= #

    # '''
    # z gaussian stddev
    # ======================================= #
    print("\nPlot z mean and stddev!")
    data = x_train

    all_z_mean = []
    all_z_stddev = []

    for batch_ids in iterate_data(len(data), args.batch_size, shuffle=False):
        x = data[batch_ids]

        z_mean, z_stddev = sess.run(model.get_output(['z_mean', 'z_stddev']),
                                    feed_dict={
                                        model.is_train: False,
                                        model.x_ph: x
                                    })

        all_z_mean.append(z_mean)
        all_z_stddev.append(z_stddev)

    all_z_mean = np.concatenate(all_z_mean, axis=0)
    all_z_stddev = np.concatenate(all_z_stddev, axis=0)

    plot_comp_dist(os.path.join(img_z_stat_dist, 'z_mean_{}.png'),
                   all_z_mean,
                   x_lim=(-5, 5))
    plot_comp_dist(os.path.join(img_z_stat_dist, 'z_stddev_{}.png'),
                   all_z_stddev,
                   x_lim=(0, 3))
    # ======================================= #
    # '''

    # Decoder sensitivity
    # ======================================= #
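    # Decoder sensitivity: for each latent dimension i, clamp z_i to z_start
    # and z_stop (keeping all other dimensions at their encoded values),
    # decode both versions, and plot the distribution of the per-image squared
    # differences between the two reconstructions.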
    z_start = -3
    z_stop = 3
    for i in range(args.z_dim):
        print("\nPlot rec error distribution for z component {}!".format(i))

        all_z1 = np.array(all_z, copy=True, dtype=np.float32)
        all_z2 = np.array(all_z, copy=True, dtype=np.float32)

        all_z1[:, i] = z_start
        all_z2[:, i] = z_stop

        all_x_rec1 = []
        all_x_rec2 = []
        for batch_ids in iterate_data(len(x_train),
                                      args.batch_size,
                                      shuffle=False):
            z1 = all_z1[batch_ids]
            z2 = all_z2[batch_ids]

            x1 = model.decode(sess, z1)
            x2 = model.decode(sess, z2)

            all_x_rec1.append(x1)
            all_x_rec2.append(x2)

        all_x_rec1 = np.concatenate(all_x_rec1, axis=0)
        all_x_rec2 = np.concatenate(all_x_rec2, axis=0)

        rec_errors = np.sum(np.reshape((all_x_rec1 - all_x_rec2)**2,
                                       [len(x_train), 64 * 64]),
                            axis=1)
        plot_comp_dist(
            os.path.join(
                img_rec_error_dist,
                'rec_error[zi={},{},{}].png'.format(i, z_start, z_stop)),
            rec_errors)
Example #27
def plot_JEMMIG_quan_tc_beta(save_dir, JEMMIG_quan_result_files, labels):
    JEMMIGs_all = []
    JEMMIGs_by_tc = {}
    JEMMIGs_by_beta = {}
    JEMMIGs_by_Gz = {}
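    # Each label is expected to contain one of the substrings 'tc', 'beta' or
    # 'Gz' followed by its value (for 'Gz', the next two characters); runs are
    # grouped by that hyperparameter so mean/std bars can be plotted per
    # setting below.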

    for i in range(len(JEMMIG_quan_result_files)):
        JEMMIG_results = np.load(JEMMIG_quan_result_files[i], "r")
        MI_z_y_sorted = JEMMIG_results['MI_z_y_sorted']
        H_z_y_sorted = JEMMIG_results['H_z_y_sorted']
        JEMMIG = np.mean(H_z_y_sorted[0, :] - MI_z_y_sorted[0, :] +
                         MI_z_y_sorted[1, :],
                         axis=0)
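        # JEMMIG(y_k) = H(z*, y_k) - I(z*; y_k) + I(z**; y_k), with z* and z**
        # the top-2 latents by MI with y_k; the scalar stored here is the mean
        # over factors.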

        JEMMIGs_all.append(JEMMIG)

        idx = labels[i].find('tc')
        if idx < 0:
            idx = labels[i].find('beta')
            if idx < 0:
                idx = labels[i].find('Gz')
                assert idx >= 0, "labels[{}]='{}'".format(i, labels[i])

                Gz = int(labels[i][idx + len('Gz'):idx + len('Gz') + 2])
                jemmig_list = JEMMIGs_by_Gz.get(Gz)
                if jemmig_list is None:
                    JEMMIGs_by_Gz[Gz] = [JEMMIG]
                else:
                    jemmig_list.append(JEMMIG)

            else:
                beta = int(labels[i][idx + len('beta'):])
                jemmig_list = JEMMIGs_by_beta.get(beta)
                if jemmig_list is None:
                    JEMMIGs_by_beta[beta] = [JEMMIG]
                else:
                    jemmig_list.append(JEMMIG)
        else:
            tc = int(labels[i][idx + len('tc'):])
            jemmig_list = JEMMIGs_by_tc.get(tc)
            if jemmig_list is None:
                JEMMIGs_by_tc[tc] = [JEMMIG]
            else:
                jemmig_list.append(JEMMIG)

    tc_list = ["{}".format(tc) for tc, _ in iteritems(JEMMIGs_by_tc)]
    JEMMIGs_mean_by_tc = [(tc, np.mean(jemmig_list))
                          for tc, jemmig_list in iteritems(JEMMIGs_by_tc)]
    JEMMIGs_std_by_tc = [(tc, np.std(jemmig_list))
                         for tc, jemmig_list in iteritems(JEMMIGs_by_tc)]

    beta_list = ["{}".format(beta) for beta, _ in iteritems(JEMMIGs_by_beta)]
    JEMMIGs_mean_by_beta = [(beta, np.mean(jemmig_list))
                            for beta, jemmig_list in iteritems(JEMMIGs_by_beta)
                            ]
    JEMMIGs_std_by_beta = [(beta, np.std(jemmig_list))
                           for beta, jemmig_list in iteritems(JEMMIGs_by_beta)]

    Gz_list = ["{}".format(Gz) for Gz, _ in iteritems(JEMMIGs_by_Gz)]
    JEMMIGs_mean_by_Gz = [(Gz, np.mean(jemmig_list))
                          for Gz, jemmig_list in iteritems(JEMMIGs_by_Gz)]
    JEMMIGs_std_by_Gz = [(Gz, np.std(jemmig_list))
                         for Gz, jemmig_list in iteritems(JEMMIGs_by_Gz)]

    # Plotting JEMMIG (quantization) grouped by TC / Beta / Gz
    # =========================================== #
    font = {'size': 12}

    matplotlib.rc('font', **font)

    width = 0.5
    plt.bar(range(0, len(tc_list)), [a[1] for a in JEMMIGs_mean_by_tc],
            yerr=[a[1] for a in JEMMIGs_std_by_tc],
            width=width,
            align='center',
            label="TC")
    plt.bar(range(len(tc_list),
                  len(beta_list) + len(tc_list)),
            [a[1] for a in JEMMIGs_mean_by_beta],
            yerr=[a[1] for a in JEMMIGs_std_by_beta],
            width=width,
            align='center',
            label="Beta")
    plt.bar(range(
        len(beta_list) + len(tc_list),
        len(beta_list) + len(tc_list) + len(Gz_list)),
            [a[1] for a in JEMMIGs_mean_by_Gz],
            yerr=[a[1] for a in JEMMIGs_std_by_Gz],
            width=width,
            align='center',
            label="Gz")
    plt.xticks(range(0,
                     len(tc_list) + len(beta_list) + len(Gz_list)),
               tc_list + beta_list + Gz_list)

    plt.legend()
    plt.xlabel("model")
    plt.ylabel("JEMMIG (quantization)")
    plt.ylim(bottom=3)
    # plt.tight_layout()

    subplot_adjust = {'left': 0.08, 'right': 0.99, 'bottom': 0.17, 'top': 0.98}
    plt.subplots_adjust(**subplot_adjust)
    plt.gcf().set_size_inches(9, 3.2)

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "JEMMIG_tc_beta_Gz.pdf")

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()
Example #28
def plot_RMIG_tc_beta(save_dir, JEMMIG_result_files, labels):
    RMIGs_all = []
    RMIGs_by_tc = {}
    RMIGs_by_beta = {}
    RMIGs_by_Gz = {}

    for i in range(len(JEMMIG_result_files)):
        RMIG_results = np.load(JEMMIG_result_files[i], "r")
        RMIGs_all.append(np.mean(RMIG_results['RMIG_yk']))

        idx = labels[i].find('tc')
        if idx < 0:
            idx = labels[i].find('beta')
            if idx < 0:
                idx = labels[i].find('Gz')
                assert idx >= 0, "labels[{}]='{}'".format(i, labels[i])

                Gz = int(labels[i][idx + len('Gz'):idx + len('Gz') + 2])
                rmig_list = RMIGs_by_Gz.get(Gz)
                if rmig_list is None:
                    RMIGs_by_Gz[Gz] = [np.mean(RMIG_results['RMIG_yk'])]
                else:
                    rmig_list.append(np.mean(RMIG_results['RMIG_yk']))
            else:
                beta = int(labels[i][idx + len('beta'):])
                rmig_list = RMIGs_by_beta.get(beta)
                if rmig_list is None:
                    RMIGs_by_beta[beta] = [np.mean(RMIG_results['RMIG_yk'])]
                else:
                    rmig_list.append(np.mean(RMIG_results['RMIG_yk']))
        else:
            tc = int(labels[i][idx + len('tc'):])
            rmig_list = RMIGs_by_tc.get(tc)
            if rmig_list is None:
                RMIGs_by_tc[tc] = [np.mean(RMIG_results['RMIG_yk'])]
            else:
                rmig_list.append(np.mean(RMIG_results['RMIG_yk']))

    tc_list = ["{}".format(tc) for tc, _ in iteritems(RMIGs_by_tc)]
    RMIGs_mean_by_tc = [(tc, np.mean(rmig_list))
                        for tc, rmig_list in iteritems(RMIGs_by_tc)]
    RMIGs_std_by_tc = [(tc, np.std(rmig_list))
                       for tc, rmig_list in iteritems(RMIGs_by_tc)]

    beta_list = ["{}".format(beta) for beta, _ in iteritems(RMIGs_by_beta)]
    RMIGs_mean_by_beta = [(beta, np.mean(rmig_list))
                          for beta, rmig_list in iteritems(RMIGs_by_beta)]
    RMIGs_std_by_beta = [(beta, np.std(rmig_list))
                         for beta, rmig_list in iteritems(RMIGs_by_beta)]

    Gz_list = ["{}".format(Gz) for Gz, _ in iteritems(RMIGs_by_Gz)]
    RMIGs_mean_by_Gz = [(Gz, np.mean(rmig_list))
                        for Gz, rmig_list in iteritems(RMIGs_by_Gz)]
    RMIGs_std_by_Gz = [(Gz, np.std(rmig_list))
                       for Gz, rmig_list in iteritems(RMIGs_by_Gz)]

    print("RMIGs_by_tc: {}".format(RMIGs_by_tc))
    print("RMIGs_by_beta: {}".format(RMIGs_by_beta))

    # Plotting RMIG grouped by TC / Beta / Gz
    # =========================================== #
    font = {'size': 12}

    matplotlib.rc('font', **font)

    width = 0.5
    plt.bar(range(0, len(tc_list)), [a[1] for a in RMIGs_mean_by_tc],
            yerr=[a[1] for a in RMIGs_std_by_tc],
            width=width,
            align='center',
            label="TC")
    plt.bar(range(len(tc_list),
                  len(beta_list) + len(tc_list)),
            [a[1] for a in RMIGs_mean_by_beta],
            yerr=[a[1] for a in RMIGs_std_by_beta],
            width=width,
            align='center',
            label="Beta")
    plt.bar(range(
        len(beta_list) + len(tc_list),
        len(beta_list) + len(tc_list) + len(Gz_list)),
            [a[1] for a in RMIGs_mean_by_Gz],
            yerr=[a[1] for a in RMIGs_std_by_Gz],
            width=width,
            align='center',
            label="Gz")
    plt.xticks(range(0,
                     len(tc_list) + len(beta_list) + len(Gz_list)),
               tc_list + beta_list + Gz_list)

    plt.legend()
    plt.xlabel("model")
    plt.ylabel("RMIG")
    # plt.tight_layout()

    subplot_adjust = {'left': 0.11, 'right': 0.99, 'bottom': 0.17, 'top': 0.98}
    plt.subplots_adjust(**subplot_adjust)
    plt.gcf().set_size_inches(9, 3)

    save_dir = make_dir_if_not_exist(save_dir)
    save_file = join(save_dir, "RMIG_tc_beta_Gz.pdf")

    with PdfPages(save_file) as pdf_file:
        plt.savefig(pdf_file, format='pdf')

    plt.show()
    plt.close()
Example #29
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']

    x_train = np.reshape(x_train, [3, 6, 40, 32, 32, 64, 64, 1])

    # =====================================
    # Instantiate model
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny(num_outputs=2)
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(
            args.enc_dec_model))

    model = FactorVAE([64, 64, 1],
                      args.z_dim,
                      encoder=encoder,
                      decoder=decoder,
                      discriminator_z=disc_z,
                      rec_x_mode=args.rec_x_mode,
                      use_gp0_z_tc=True,
                      gp0_z_tc_mode=args.gp0_z_tc_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'kld_loss': args.kld_loss_coeff,
        'tc_loss': args.tc_loss_coeff,
        'gp0_z_tc': args.gp0_z_tc_coeff,
        'Dz_tc_loss_coeff': args.Dz_tc_loss_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=3,
                        suppress=True)

    result_file = join(args.JEMMIG_sampling_dir,
                       "{}_{}".format(args.enc_dec_model, args.run),
                       "results[num_samples={}].npz".format(args.num_samples))

    results = np.load(result_file, "r")
    print("results.keys: {}".format(list(results.keys())))

    # Plotting
    # =========================================== #
    data = [
        x_train[0, 3, 20, 16, 16], x_train[1, 3, 20, 16, 16], x_train[2, 3, 20,
                                                                      16, 16]
    ]

    gt_factors = ['Shape', 'Scale', 'Rotation', 'Pos_x', 'Pos_y']
    ids_sorted = results['id_sorted']

    MI_zi_yk_sorted = results['MI_zi_yk_sorted']
    H_zi_yk_sorted = results['H_zi_yk_sorted']

    H_yk = results['H_yk']
    RMIG_yk = results['RMIG_yk']
    RMIG_norm_yk = results['RMIG_norm_yk']
    JEMMIG_yk = results['JEMMIG_yk']

    print("MI_zi_yk_sorted:\n{}".format(MI_zi_yk_sorted))

    print("\nShow RMIG!")
    for k in range(len(gt_factors)):
        print(
            "{}, RMIG: {:.4f}, RMIG (norm): {:.4f}, H: {:.4f}, I1: {:.4f}, I2: {:.4f}"
            .format(gt_factors[k], RMIG_yk[k], RMIG_norm_yk[k], H_yk[k],
                    MI_zi_yk_sorted[0, k], MI_zi_yk_sorted[1, k]))

    print("\nShow JEMMIG!")
    for k in range(len(gt_factors)):
        print(
            "{}, JEMMIG: {:.4f}, H1: {:.4f}, H1-I1: {:.4f}, I2: {:.4f}, top2 ids: z{}, z{}"
            .format(gt_factors[k], JEMMIG_yk[k], H_zi_yk_sorted[0, k],
                    H_zi_yk_sorted[0, k] - MI_zi_yk_sorted[0, k],
                    MI_zi_yk_sorted[1, k], ids_sorted[0, k], ids_sorted[1, k]))

    span = 3
    points_one_side = 5
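    # For every sample and every ground-truth factor, traverse the 3 latents
    # with the highest I(z_i; y_k) around the sample's encoding, within
    # +/- span (points_one_side points on each side of the encoded value),
    # labeling each row with the latent index and its MI value.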

    for n in range(len(data)):
        for k in range(len(gt_factors)):
            print("x={}, y={}!".format(n, gt_factors[k]))

            img_file = join(
                save_dir,
                "{}-x{}_num_samples={}].png".format(gt_factors[k], n,
                                                    args.num_samples))

            ids_top3 = ids_sorted[:3, k]
            MI_top3 = MI_zi_yk_sorted[:3, k]
            model.cond_all_latents_traverse_v2(
                img_file,
                sess,
                data[n],
                z_comps=ids_top3,
                z_comp_labels=[
                    "z[{}] ({:.4f})".format(comp, mi)
                    for comp, mi in zip(ids_top3, MI_top3)
                ],
                span=span,
                points_1_side=points_one_side,
                hl_x=True,
                font_size=9,
                title="{} (RMIG={:.4f}, JEMMIG={:.4f}, H={:.4f})".format(
                    gt_factors[k], RMIG_yk[k], JEMMIG_yk[k], H_yk[k]),
                title_font_scale=1.5,
                subplot_adjust={
                    'left': 0.16,
                    'right': 0.99,
                    'bottom': 0.01,
                    'top': 0.88
                },
                size_inches=(6.2, 1.7),
                batch_size=args.batch_size,
                dec_output_2_img_func=binary_float_to_uint8)

    f.close()
Example #30
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    celebA_loader = TFCelebALoader(root_dir=args.celebA_root_dir)

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size))

    # =====================================
    # Instantiate model
    # =====================================
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(args.activation))

    if args.enc_dec_model == "1Konny":
        assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)

        encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny(num_outputs=2)
    elif args.enc_dec_model == "my":
        assert args.z_dim == 150, "For 'my', z_dim must be 150. Found {}!".format(args.z_dim)

        encoder = Encoder_My(args.z_dim, stochastic=True, activation=activation)
        decoder = Decoder_My([img_height, img_width, 3], activation=activation,
                             output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_My(num_outputs=2)
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(args.enc_dec_model))

    model = FactorVAE([img_height, img_width, 3], args.z_dim,
                      encoder=encoder, decoder=decoder,
                      discriminator_z=disc_z,
                      rec_x_mode=args.rec_x_mode,
                      use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'kld_loss': args.kld_loss_coeff,
        'tc_loss': args.tc_loss_coeff,
        'gp0_z_tc': args.gp0_z_tc_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    save_dir = remove_dir_if_exist(join(args.save_dir, "FactorVAE_{}".format(args.run)), ask_4_permission=False)
    save_dir = make_dir_if_not_exist(save_dir)
    # =====================================

    # z correlation matrix
    # ======================================= #
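    # Encode the whole training set twice, once with deterministic=True
    # (presumably the posterior mean) and once with deterministic=False
    # (presumably a sampled z), then plot the pairwise correlation matrix of
    # the latent dimensions together with per-dimension histograms.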
    for deterministic in [True, False]:
        all_z = []

        for batch_ids in iterate_data(celebA_loader.num_train_data, args.batch_size, shuffle=False):
            x = celebA_loader.sample_images_from_dataset(sess, 'train', batch_ids)

            z = model.encode(sess, x, deterministic=deterministic)
            assert len(z.shape) == 2 and z.shape[1] == args.z_dim, "z.shape: {}".format(z.shape)

            all_z.append(z)

        all_z = np.concatenate(all_z, axis=0)

        # plot_corrmat(join(save_dir, "corr_mat[deter={}].png".format(deterministic)), all_z,
        #              font={'size': 14},
        #              subplot_adjust={'left': 0.04, 'right': 0.96, 'bottom': 0.02, 'top': 0.98},
        #              size_inches=(7.2, 6))

        plot_corrmat_with_histogram(join(save_dir, "corr_mat_hist[deter={}].png".format(deterministic)), all_z,
                                    font={'size': 14},
                                    subplot_adjust={'left': 0.04, 'right': 0.96, 'bottom': 0.02, 'top': 0.98},
                                    size_inches=(10, 3))