def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(os.path.join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Preparation
    # =====================================
    data_file = os.path.join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                             "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)

    # =====================================
    # Instantiate models
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(
            args.enc_dec_model))

    model = AAE([64, 64, 1],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_list()

    # =====================================
    # TF Graph Handler
    # =====================================
    asset_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "asset"))
    img_eval = remove_dir_if_exist(os.path.join(asset_dir, "img_eval"),
                                   ask_4_permission=False)
    img_eval = make_dir_if_not_exist(img_eval)

    img_x_rec = make_dir_if_not_exist(os.path.join(img_eval, "x_rec"))
    img_z_rand_2_traversal = make_dir_if_not_exist(
        os.path.join(img_eval, "z_rand_2_traversal"))
    img_z_cond_all_traversal = make_dir_if_not_exist(
        os.path.join(img_eval, "z_cond_all_traversal"))
    img_z_cond_1_traversal = make_dir_if_not_exist(
        os.path.join(img_eval, "z_cond_1_traversal"))
    img_z_corr = make_dir_if_not_exist(os.path.join(img_eval, "z_corr"))
    img_z_dist = make_dir_if_not_exist(os.path.join(img_eval, "z_dist"))
    img_z_stat_dist = make_dir_if_not_exist(
        os.path.join(img_eval, "z_stat_dist"))
    img_rec_error_dist = make_dir_if_not_exist(
        os.path.join(img_eval, "rec_error_dist"))

    model_dir = make_dir_if_not_exist(os.path.join(args.output_dir,
                                                   "model_tf"))

    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)
    # =====================================

    # =====================================
    # Training Loop
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # Reconstruction
    # ======================================= #
    seed = 389  # used as a start index into x_train, not an RNG seed
    x = x_train[np.arange(seed, seed + 64)]

    img_file = os.path.join(img_x_rec, 'x_rec_train.png')
    model.reconstruct_images(img_file,
                             sess,
                             x,
                             block_shape=[8, 8],
                             batch_size=-1,
                             dec_output_2_img_func=binary_float_to_uint8)
    # ======================================= #
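
    # A minimal sketch (an assumption, not this repo's implementation) of what
    # the `binary_float_to_uint8` helper used above is expected to do: map
    # decoder outputs in [0, 1] to displayable uint8 pixel values.
    def binary_float_to_uint8_sketch(x):
        # Clip for numerical safety, then scale to the uint8 range.
        return (np.clip(x, 0.0, 1.0) * 255.0).astype(np.uint8)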

    # z random/conditional traversal
    # ======================================= #
    # Traverse pairs of continuous z components
    z_zero = np.zeros([args.z_dim], dtype=np.float32)
    z_rand = np.random.randn(args.z_dim)
    z_start, z_stop = -4, 4
    num_points = 8

    for i in range(args.z_dim):
        for j in range(i + 1, args.z_dim):
            print("Plot random 2 comps z traverse with {} and {} components!".
                  format(i, j))

            img_file = os.path.join(img_z_rand_2_traversal,
                                    'z[{},{},zero].png'.format(i, j))
            model.rand_2_latents_traverse(
                img_file,
                sess,
                default_z=z_zero,
                z_comp1=i,
                start1=z_start,
                stop1=z_stop,
                num_points1=num_points,
                z_comp2=j,
                start2=z_start,
                stop2=z_stop,
                num_points2=num_points,
                batch_size=args.batch_size,
                dec_output_2_img_func=binary_float_to_uint8)

            img_file = os.path.join(img_z_rand_2_traversal,
                                    'z[{},{},rand].png'.format(i, j))
            model.rand_2_latents_traverse(
                img_file,
                sess,
                default_z=z_rand,
                z_comp1=i,
                start1=z_start,
                stop1=z_stop,
                num_points1=num_points,
                z_comp2=j,
                start2=z_start,
                stop2=z_stop,
                num_points2=num_points,
                batch_size=args.batch_size,
                dec_output_2_img_func=binary_float_to_uint8)

    seed = 389
    z_start, z_stop = -4, 4
    num_itpl_points = 8

    for n in range(seed, seed + 30):
        print("Plot conditional all comps z traverse with test sample {}!".
              format(n))

        x = x_train[n]
        img_file = os.path.join(img_z_cond_all_traversal,
                                'x_train{}.png'.format(n))
        model.cond_all_latents_traverse(
            img_file,
            sess,
            x,
            start=z_start,
            stop=z_stop,
            num_itpl_points=num_itpl_points,
            batch_size=args.batch_size,
            dec_output_2_img_func=binary_float_to_uint8)

    seed = 64
    z_start, z_stop = -4, 4
    num_itpl_points = 8
    print("Plot conditional 1 comp z traverse!")
    for i in range(args.z_dim):
        x = x_train[seed:seed + 64]
        img_file = os.path.join(
            img_z_cond_1_traversal,
            'x_train[{},{}]_z{}.png'.format(seed, seed + 64, i))
        model.cond_1_latent_traverse(
            img_file,
            sess,
            x,
            z_comp=i,
            start=z_start,
            stop=z_stop,
            num_itpl_points=num_itpl_points,
            batch_size=args.batch_size,
            dec_output_2_img_func=binary_float_to_uint8)
    # ======================================= #
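
    # Hedged sketch of what a conditional single-latent traversal does
    # internally (`model.cond_1_latent_traverse` above is the repo's own
    # method; this is only an illustration of the idea): encode x, sweep one
    # z component over a linspace while keeping the rest fixed, and decode.
    def cond_1_latent_traverse_sketch(x, z_comp, start, stop, num_itpl_points):
        z = model.encode(sess, x)                      # (batch, z_dim)
        outputs = []
        for v in np.linspace(start, stop, num_itpl_points):
            z_mod = np.array(z, copy=True)
            z_mod[:, z_comp] = v                       # overwrite one component
            outputs.append(model.decode(sess, z_mod))  # decoded image batch
        return outputs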

    # z correlation matrix
    # ======================================= #
    data = x_train

    all_z = []
    for batch_ids in iterate_data(len(data), args.batch_size, shuffle=False):
        x = data[batch_ids]

        z = model.encode(sess, x)
        assert len(z.shape) == 2 and z.shape[1] == args.z_dim, \
            "z.shape: {}".format(z.shape)

        all_z.append(z)

    all_z = np.concatenate(all_z, axis=0)

    print("Start plotting!")
    plot_corrmat_with_histogram(os.path.join(img_z_corr, "corr_mat.png"),
                                all_z)
    plot_comp_dist(os.path.join(img_z_dist, 'z_{}.png'), all_z, x_lim=(-5, 5))
    print("Done!")
    # ======================================= #
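
    # A sketch (assumption about intent) of the quantity that
    # `plot_corrmat_with_histogram` presumably visualizes: the Pearson
    # correlation matrix between latent components over all encoded samples.
    def latent_corr_matrix_sketch(all_z):
        # np.corrcoef expects variables in rows, hence the transpose.
        return np.corrcoef(all_z.T)  # (z_dim, z_dim)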

    # z gaussian stddev
    # ======================================= #
    print("\nPlot z mean and stddev!")
    data = x_train

    all_z_mean = []
    all_z_stddev = []

    for batch_ids in iterate_data(len(data), args.batch_size, shuffle=False):
        x = data[batch_ids]

        z_mean, z_stddev = sess.run(
            model.get_output(['z_mean', 'z_stddev']),
            feed_dict={model.is_train: False, model.x_ph: x})

        all_z_mean.append(z_mean)
        all_z_stddev.append(z_stddev)

    all_z_mean = np.concatenate(all_z_mean, axis=0)
    all_z_stddev = np.concatenate(all_z_stddev, axis=0)

    plot_comp_dist(os.path.join(img_z_stat_dist, 'z_mean_{}.png'),
                   all_z_mean,
                   x_lim=(-5, 5))
    plot_comp_dist(os.path.join(img_z_stat_dist, 'z_stddev_{}.png'),
                   all_z_stddev,
                   x_lim=(0, 3))
    # ======================================= #

    # Decoder sensitivity
    # ======================================= #
    z_start = -3
    z_stop = 3
    for i in range(args.z_dim):
        print("\nPlot rec error distribution for z component {}!".format(i))

        all_z1 = np.array(all_z, copy=True, dtype=np.float32)
        all_z2 = np.array(all_z, copy=True, dtype=np.float32)

        all_z1[:, i] = z_start
        all_z2[:, i] = z_stop

        all_x_rec1 = []
        all_x_rec2 = []
        for batch_ids in iterate_data(len(x_train),
                                      args.batch_size,
                                      shuffle=False):
            z1 = all_z1[batch_ids]
            z2 = all_z2[batch_ids]

            x1 = model.decode(sess, z1)
            x2 = model.decode(sess, z2)

            all_x_rec1.append(x1)
            all_x_rec2.append(x2)

        all_x_rec1 = np.concatenate(all_x_rec1, axis=0)
        all_x_rec2 = np.concatenate(all_x_rec2, axis=0)

        rec_errors = np.sum(np.reshape((all_x_rec1 - all_x_rec2)**2,
                                       [len(x_train), 64 * 64]),
                            axis=1)  # dSprites images are 64x64, not 28x28
        plot_comp_dist(
            os.path.join(
                img_rec_error_dist,
                'rec_error[zi={},{},{}].png'.format(i, z_start, z_stop)),
            rec_errors)
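
# `iterate_data` is a helper from this codebase; a minimal sketch of its
# assumed behavior (the defaults here are guesses): yield batches of indices
# over a dataset of size n.
def iterate_data_sketch(n, batch_size, shuffle=False, include_remaining=True):
    ids = np.random.permutation(n) if shuffle else np.arange(n)
    stop = n if include_remaining else n - (n % batch_size)
    for start in range(0, stop, batch_size):
        yield ids[start:start + batch_size]
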
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    celebA_loader = TFCelebAWithAttrLoader(root_dir=args.celebA_root_dir)

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny",
                                         resize_size=args.celebA_resize_size))

    # =====================================
    # Instantiate model
    # =====================================
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(
            args.activation))

    if args.enc_dec_model == "1Konny":
        # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)

        encoder = Encoder_1Konny(args.z_dim,
                                 stochastic=True,
                                 activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3],
                                 activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(
            args.enc_dec_model))

    model = AAE([img_height, img_width, 3],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    # save_dir = remove_dir_if_exist(join(args.save_dir, "AAE_{}".format(args.run)), ask_4_permission=False)
    # save_dir = make_dir_if_not_exist(save_dir)

    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "AAE_{}".format(args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=3,
                        suppress=True)

    num_bins = args.num_bins
    bin_limits = tuple([float(s) for s in args.bin_limits.split(";")])
    data_proportion = args.data_proportion
    num_data = int(data_proportion * celebA_loader.num_train_data)
    eps = 1e-8

    # file
    f = open(join(
        save_dir, 'log[bins={},bin_limits={},data={}].txt'.format(
            num_bins, bin_limits, data_proportion)),
             mode='w')

    # print function
    print_ = functools.partial(print_both, file=f)
    '''
    if attr_type == 0:
        attr_names = celebA_loader.attributes
    elif attr_type == 1:
        attr_names = ['Male', 'Black_Hair', 'Blond_Hair', 'Straight_Hair', 'Wavy_Hair', 'Bald',
                      'Oval_Face', 'Big_Nose', 'Chubby', 'Double_Chin', 'Goatee', 'No_Beard',
                      'Mouth_Slightly_Open', 'Smiling',
                      'Eyeglasses', 'Pale_Skin']
    else:
        raise ValueError("Only support factor_type=0 or 1!")
    '''

    print_("num_bins: {}".format(num_bins))
    print_("bin_limits: {}".format(bin_limits))
    print_("data_proportion: {}".format(data_proportion))

    # Compute bins
    # ================================= #
    print_("")
    print_("bin_limits: {}".format(bin_limits))
    assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[1], \
        "bin_limits={}".format(bin_limits)

    bins = np.linspace(bin_limits[0],
                       bin_limits[1],
                       num_bins + 1,
                       endpoint=True)
    print_("bins: {}".format(bins))
    assert len(bins) == num_bins + 1

    bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))]
    print_("bin_widths: {}".format(bin_widths))
    assert len(bin_widths) == num_bins, \
        "len(bin_widths)={} while num_bins={}!".format(len(bin_widths), num_bins)
    assert np.all(np.greater(bin_widths, 0)), \
        "bin_widths: {}".format(bin_widths)

    bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))]
    print_("bin_centers: {}".format(bin_centers))
    assert len(bin_centers) == num_bins, \
        "len(bin_centers)={} while num_bins={}!".format(len(bin_centers), num_bins)
    # ================================= #
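
    # The helpers `normal_density` and `at_bin` used below come from this
    # codebase; minimal sketches of their assumed behavior:
    def normal_density_sketch(x, mean, stddev):
        # Gaussian pdf N(x; mean, stddev^2); broadcasts over the bin axis.
        return np.exp(-0.5 * ((x - mean) / stddev) ** 2) / (
            stddev * np.sqrt(2.0 * np.pi))

    def at_bin_sketch(values, bins):
        # Hard-assign each value to its histogram bin as a one-hot row.
        idx = np.clip(np.digitize(values, bins) - 1, 0, len(bins) - 2)
        return np.eye(len(bins) - 1)[idx]  # (len(values), num_bins)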

    # Compute representations
    # ================================= #
    z_data_attr_file = join(save_dir,
                            "z_data[data={}].npz".format(data_proportion))

    if not exists(z_data_attr_file):
        all_z_mean = []
        all_z_stddev = []
        all_attrs = []

        print("")
        print("Compute all_z_mean, all_z_stddev and all_attrs!")

        count = 0
        for batch_ids in iterate_data(num_data,
                                      10 * args.batch_size,
                                      shuffle=False):
            x = celebA_loader.sample_images_from_dataset(
                sess, 'train', batch_ids)
            attrs = celebA_loader.sample_attrs_from_dataset('train', batch_ids)
            assert attrs.shape[1] == celebA_loader.num_attributes

            z_mean, z_stddev = sess.run(
                model.get_output(['z_mean', 'z_stddev']),
                feed_dict={model.is_train: False, model.x_ph: x})

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)
            all_attrs.append(attrs)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)
        all_attrs = np.concatenate(all_attrs, axis=0)

        np.savez_compressed(z_data_attr_file,
                            all_z_mean=all_z_mean,
                            all_z_stddev=all_z_stddev,
                            all_attrs=all_attrs)
    else:
        print("{} exists. Load data from file!".format(z_data_attr_file))
        with np.load(z_data_attr_file, "r") as f:
            all_z_mean = f['all_z_mean']
            all_z_stddev = f['all_z_stddev']
            all_attrs = f['all_attrs']

    print_("")
    print_("all_z_mean.shape: {}".format(all_z_mean.shape))
    print_("all_z_stddev.shape: {}".format(all_z_stddev.shape))
    print_("all_attrs.shape: {}".format(all_attrs.shape))
    # ================================= #

    # Compute the probability mass function for ground truth factors
    # ================================= #
    num_attrs = all_attrs.shape[1]

    assert all_attrs.dtype == np.bool
    all_attrs = all_attrs.astype(np.int32)

    # (num_samples, num_attrs, 2): the last axis is [attr present, attr absent]
    all_Q_y_cond_x = np.stack([all_attrs, 1 - all_attrs], axis=-1)
    # ================================= #
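
    # Tiny worked example (illustration only) of the stacking above: a binary
    # attribute yields a degenerate two-point distribution with all mass on
    # the observed value, e.g. attrs=[1, 0] -> [[1, 0], [0, 1]].
    def q_y_cond_x_sketch(attrs):
        # attrs: (num_samples,) in {0, 1}; returns (num_samples, 2) with
        # column 0 = P(y=1|x) and column 1 = P(y=0|x).
        return np.stack([attrs, 1 - attrs], axis=-1)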

    # Compute Q(zi|x)
    # Compute I(zi, yk)
    # ================================= #
    Q_z_y = np.zeros([args.z_dim, num_attrs, num_bins, 2], dtype=np.float32)
    MI_z_y = np.zeros([args.z_dim, num_attrs], dtype=np.float32)
    H_z_y = np.zeros([args.z_dim, num_attrs], dtype=np.float32)
    H_z_4_diff_y = np.zeros([args.z_dim, num_attrs], dtype=np.float32)
    H_y_4_diff_z = np.zeros([num_attrs, args.z_dim], dtype=np.float32)

    for i in range(args.z_dim):
        print_("")
        print_("Compute all_Q_z{}_cond_x!".format(i))

        # Q_s_cond_x
        all_Q_s_cond_x = []
        for batch_ids in iterate_data(len(all_z_mean),
                                      500,
                                      shuffle=False,
                                      include_remaining=True):
            # (batch_size, num_bins)
            q_s_cond_x = normal_density(
                np.expand_dims(bin_centers, axis=0),
                mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1),
                stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1))

            # (batch_size,)
            max_q_s_cond_x = np.max(q_s_cond_x, axis=-1)
            # print("\nmax_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x)))

            # (batch_size, num_bins)
            deter_s_cond_x = at_bin(all_z_mean[batch_ids, i],
                                    bins).astype(np.float32)

            # (batch_size, num_bins)
            Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0)
            Q_s_cond_x = Q_s_cond_x / np.maximum(
                np.sum(Q_s_cond_x, axis=1, keepdims=True), eps)
            # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            Q_s_cond_x = np.where(
                np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1),
                deter_s_cond_x, Q_s_cond_x)
            # print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            all_Q_s_cond_x.append(Q_s_cond_x)

        # (num_samples, num_bins)
        all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0)
        assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \
                                            "sorted_all_Q_s_cond_x[:30]:\n{}!".format(
            np.sort(all_Q_s_cond_x[:30], axis=None))

        assert len(all_Q_s_cond_x) == len(
            all_attrs), "all_Q_s_cond_x.shape={}, all_attrs.shape={}".format(
                all_Q_s_cond_x.shape, all_attrs.shape)

        # I(z, y)
        for k in range(num_attrs):
            # Compute Q(zi, yk)
            # -------------------------------- #
            # (num_bins, 2)
            Q_zi_yk = np.matmul(np.transpose(all_Q_s_cond_x, axes=[1, 0]),
                                all_Q_y_cond_x[:, k, :])
            Q_zi_yk = Q_zi_yk / len(all_Q_y_cond_x)
            Q_zi_yk = Q_zi_yk / np.maximum(np.sum(Q_zi_yk), eps)

            assert np.all(Q_zi_yk >= 0), "'Q_zi_yk' contains negative values. " \
                "sorted_Q_zi_yk[:10]:\n{}!".format(np.sort(Q_zi_yk, axis=None))

            log_Q_zi_yk = np.log(np.clip(Q_zi_yk, eps, 1 - eps))

            Q_z_y[i, k] = Q_zi_yk
            print_("sum(Q_zi_yk): {}".format(np.sum(Q_zi_yk)))
            # -------------------------------- #

            # Compute Q_z
            # -------------------------------- #
            Q_zi = np.sum(Q_zi_yk, axis=1)
            log_Q_zi = np.log(np.clip(Q_zi, eps, 1 - eps))
            print_("sum(Q_z{}): {}".format(i, np.sum(Q_zi)))
            print_("Q_z{}: {}".format(i, Q_zi))
            # -------------------------------- #

            # Compute Q_y
            # -------------------------------- #
            Q_yk = np.sum(Q_zi_yk, axis=0)
            log_Q_yk = np.log(np.clip(Q_yk, eps, 1 - eps))
            print_("sum(Q_y{}): {}".format(k, np.sum(Q_yk)))
            print_("Q_y{}: {}".format(k, np.sum(Q_yk)))
            # -------------------------------- #

            MI_zi_yk = Q_zi_yk * (log_Q_zi_yk - np.expand_dims(
                log_Q_zi, axis=-1) - np.expand_dims(log_Q_yk, axis=0))

            MI_zi_yk = np.sum(MI_zi_yk)
            H_zi_yk = -np.sum(Q_zi_yk * log_Q_zi_yk)
            H_zi = -np.sum(Q_zi * log_Q_zi)
            H_yk = -np.sum(Q_yk * log_Q_yk)

            MI_z_y[i, k] = MI_zi_yk
            H_z_y[i, k] = H_zi_yk
            H_z_4_diff_y[i, k] = H_zi
            H_y_4_diff_z[k, i] = H_yk
    # ================================= #

    print_("")
    print_("MI_z_y:\n{}".format(MI_z_y))
    print_("H_z_y:\n{}".format(H_z_y))
    print_("H_z_4_diff_y:\n{}".format(H_z_4_diff_y))
    print_("H_y_4_diff_z:\n{}".format(H_y_4_diff_z))

    # Compute metric
    # ================================= #
    # Sorted in decreasing order
    MI_ids_sorted = np.argsort(MI_z_y, axis=0)[::-1]
    MI_sorted = np.take_along_axis(MI_z_y, MI_ids_sorted, axis=0)

    MI_gap_y = np.divide(MI_sorted[0, :] - MI_sorted[1, :], H_y_4_diff_z[:, 0])
    MIG = np.mean(MI_gap_y)

    print_("")
    print_("MI_sorted: {}".format(MI_sorted))
    print_("MI_ids_sorted: {}".format(MI_ids_sorted))
    print_("MI_gap_y: {}".format(MI_gap_y))
    print_("MIG: {}".format(MIG))

    results = {
        'Q_z_y': Q_z_y,
        'MI_z_y': MI_z_y,
        'H_z_y': H_z_y,
        'H_z_4_diff_y': H_z_4_diff_y,
        'H_y_4_diff_z': H_y_4_diff_z,
        'MI_sorted': MI_sorted,
        'MI_ids_sorted': MI_ids_sorted,
        'MI_gap_y': MI_gap_y,
        'MIG': MIG,
    }

    result_file = join(
        save_dir, 'results[bins={},bin_limits={},data={}].npz'.format(
            num_bins, bin_limits, data_proportion))
    np.savez_compressed(result_file, **results)
    # ================================= #

    f.close()
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']
        # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y
        y_train = f['latents_classes'][:, 1:]

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)
    num_train = len(x_train)
    print("num_train: {}".format(num_train))

    # =====================================
    # Instantiate model
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(args.enc_dec_model))

    model = AAE([64, 64, 1], args.z_dim,
                encoder=encoder, decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True, gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    save_dir = make_dir_if_not_exist(join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    np.set_printoptions(threshold=np.nan, linewidth=1000, precision=5, suppress=True)
    # =====================================

    # Compute representations
    # ================================= #
    z_data_file = join(save_dir, "z_data.npz")

    if not exists(z_data_file):
        all_z_mean = []
        all_z_stddev = []

        print("")
        print("Compute all_z_mean, all_z_stddev!")

        count = 0
        for batch_ids in iterate_data(num_train, 10 * args.batch_size, shuffle=False):
            x = x_train[batch_ids]

            z_samples, z_mean, z_stddev = sess.run(
                model.get_output(['z1_gen', 'z_mean', 'z_stddev']),
                feed_dict={model.is_train: False, model.x_ph: x})

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)

        np.savez_compressed(z_data_file, all_z_mean=all_z_mean, all_z_stddev=all_z_stddev)
    else:
        print("{} exists. Load data from file!".format(z_data_file))
        with np.load(z_data_file, "r") as f:
            all_z_mean = f['all_z_mean']
            all_z_stddev = f['all_z_stddev']
    # ================================= #

    if args.gpu_support == 'cupy':
        print("Use cupy instead of numpy!")
        results = estimate_JEMMIG_cupy(all_z_mean, all_z_stddev, y=y_train,
                                       num_samples=args.num_samples,
                                       batch_size=args.batch, gpu=args.gpu_id)
    else:
        raise NotImplementedError("There is no numpy support since it is too slow!")

    result_file = join(save_dir, "results[num_samples={}].npz".format(args.num_samples))
    np.savez_compressed(result_file, **results)
Example #4
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']
        # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y
        y_train = f['latents_classes']

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)
    num_train = len(x_train)
    print("num_train: {}".format(num_train))

    print("y_train[:10]: {}".format(y_train[:10]))

    # =====================================
    # Instantiate model
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(
            args.enc_dec_model))

    model = AAE([64, 64, 1],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=5,
                        suppress=True)

    num_bins = args.num_bins
    bin_limits = tuple([float(s) for s in args.bin_limits.split(";")])
    data_proportion = args.data_proportion
    num_data = int(data_proportion * num_train)
    assert num_data == num_train, "For dSprites, you must use all data!"
    eps = 1e-8

    # file
    f = open(join(
        save_dir, 'log[bins={},bin_limits={},data={}].txt'.format(
            num_bins, bin_limits, data_proportion)),
             mode='w')

    # print function
    print_ = functools.partial(print_both, file=f)

    print_("num_bins: {}".format(num_bins))
    print_("bin_limits: {}".format(bin_limits))
    print_("data_proportion: {}".format(data_proportion))

    # Compute bins
    # ================================= #
    print_("")
    print_("bin_limits: {}".format(bin_limits))
    assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[1], \
        "bin_limits={}".format(bin_limits)

    bins = np.linspace(bin_limits[0],
                       bin_limits[1],
                       num_bins + 1,
                       endpoint=True)
    print_("bins: {}".format(bins))
    assert len(bins) == num_bins + 1

    bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))]
    print_("bin_widths: {}".format(bin_widths))
    assert len(bin_widths) == num_bins, \
        "len(bin_widths)={} while num_bins={}!".format(len(bin_widths), num_bins)
    assert np.all(np.greater(bin_widths, 0)), \
        "bin_widths: {}".format(bin_widths)

    bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))]
    print_("bin_centers: {}".format(bin_centers))
    assert len(bin_centers) == num_bins, \
        "len(bin_centers)={} while num_bins={}!".format(len(bin_centers), num_bins)
    # ================================= #

    # Compute representations
    # ================================= #
    z_data_file = join(save_dir, "z_data[data={}].npz".format(data_proportion))

    if not exists(z_data_file):
        all_z_mean = []
        all_z_stddev = []

        print("")
        print("Compute all_z_mean, all_z_stddev and all_attrs!")

        count = 0
        for batch_ids in iterate_data(num_data,
                                      10 * args.batch_size,
                                      shuffle=False):
            x = x_train[batch_ids]

            z_mean, z_stddev = sess.run(
                model.get_output(['z_mean', 'z_stddev']),
                feed_dict={model.is_train: False, model.x_ph: x})

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)

        np.savez_compressed(z_data_file,
                            all_z_mean=all_z_mean,
                            all_z_stddev=all_z_stddev)
    else:
        print("{} exists. Load data from file!".format(z_data_file))
        with np.load(z_data_file, "r") as f:
            all_z_mean = f['all_z_mean']
            all_z_stddev = f['all_z_stddev']
    # ================================= #

    print_("")
    all_Q_z_cond_x = []
    for i in range(args.z_dim):
        print_("\nCompute all_Q_z{}_cond_x!".format(i))

        all_Q_s_cond_x = []
        for batch_ids in iterate_data(len(all_z_mean),
                                      500,
                                      shuffle=False,
                                      include_remaining=True):
            # (batch_size, num_bins)
            q_s_cond_x = normal_density(
                np.expand_dims(bin_centers, axis=0),
                mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1),
                stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1))

            # (batch_size,)
            max_q_s_cond_x = np.max(q_s_cond_x, axis=-1)
            # print("\nmax_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x)))

            # (batch_size, num_bins)
            deter_s_cond_x = at_bin(all_z_mean[batch_ids, i],
                                    bins).astype(np.float32)

            # (batch_size, num_bins)
            Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0)
            Q_s_cond_x = Q_s_cond_x / np.maximum(
                np.sum(Q_s_cond_x, axis=1, keepdims=True), eps)
            # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            Q_s_cond_x = np.where(
                np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1),
                deter_s_cond_x, Q_s_cond_x)
            # print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            all_Q_s_cond_x.append(Q_s_cond_x)

        # (num_samples, num_bins)
        all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0)
        assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \
            "sorted_all_Q_s_cond_x[:30]:\n{}!".format(np.sort(all_Q_s_cond_x[:30], axis=None))
        assert len(all_Q_s_cond_x) == num_train

        all_Q_z_cond_x.append(all_Q_s_cond_x)

    # (z_dim, num_samples, num_bins)
    all_Q_z_cond_x = np.asarray(all_Q_z_cond_x, dtype=np.float32)
    print_("all_Q_z_cond_x.shape: {}".format(all_Q_z_cond_x.shape))
    print_("sum(all_Q_z_cond_x)[:, :10]:\n{}".format(
        np.sum(all_Q_z_cond_x, axis=-1)[:, :10]))

    # (z_dim, num_bins)
    Q_z = np.mean(all_Q_z_cond_x, axis=1)
    log_Q_z = np.log(np.clip(Q_z, eps, 1 - eps))
    print_("Q_z.shape: {}".format(Q_z.shape))
    print_("sum(Q_z): {}".format(np.sum(Q_z, axis=-1)))

    # (z_dim, )
    H_z = -np.sum(Q_z * log_Q_z, axis=-1)

    # Factors
    gt_factors = ['shape', 'scale', 'rotation', 'pos_x', 'pos_y']
    gt_num_values = [3, 6, 40, 32, 32]

    MI_z_y = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)
    H_z_y = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)

    ids_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.int32)
    MI_z_y_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)
    H_z_y_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)

    H_y = []
    RMIG = []
    JEMMI = []

    for k, (factor, num_values) in enumerate(zip(gt_factors, gt_num_values)):
        print_("\n#" + "=" * 50 + "#")
        print_("The {}-th gt factor '{}' has {} values!".format(
            k, factor, num_values))

        print_("")
        # (num_samples, num_categories)
        # NOTE: We must use k+1 to account for the 'color' attribute, which is always white
        all_Q_yk_cond_x = one_hot(y_train[:, k + 1],
                                  num_categories=num_values,
                                  dtype=np.float32)
        print_("all_Q_yk_cond_x.shape: {}".format(all_Q_yk_cond_x.shape))

        # (num_categories)
        Q_yk = np.mean(all_Q_yk_cond_x, axis=0)
        log_Q_yk = np.log(np.clip(Q_yk, eps, 1 - eps))
        print_("Q_yk.shape: {}".format(Q_yk.shape))

        H_yk = -np.sum(Q_yk * log_Q_yk)
        print_("H_yk: {}".format(H_yk))
        H_y.append(H_yk)

        Q_z_yk = np.zeros([args.z_dim, num_bins, num_values], dtype=np.float32)

        # Compute I(zi, yk)
        for i in range(args.z_dim):
            print_("\n#" + "-" * 50 + "#")
            all_Q_zi_cond_x = all_Q_z_cond_x[i]
            assert len(all_Q_zi_cond_x) == len(all_Q_yk_cond_x) == num_train, \
                "all_Q_zi_cond_x.shape: {}, all_Q_yk_cond_x.shape: {}".format(
                    all_Q_zi_cond_x.shape, all_Q_yk_cond_x.shape)

            # (num_bins, num_categories)
            Q_zi_yk = np.matmul(np.transpose(all_Q_zi_cond_x, axes=[1, 0]),
                                all_Q_yk_cond_x)
            Q_zi_yk = Q_zi_yk / num_train
            print_("np.sum(Q_zi_yk): {}".format(np.sum(Q_zi_yk)))
            Q_zi_yk = Q_zi_yk / np.maximum(np.sum(Q_zi_yk), eps)
            print_("np.sum(Q_zi_yk) (normalized): {}".format(np.sum(Q_zi_yk)))

            assert np.all(Q_zi_yk >= 0), "'Q_zi_yk' contains negative values. " \
                "sorted_Q_zi_yk[:10]:\n{}!".format(np.sort(Q_zi_yk, axis=None))

            # (num_bins, num_categories)
            log_Q_zi_yk = np.log(np.clip(Q_zi_yk, eps, 1 - eps))

            print_("")
            print_("Q_zi (default): {}".format(Q_z[i]))
            print_("Q_zi (sum of Q_zi_yk over yk): {}".format(
                np.sum(Q_zi_yk, axis=-1)))

            print_("")
            print_("Q_yk (default): {}".format(Q_yk))
            print_("Q_yk (sum of Q_zi_yk over zi): {}".format(
                np.sum(Q_zi_yk, axis=0)))

            MI_zi_yk = Q_zi_yk * (log_Q_zi_yk - np.expand_dims(
                log_Q_z[i], axis=-1) - np.expand_dims(log_Q_yk, axis=0))

            MI_zi_yk = np.sum(MI_zi_yk)
            H_zi_yk = -np.sum(Q_zi_yk * log_Q_zi_yk)

            Q_z_yk[i] = Q_zi_yk
            MI_z_y[i, k] = MI_zi_yk
            H_z_y[i, k] = H_zi_yk

            print_("#" + "-" * 50 + "#")

        # Print statistics for all z
        print_("")
        print_("MI_z_yk:\n{}".format(MI_z_y[:, k]))
        print_("H_z_yk:\n{}".format(H_z_y[:, k]))
        print_("H_z:\n{}".format(H_z))
        print_("H_yk:\n{}".format(H_yk))

        # Compute RMIG and JEMMI
        ids_yk_sorted = np.argsort(MI_z_y[:, k], axis=0)[::-1]
        MI_z_yk_sorted = np.take_along_axis(MI_z_y[:, k],
                                            ids_yk_sorted,
                                            axis=0)
        H_z_yk_sorted = np.take_along_axis(H_z_y[:, k], ids_yk_sorted, axis=0)

        RMIG_yk = np.divide(MI_z_yk_sorted[0] - MI_z_yk_sorted[1], H_yk)
        JEMMI_yk = np.divide(
            H_z_yk_sorted[0] - MI_z_yk_sorted[0] + MI_z_yk_sorted[1],
            H_yk + np.log(num_bins))

        ids_sorted[:, k] = ids_yk_sorted
        MI_z_y_sorted[:, k] = MI_z_yk_sorted
        H_z_y_sorted[:, k] = H_z_yk_sorted

        RMIG.append(RMIG_yk)
        JEMMI.append(JEMMI_yk)

        print_("")
        print_("ids_sorted: {}".format(ids_sorted))
        print_("MI_z_yk_sorted: {}".format(MI_z_yk_sorted))
        print_("RMIG_yk: {}".format(RMIG_yk))
        print_("JEMMI_yk: {}".format(JEMMI_yk))

        z_yk_prob_file = join(
            save_dir,
            "z_yk_prob_4_{}[bins={},bin_limits={},data={}].npz".format(
                factor, num_bins, bin_limits, data_proportion))
        np.savez_compressed(z_yk_prob_file, Q_z_yk=Q_z_yk)
        print_("#" + "=" * 50 + "#")

    results = {
        "MI_z_y": MI_z_y,
        "H_z_y": H_z_y,
        "ids_sorted": ids_sorted,
        "MI_z_y_sorted": MI_z_y_sorted,
        "H_z_y_sorted": H_z_y_sorted,
        "H_z": H_z,
        "H_y": np.asarray(H_y, dtype=np.float32),
        "RMIG": np.asarray(RMIG, dtype=np.float32),
        "JEMMI": np.asarray(JEMMI, dtype=np.float32),
    }
    result_file = join(
        save_dir, "results[bins={},bin_limits={},data={}].npz".format(
            num_bins, bin_limits, data_proportion))
    np.savez_compressed(result_file, **results)

    f.close()
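
# Restating the two metrics computed per factor above (no new math, just the
# formulas in one place). For factor y_k with entropy H(y_k), the top-two
# latents z* and z' by MI, joint entropy H(z*, y_k), and `num_bins` bins:
#     RMIG_k  = (I(z*; y_k) - I(z'; y_k)) / H(y_k)
#     JEMMI_k = (H(z*, y_k) - I(z*; y_k) + I(z'; y_k)) / (H(y_k) + log num_bins)
# A standalone sketch mirroring the loop body:
def rmig_jemmi_sketch(MI_k, H_z_y_k, H_yk, num_bins):
    order = np.argsort(MI_k)[::-1]      # latents sorted by decreasing MI
    top, second = MI_k[order[0]], MI_k[order[1]]
    rmig = (top - second) / H_yk
    jemmi = (H_z_y_k[order[0]] - top + second) / (H_yk + np.log(num_bins))
    return rmig, jemmi
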
Example #5
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    celebA_loader = TFCelebAWithAttrLoader(root_dir=args.celebA_root_dir)

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size))

    # =====================================
    # Instantiate model
    # =====================================
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(args.activation))

    if args.enc_dec_model == "1Konny":
        # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)

        encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(args.enc_dec_model))

    model = AAE([img_height, img_width, 3], args.z_dim,
                encoder=encoder, decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True, gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    # save_dir = remove_dir_if_exist(join(args.save_dir, "AAE_{}".format(args.run)), ask_4_permission=True)
    # save_dir = make_dir_if_not_exist(save_dir)

    save_dir = make_dir_if_not_exist(join(args.save_dir, "AAE_{}".format(args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan, linewidth=1000, precision=4, suppress=True)

    num_bins = args.num_bins
    bin_limits = tuple([float(s) for s in args.bin_limits.split(";")])
    data_proportion = args.data_proportion
    num_data = int(data_proportion * celebA_loader.num_train_data)
    eps = 1e-8

    # file
    f = open(join(save_dir, 'log[bins={},bin_limits={},data={}].txt'.
                  format(num_bins, bin_limits, data_proportion)), mode='w')

    # print function
    print_ = functools.partial(print_both, file=f)

    print_("num_bins: {}".format(num_bins))
    print_("bin_limits: {}".format(bin_limits))
    print_("data_proportion: {}".format(data_proportion))

    # Compute representations
    # ================================= #
    z_data_file = join(save_dir, "z_data[data={}].npz".format(data_proportion))

    if not exists(z_data_file):
        all_z_mean = []
        all_z_stddev = []

        print("")
        print("Compute all_z_mean and all_z_stddev!")
        count = 0
        for batch_ids in iterate_data(num_data, 10 * args.batch_size, shuffle=False):
            x = celebA_loader.sample_images_from_dataset(sess, 'train', batch_ids)

            z_mean, z_stddev = sess.run(
                model.get_output(['z_mean', 'z_stddev']),
                feed_dict={model.is_train: False, model.x_ph: x})

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)

        np.savez_compressed(z_data_file, all_z_mean=all_z_mean, all_z_stddev=all_z_stddev)
    else:
        print("{} exists. Load data from file!".format(z_data_file))
        with np.load(z_data_file, "r") as f:
            all_z_mean = f['all_z_mean']
            all_z_stddev = f['all_z_stddev']

    print_("")
    print_("all_z_mean.shape: {}".format(all_z_mean.shape))
    print_("all_z_stddev.shape: {}".format(all_z_stddev.shape))
    # ================================= #

    # Compute bins
    # ================================= #
    print_("")
    print_("bin_limits: {}".format(bin_limits))
    assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[1], "bin_limits={}".format(bin_limits)

    bins = np.linspace(bin_limits[0], bin_limits[1], num_bins + 1, endpoint=True)
    print_("bins: {}".format(bins))
    assert len(bins) == num_bins + 1

    bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))]
    print_("bin_widths: {}".format(bin_widths))
    assert len(bin_widths) == num_bins, "len(bin_widths)={} while num_bins={}!".format(len(bin_widths), num_bins)
    assert np.all(np.greater(bin_widths, 0)), "bin_widths: {}".format(bin_widths)

    bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))]
    print_("bin_centers: {}".format(bin_centers))
    assert len(bin_centers) == num_bins, "len(bin_centers)={} while num_bins={}!".format(len(bin_centers), num_bins)
    # ================================= #

    # Compute mutual information
    # ================================= #
    H_z = []
    H_z_cond_x = []
    MI_z_x = []
    norm_MI_z_x = []
    Q_z_cond_x = []
    Q_z = []

    for i in range(args.z_dim):
        print_("")
        print_("Compute I(z{}, x)!".format(i))

        # Q_s_cond_x
        all_Q_s_cond_x = []
        for batch_ids in iterate_data(len(all_z_mean), 500, shuffle=False, include_remaining=True):
            # (batch_size, num_bins)
            q_s_cond_x = normal_density(np.expand_dims(bin_centers, axis=0),
                                        mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1),
                                        stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1))

            # (batch_size,)
            max_q_s_cond_x = np.max(q_s_cond_x, axis=-1)
            # print("\nmax_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x)))

            # (batch_size, num_bins)
            deter_s_cond_x = at_bin(all_z_mean[batch_ids, i], bins).astype(np.float32)

            # (batch_size, num_bins)
            Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0)
            Q_s_cond_x = Q_s_cond_x / np.maximum(np.sum(Q_s_cond_x, axis=1, keepdims=True), eps)
            # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            Q_s_cond_x = np.where(np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1),
                                  deter_s_cond_x, Q_s_cond_x)
            # print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            all_Q_s_cond_x.append(Q_s_cond_x)

        all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0)
        print_("sort(sum(all_Q_s_cond_x))[:10]: {}".format(
            np.sort(np.sum(all_Q_s_cond_x, axis=-1), axis=0)[:100]))
        assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \
            "sorted_all_Q_s_cond_x[:30]:\n{}!".format(np.sort(all_Q_s_cond_x[:30], axis=None))
        Q_z_cond_x.append(all_Q_s_cond_x)

        H_zi_cond_x = -np.mean(np.sum(all_Q_s_cond_x * np.log(np.maximum(all_Q_s_cond_x, eps)), axis=1), axis=0)

        # Q_s
        Q_s = np.mean(all_Q_s_cond_x, axis=0)
        print_("Q_s: {}".format(Q_s))
        print_("sum(Q_s): {}".format(sum(Q_s)))
        assert np.all(Q_s >= 0), "'Q_s' contains negative values. " \
            "sorted_Q_s[:10]:\n{}!".format(np.sort(Q_s, axis=None))

        Q_s = Q_s / np.sum(Q_s, axis=0)
        print_("sum(Q_s) (normalized): {}".format(sum(Q_s)))

        Q_z.append(Q_s)

        H_zi = -np.sum(Q_s * np.log(np.maximum(Q_s, eps)), axis=0)

        MI_zi_x = H_zi - H_zi_cond_x
        normalized_MI_zi_x = (1.0 * MI_zi_x) / (H_zi + eps)

        print_("H_zi: {}".format(H_zi))
        print_("H_zi_cond_x: {}".format(H_zi_cond_x))
        print_("MI_zi_x: {}".format(MI_zi_x))
        print_("normalized_MI_zi_x: {}".format(normalized_MI_zi_x))

        H_z.append(H_zi)
        H_z_cond_x.append(H_zi_cond_x)
        MI_z_x.append(MI_zi_x)
        norm_MI_z_x.append(normalized_MI_zi_x)

    H_z = np.asarray(H_z, dtype=np.float32)
    H_z_cond_x = np.asarray(H_z_cond_x, dtype=np.float32)
    MI_z_x = np.asarray(MI_z_x, dtype=np.float32)
    norm_MI_z_x = np.asarray(norm_MI_z_x, dtype=np.float32)

    print_("")
    print_("H_z: {}".format(H_z))
    print_("H_z_cond_x: {}".format(H_z_cond_x))
    print_("MI_z_x: {}".format(MI_z_x))
    print_("norm_MI_z_x: {}".format(norm_MI_z_x))

    sorted_z_comps = np.argsort(MI_z_x, axis=0)[::-1]
    sorted_MI_z_x = np.take_along_axis(MI_z_x, sorted_z_comps, axis=0)
    print_("sorted_MI_z_x: {}".format(sorted_MI_z_x))
    print_("sorted_z_comps: {}".format(sorted_z_comps))

    sorted_norm_z_comps = np.argsort(norm_MI_z_x, axis=0)[::-1]
    sorted_norm_MI_z_x = np.take_along_axis(norm_MI_z_x, sorted_norm_z_comps, axis=0)
    print_("sorted_norm_MI_z_x: {}".format(sorted_norm_MI_z_x))
    print_("sorted_norm_z_comps: {}".format(sorted_norm_z_comps))

    result_file = join(save_dir, 'results[bins={},bin_limits={},data={}].npz'.
                       format(num_bins, bin_limits, data_proportion))

    np.savez_compressed(result_file,
                        H_z=H_z, H_z_cond_x=H_z_cond_x, MI_z_x=MI_z_x, norm_MI_z_x=norm_MI_z_x,
                        sorted_MI_z_x=sorted_MI_z_x, sorted_z_comps=sorted_z_comps,
                        sorted_norm_MI_z_x=sorted_norm_MI_z_x,
                        sorted_norm_z_comps=sorted_norm_z_comps)

    Q_z_cond_x = np.asarray(Q_z_cond_x, dtype=np.float32)
    Q_z = np.asarray(Q_z, dtype=np.float32)
    z_prob_file = join(save_dir, 'z_prob[bins={},bin_limits={},data={}].npz'.
                       format(num_bins, bin_limits, data_proportion))
    np.savez_compressed(z_prob_file, Q_z_cond_x=Q_z_cond_x, Q_z=Q_z)
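
# Restating the per-latent informativeness computed above: with discretized
# code distributions Q(z_i|x) and marginal Q(z_i),
#     I(z_i; x) = H(z_i) - H(z_i | x).
# A standalone sketch over an array of shape (num_samples, num_bins):
def mi_z_x_sketch(all_Q_s_cond_x, eps=1e-8):
    Q_s = np.mean(all_Q_s_cond_x, axis=0)
    Q_s = Q_s / np.sum(Q_s)
    H_z = -np.sum(Q_s * np.log(np.maximum(Q_s, eps)))
    H_z_cond_x = -np.mean(
        np.sum(all_Q_s_cond_x * np.log(np.maximum(all_Q_s_cond_x, eps)),
               axis=1))
    return H_z - H_z_cond_x
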
Example #6
def main(args):
    # Load config
    # ===================================== #
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)
    # ===================================== #

    # Load dataset
    # ===================================== #
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']
        # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y
        y_train = f['latents_classes'][:, 1:]

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)
    num_train = len(x_train)
    print("num_train: {}".format(num_train))
    # ===================================== #

    # Build model
    # ===================================== #
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(
            args.enc_dec_model))

    model = AAE([64, 64, 1],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()
    # ===================================== #

    # Initialize session
    # ===================================== #
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)
    train_helper.load(sess, load_step=args.load_step)
    # ===================================== #

    # Experiments
    # ===================================== #
    save_dir = make_dir_if_not_exist(
        join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    np.set_printoptions(threshold=np.nan,
                        linewidth=1000,
                        precision=5,
                        suppress=True)
    # ===================================== #

    # Compute representations
    # ===================================== #
    z_data_file = join(save_dir, "z_data.npz")

    if not exists(z_data_file):
        all_z_mean = []
        all_z_stddev = []

        print("")
        print("Compute all_z_mean, all_z_stddev!")

        count = 0
        for batch_ids in iterate_data(num_train,
                                      10 * args.batch_size,
                                      shuffle=False):
            x = x_train[batch_ids]

            # z_samples (z1_gen) is fetched but not used below
            z_samples, z_mean, z_stddev = sess.run(
                model.get_output(['z1_gen', 'z_mean', 'z_stddev']),
                feed_dict={model.is_train: False, model.x_ph: x})

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)

        np.savez_compressed(z_data_file,
                            all_z_mean=all_z_mean,
                            all_z_stddev=all_z_stddev)
    else:
        print("{} exists. Load data from file!".format(z_data_file))
        with np.load(z_data_file, "r") as f:
            all_z_mean = f['all_z_mean']
            all_z_stddev = f['all_z_stddev']
    # ===================================== #

    # With continuous_only, mask out the first factor (shape); the remaining
    # scale/rotation/posX/posY factors are treated as continuous.
    cont_mask = [False, True, True, True, True] if args.continuous_only else None

    if args.classifier == "LASSO":
        results = compute_metrics_with_LASSO(latents=all_z_mean,
                                             factors=y_train,
                                             params={
                                                 'alpha': args.LASSO_alpha,
                                                 'max_iter': args.LASSO_iters
                                             },
                                             cont_mask=cont_mask)
        result_file = join(
            save_dir, "results[LASSO,{},alpha={},iters={}].npz".format(
                "cont" if args.continuous_only else "all", args.LASSO_alpha,
                args.LASSO_iters))
    else:
        # NOTE: passing the same cont_mask here is an assumption that the RF
        # helper mirrors the LASSO signature; without it, the "cont"/"all"
        # tag in the result file name below would not reflect the computation.
        results = compute_metrics_with_RandomForest(
            latents=all_z_mean,
            factors=y_train,
            params={'n_estimators': args.RF_trees,
                    'max_depth': args.RF_depth},
            cont_mask=cont_mask)
        result_file = join(
            save_dir, "results[RF,{},trees={},depth={}].npz".format(
                "cont" if args.continuous_only else "all", args.RF_trees,
                args.RF_depth))

    np.savez_compressed(result_file, **results)
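
# The examples above and below iterate mini-batches via `iterate_data`, a
# repo utility imported elsewhere. A minimal sketch of the assumed behavior
# follows (names and defaults are assumptions, not the repo's implementation):
import numpy as np

def iterate_data(num_data, batch_size, shuffle=False, include_remaining=True):
    # Yield arrays of indices covering [0, num_data) in batches.
    ids = np.random.permutation(num_data) if shuffle else np.arange(num_data)
    for start in range(0, num_data, batch_size):
        batch_ids = ids[start:start + batch_size]
        if len(batch_ids) < batch_size and not include_remaining:
            break
        yield batch_ids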
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                     "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']
        # All 6 factor columns are kept here:
        # 1 color * 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y
        y_train = f['latents_classes']

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)
    num_train = len(x_train)
    print("num_train: {}".format(num_train))

    print("y_train[:10]: {}".format(y_train[:10]))

    # =====================================
    # Instantiate model
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(args.enc_dec_model))

    model = AAE([64, 64, 1], args.z_dim,
                encoder=encoder, decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True, gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    save_dir = make_dir_if_not_exist(join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)))
    # =====================================

    # threshold=np.nan is rejected by newer NumPy; sys.maxsize (requires `import sys`) disables truncation
    np.set_printoptions(threshold=sys.maxsize, linewidth=1000, precision=5, suppress=True)

    num_bins = args.num_bins
    bin_limits = tuple([float(s) for s in args.bin_limits.split(";")])
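    # e.g. args.bin_limits = "-4;4" gives bin_limits = (-4.0, 4.0)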
    data_proportion = args.data_proportion
    num_data = int(data_proportion * num_train)
    assert num_data == num_train, "For dSprites, you must use all data!"
    eps = 1e-8

    # file
    f = open(join(save_dir, 'log[bins={},bin_limits={},data={}].txt'.
                  format(num_bins, bin_limits, data_proportion)), mode='w')

    # print function
    print_ = functools.partial(print_both, file=f)
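    # print_both (repo utility) presumably writes each message to both stdout
    # and the file handle given via `file=`.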

    print_("num_bins: {}".format(num_bins))
    print_("bin_limits: {}".format(bin_limits))
    print_("data_proportion: {}".format(data_proportion))

    # Compute bins
    # ================================= #
    print_("")
    print_("bin_limits: {}".format(bin_limits))
    assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[1], "bin_limits={}".format(bin_limits)

    bins = np.linspace(bin_limits[0], bin_limits[1], num_bins + 1, endpoint=True)
    print_("bins: {}".format(bins))
    assert len(bins) == num_bins + 1

    bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))]
    print_("bin_widths: {}".format(bin_widths))
    assert len(bin_widths) == num_bins, "len(bin_widths)={} while num_bins={}!".format(len(bin_widths), num_bins)
    assert np.all(np.greater(bin_widths, 0)), "bin_widths: {}".format(bin_widths)

    bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))]
    print_("bin_centers: {}".format(bin_centers))
    assert len(bin_centers) == num_bins, "len(bin_centers)={} while num_bins={}!".format(len(bin_centers), num_bins)
    # ================================= #

    # Compute representations
    # ================================= #
    z_data_file = join(save_dir, "z_data[data={}].npz".format(data_proportion))

    if not exists(z_data_file):
        all_z_mean = []
        all_z_stddev = []

        print("")
        print("Compute all_z_mean, all_z_stddev and all_attrs!")

        count = 0
        for batch_ids in iterate_data(num_data, 10 * args.batch_size, shuffle=False):
            x = x_train[batch_ids]

            z_mean, z_stddev = sess.run(
                model.get_output(['z_mean', 'z_stddev']),
                feed_dict={model.is_train: False, model.x_ph: x})

            all_z_mean.append(z_mean)
            all_z_stddev.append(z_stddev)

            count += len(batch_ids)
            print("\rProcessed {} samples!".format(count), end="")
        print()

        all_z_mean = np.concatenate(all_z_mean, axis=0)
        all_z_stddev = np.concatenate(all_z_stddev, axis=0)

        np.savez_compressed(z_data_file, all_z_mean=all_z_mean,
                            all_z_stddev=all_z_stddev)
    else:
        print("{} exists. Load data from file!".format(z_data_file))
        # Bind the npz to a new name: `f` is the log file opened above and
        # must stay usable by print_ until the end of main.
        with np.load(z_data_file, "r") as z_data:
            all_z_mean = z_data['all_z_mean']
            all_z_stddev = z_data['all_z_stddev']
    # ================================= #

    # Compute mutual information
    # ================================= #
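    # Estimator sketch: each z_i is discretized into `num_bins` bins.
    # Q(z_i|x) is obtained from the Gaussian posterior N(z_mean_i, z_stddev_i)
    # by approximating the mass in each bin as pdf(bin_center) * bin_width;
    # Q(z_i) = mean over x of Q(z_i|x); then I(z_i; x) = H(z_i) - H(z_i|x).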
    H_z = []
    H_z_cond_x = []
    MI_z_x = []
    norm_MI_z_x = []
    Q_z_cond_x = []

    for i in range(args.z_dim):
        print_("")
        print_("Compute I(z{}, x)!".format(i))

        # Q_s_cond_x
        all_Q_s_cond_x = []
        for batch_ids in iterate_data(len(all_z_mean), 500, shuffle=False, include_remaining=True):
            # (batch_size, num_bins)
            q_s_cond_x = normal_density(np.expand_dims(bin_centers, axis=0),
                                        mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1),
                                        stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1))

            # (batch_size,)
            max_q_s_cond_x = np.max(q_s_cond_x, axis=-1)
            # print("\nmax_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x)))

            # (batch_size, num_bins)
            deter_s_cond_x = at_bin(all_z_mean[batch_ids, i], bins).astype(np.float32)

            # (batch_size, num_bins)
            Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0)
            Q_s_cond_x = Q_s_cond_x / np.maximum(np.sum(Q_s_cond_x, axis=1, keepdims=True), eps)
            # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            Q_s_cond_x = np.where(np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1),
                                  deter_s_cond_x, Q_s_cond_x)
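            # Fallback: if the posterior is so peaked that the pdf underflows
            # at every bin center, put all mass in the bin containing the
            # posterior mean instead of keeping an all-zero distribution.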
            # print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))

            all_Q_s_cond_x.append(Q_s_cond_x)

        all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0)
        print_("sort(sum(all_Q_s_cond_x))[:10]: {}".format(
            np.sort(np.sum(all_Q_s_cond_x, axis=-1), axis=0)[:100]))
        assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \
                                            "sorted_all_Q_s_cond_x[:30]:\n{}!".format(
            np.sort(all_Q_s_cond_x[:30], axis=None))
        Q_z_cond_x.append(all_Q_s_cond_x)

        H_zi_cond_x = -np.mean(np.sum(all_Q_s_cond_x * np.log(np.maximum(all_Q_s_cond_x, eps)), axis=1), axis=0)

        # Q_s
        Q_s = np.mean(all_Q_s_cond_x, axis=0)
        print_("Q_s: {}".format(Q_s))
        print_("sum(Q_s): {}".format(sum(Q_s)))
        assert np.all(Q_s >= 0), "'Q_s' contains negative values. " \
                                 "sorted_Q_s[:10]:\n{}!".format(np.sort(Q_s, axis=None))

        Q_s = Q_s / np.sum(Q_s, axis=0)
        print_("sum(Q_s) (normalized): {}".format(sum(Q_s)))

        H_zi = -np.sum(Q_s * np.log(np.maximum(Q_s, eps)), axis=0)

        MI_zi_x = H_zi - H_zi_cond_x
        normalized_MI_zi_x = (1.0 * MI_zi_x) / (H_zi + eps)
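        # Dividing by H(z_i) bounds the score in [0, 1]; eps guards against a
        # collapsed component with H(z_i) = 0.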

        print_("H_zi: {}".format(H_zi))
        print_("H_zi_cond_x: {}".format(H_zi_cond_x))
        print_("MI_zi_x: {}".format(MI_zi_x))
        print_("normalized_MI_zi_x: {}".format(normalized_MI_zi_x))

        H_z.append(H_zi)
        H_z_cond_x.append(H_zi_cond_x)
        MI_z_x.append(MI_zi_x)
        norm_MI_z_x.append(normalized_MI_zi_x)

    H_z = np.asarray(H_z, dtype=np.float32)
    H_z_cond_x = np.asarray(H_z_cond_x, dtype=np.float32)
    MI_z_x = np.asarray(MI_z_x, dtype=np.float32)
    norm_MI_z_x = np.asarray(norm_MI_z_x, dtype=np.float32)

    print_("")
    print_("H_z: {}".format(H_z))
    print_("H_z_cond_x: {}".format(H_z_cond_x))
    print_("MI_z_x: {}".format(MI_z_x))
    print_("norm_MI_z_x: {}".format(norm_MI_z_x))

    sorted_z_comps = np.argsort(MI_z_x, axis=0)[::-1]
    sorted_MI_z_x = np.take_along_axis(MI_z_x, sorted_z_comps, axis=0)
    print_("sorted_MI_z_x: {}".format(sorted_MI_z_x))
    print_("sorted_z_comps: {}".format(sorted_z_comps))

    sorted_norm_z_comps = np.argsort(norm_MI_z_x, axis=0)[::-1]
    sorted_norm_MI_z_x = np.take_along_axis(norm_MI_z_x, sorted_norm_z_comps, axis=0)
    print_("sorted_norm_MI_z_x: {}".format(sorted_norm_MI_z_x))
    print_("sorted_norm_z_comps: {}".format(sorted_norm_z_comps))

    result_file = join(save_dir, 'results[bins={},bin_limits={},data={}].npz'.
                       format(num_bins, bin_limits, data_proportion))

    np.savez_compressed(result_file,
                        H_z=H_z, H_z_cond_x=H_z_cond_x, MI_z_x=MI_z_x, norm_MI_z_x=norm_MI_z_x,
                        sorted_MI_z_x=sorted_MI_z_x, sorted_z_comps=sorted_z_comps,
                        sorted_norm_MI_z_x=sorted_norm_MI_z_x,
                        sorted_norm_z_comps=sorted_norm_z_comps)

    Q_z_cond_x = np.asarray(Q_z_cond_x, dtype=np.float32)
    z_prob_file = join(save_dir, 'z_prob[bins={},bin_limits={},data={}].npz'.
                       format(num_bins, bin_limits, data_proportion))
    np.savez_compressed(z_prob_file, Q_z_cond_x=Q_z_cond_x)
    # ================================= #

    f.close()
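
# The MI computation above relies on two helpers imported from the repo,
# `normal_density` and `at_bin`. A minimal sketch of what they are assumed
# to compute (Gaussian pdf, and a one-hot bin-membership matrix):
import numpy as np

def normal_density(x, mean, stddev):
    # N(x; mean, stddev), broadcasting over all arguments.
    return np.exp(-0.5 * ((x - mean) / stddev) ** 2) / (stddev * np.sqrt(2.0 * np.pi))

def at_bin(z, bins):
    # One-hot (len(z), num_bins) matrix marking the bin containing each z;
    # out-of-range values are clamped into the first/last bin.
    idx = np.digitize(z, bins[1:-1])
    return np.eye(len(bins) - 1)[idx]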
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(os.path.join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Preparation
    # =====================================
    celebA_loader = TFCelebALoader(root_dir=args.celebA_root_dir)
    num_train = celebA_loader.num_train_data
    num_test = celebA_loader.num_test_data

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny",
                                         resize_size=args.celebA_resize_size))

    # =====================================
    # Instantiate models
    # =====================================
    # Only use activation for encoder and decoder
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(
            args.activation))

    if args.enc_dec_model == "1Konny":
        # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)

        encoder = Encoder_1Konny(args.z_dim,
                                 stochastic=True,
                                 activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3],
                                 activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(
            args.enc_dec_model))

    model = AAE([img_height, img_width, 3],
                args.z_dim,
                encoder=encoder,
                decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True,
                gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_list()

    # =====================================
    # TF Graph Handler
    asset_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "asset"))
    img_eval = remove_dir_if_exist(os.path.join(asset_dir, "img_eval"),
                                   ask_4_permission=False)
    img_eval = make_dir_if_not_exist(img_eval)

    img_x_gen = make_dir_if_not_exist(os.path.join(img_eval, "x_gen"))
    img_x_rec = make_dir_if_not_exist(os.path.join(img_eval, "x_rec"))
    img_z_rand_2_traversal = make_dir_if_not_exist(
        os.path.join(img_eval, "z_rand_2_traversal"))
    img_z_cond_all_traversal = make_dir_if_not_exist(
        os.path.join(img_eval, "z_cond_all_traversal"))
    img_z_cond_1_traversal = make_dir_if_not_exist(
        os.path.join(img_eval, "z_cond_1_traversal"))
    img_z_corr = make_dir_if_not_exist(os.path.join(img_eval, "z_corr"))
    img_z_dist = make_dir_if_not_exist(os.path.join(img_eval, "z_dist"))
    img_z_stat_dist = make_dir_if_not_exist(
        os.path.join(img_eval, "z_stat_dist"))
    # img_rec_error_dist = make_dir_if_not_exist(os.path.join(img_eval, "rec_error_dist"))

    model_dir = make_dir_if_not_exist(os.path.join(args.output_dir,
                                                   "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)
    # =====================================

    # =====================================
    # Training Loop
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # '''
    # Generation
    # ======================================= #
    z = np.random.randn(64, args.z_dim)

    img_file = os.path.join(img_x_gen, 'x_gen_test.png')
    model.generate_images(img_file,
                          sess,
                          z,
                          block_shape=[8, 8],
                          batch_size=args.batch_size,
                          dec_output_2_img_func=binary_float_to_uint8)
    # ======================================= #
    # '''

    # '''
    # Reconstruction
    # ======================================= #
    seed = 389
    x = celebA_loader.sample_images_from_dataset(sess, 'test',
                                                 list(range(seed, seed + 64)))

    img_file = os.path.join(img_x_rec, 'x_rec_test.png')
    model.reconstruct_images(img_file,
                             sess,
                             x,
                             block_shape=[8, 8],
                             batch_size=-1,
                             dec_output_2_img_func=binary_float_to_uint8)
    # ======================================= #
    # '''

    # '''
    # z random traversal
    # ======================================= #
    if args.z_dim <= 5:
        print("z_dim = {}, perform random traversal!".format(args.z_dim))

        # Plot z cont with z cont
        z_zero = np.zeros([args.z_dim], dtype=np.float32)
        z_rand = np.random.randn(args.z_dim)
        z_start, z_stop = -4, 4
        num_points = 8
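
        # Each saved traversal is a num_points x num_points (8x8) image grid:
        # component i sweeps [z_start, z_stop] along one axis and component j
        # along the other, with the remaining components fixed at default_z.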

        for i in range(args.z_dim):
            for j in range(i + 1, args.z_dim):
                print(
                    "Plot random 2 comps z traverse with {} and {} components!"
                    .format(i, j))

                img_file = os.path.join(img_z_rand_2_traversal,
                                        'z[{},{},zero].png'.format(i, j))
                model.rand_2_latents_traverse(
                    img_file,
                    sess,
                    default_z=z_zero,
                    z_comp1=i,
                    start1=z_start,
                    stop1=z_stop,
                    num_points1=num_points,
                    z_comp2=j,
                    start2=z_start,
                    stop2=z_stop,
                    num_points2=num_points,
                    batch_size=args.batch_size,
                    dec_output_2_img_func=binary_float_to_uint8)

                img_file = os.path.join(img_z_rand_2_traversal,
                                        'z[{},{},rand].png'.format(i, j))
                model.rand_2_latents_traverse(
                    img_file,
                    sess,
                    default_z=z_rand,
                    z_comp1=i,
                    start1=z_start,
                    stop1=z_stop,
                    num_points1=num_points,
                    z_comp2=j,
                    start2=z_start,
                    stop2=z_stop,
                    num_points2=num_points,
                    batch_size=args.batch_size,
                    dec_output_2_img_func=binary_float_to_uint8)
    # ======================================= #
    # '''

    # z conditional traversal (all features + one feature)
    # ======================================= #
    seed = 389
    num_samples = 30
    data = celebA_loader.sample_images_from_dataset(
        sess, 'train', list(range(seed, seed + num_samples)))

    z_start, z_stop = -4, 4
    num_itpl_points = 8
    for n in range(num_samples):
        print("Plot conditional all comps z traverse with test sample {}!".
              format(n))
        img_file = os.path.join(img_z_cond_all_traversal,
                                'x_train{}.png'.format(n))
        model.cond_all_latents_traverse(
            img_file,
            sess,
            data[n],
            start=z_start,
            stop=z_stop,
            num_itpl_points=num_itpl_points,
            batch_size=args.batch_size,
            dec_output_2_img_func=binary_float_to_uint8)

    z_start, z_stop = -4, 4
    num_itpl_points = 8
    for i in range(args.z_dim):
        print("Plot conditional z traverse with comp {}!".format(i))
        img_file = os.path.join(
            img_z_cond_1_traversal,
            'x_train[{},{}]_z{}.png'.format(seed, seed + num_samples, i))
        model.cond_1_latent_traverse(
            img_file,
            sess,
            data,
            z_comp=i,
            start=z_start,
            stop=z_stop,
            num_itpl_points=num_itpl_points,
            batch_size=args.batch_size,
            dec_output_2_img_func=binary_float_to_uint8)
    # ======================================= #
    # '''

    # '''
    # z correlation matrix
    # ======================================= #
    all_z = []
    for batch_ids in iterate_data(num_train, args.batch_size, shuffle=False):
        x = celebA_loader.sample_images_from_dataset(sess, 'train', batch_ids)

        z = model.encode(sess, x)
        assert len(z.shape) == 2 and z.shape[1] == args.z_dim, \
            "z.shape: {}".format(z.shape)

        all_z.append(z)

    all_z = np.concatenate(all_z, axis=0)

    print("Start plotting!")
    plot_corrmat_with_histogram(os.path.join(img_z_corr, "corr_mat.png"),
                                all_z)
    plot_comp_dist(os.path.join(img_z_dist, 'z_{}'), all_z, x_lim=(-5, 5))
    print("Done!")
    # ======================================= #
    # '''

    # '''
    # z gaussian stddev
    # ======================================= #
    print("\nPlot z mean and stddev!")
    all_z_mean = []
    all_z_stddev = []

    for batch_ids in iterate_data(num_train, args.batch_size, shuffle=False):
        x = celebA_loader.sample_images_from_dataset(sess, 'train', batch_ids)

        z_mean, z_stddev = sess.run(model.get_output(['z_mean', 'z_stddev']),
                                    feed_dict={
                                        model.is_train: False,
                                        model.x_ph: x
                                    })

        all_z_mean.append(z_mean)
        all_z_stddev.append(z_stddev)

    all_z_mean = np.concatenate(all_z_mean, axis=0)
    all_z_stddev = np.concatenate(all_z_stddev, axis=0)

    plot_comp_dist(os.path.join(img_z_stat_dist, 'z_mean_{}.png'),
                   all_z_mean,
                   x_lim=(-5, 5))
    plot_comp_dist(os.path.join(img_z_stat_dist, 'z_stddev_{}.png'),
                   all_z_stddev,
                   x_lim=(-0.5, 3))
def main(args):
    # =====================================
    # Preparation
    # =====================================
    data_file = os.path.join(RAW_DATA_DIR, "ComputerVision", "dSprites",
                             "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz")

    # It is already in the range [0, 1]
    with np.load(data_file, encoding="latin1") as f:
        x_train = f['imgs']

    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)
    num_train = len(x_train)
    print("Number of training samples for dSprites: {}".format(num_train))

    args.output_dir = os.path.join(args.output_dir, args.run)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    else:
        if args.force_rm_dir:
            import shutil
            shutil.rmtree(args.output_dir, ignore_errors=True)
            print("Removed '{}'".format(args.output_dir))
        else:
            raise ValueError("Output directory '{}' existed. 'force_rm_dir' "
                             "must be set to True!".format(args.output_dir))
        os.mkdir(args.output_dir)

    save_args(os.path.join(args.output_dir, 'config.json'), args)
    # pp.pprint(args.__dict__)

    # =====================================
    # Instantiate models
    # =====================================
    if args.enc_dec_model == "1Konny":
        encoder = Encoder_1Konny(args.z_dim, stochastic=True)
        decoder = Decoder_1Konny()
        disc_z = DiscriminatorZ_1Konny()
    else:
        raise ValueError("Do not support enc_dec_model='{}'!".format(args.enc_dec_model))

    model = AAE([64, 64, 1], args.z_dim,
                encoder=encoder, decoder=decoder,
                discriminator_z=disc_z,
                rec_x_mode=args.rec_x_mode,
                stochastic_z=args.stochastic_z,
                use_gp0_z=True, gp0_z_mode=args.gp0_z_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'G_loss_z1_gen': args.G_loss_z1_gen_coeff,
        'D_loss_z1_gen': args.D_loss_z1_gen_coeff,
        'gp0_z': args.gp0_z_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_list()

    loss = model.get_loss()
    train_params = model.get_train_params()

    opt_Dz = tf.train.AdamOptimizer(learning_rate=args.lr_Dz, beta1=args.beta1_Dz, beta2=args.beta2_Dz)
    opt_AE = tf.train.AdamOptimizer(learning_rate=args.lr_AE, beta1=args.beta1_AE, beta2=args.beta2_AE)

    with tf.control_dependencies(model.get_all_update_ops()):
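        # control_dependencies ensures the model's update ops (e.g. batch-norm
        # moving averages, if the encoder/decoder use them) run with every
        # optimizer step.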
        train_op_Dz = opt_Dz.minimize(loss=loss['Dz_loss'], var_list=train_params['Dz_loss'])
        train_op_D = train_op_Dz

        train_op_AE = opt_AE.minimize(loss=loss['AE_loss'], var_list=train_params['AE_loss'])

    # =====================================
    # TF Graph Handler
    asset_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "asset"))
    img_gen_dir = make_dir_if_not_exist(os.path.join(asset_dir, "img_gen"))
    img_rec_dir = make_dir_if_not_exist(os.path.join(asset_dir, "img_rec"))
    img_itpl_dir = make_dir_if_not_exist(os.path.join(asset_dir, "img_itpl"))

    log_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "log"))
    train_log_file = os.path.join(log_dir, "train.log")

    summary_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "summary_tf"))
    model_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "model_tf"))

    train_helper = SimpleTrainHelper(
        log_dir=summary_dir,
        save_dir=model_dir,
        max_to_keep=3,
        max_to_keep_best=1,
    )
    # =====================================

    # =====================================
    # Training Loop
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)
    train_helper.initialize(sess, init_variables=True, create_summary_writer=True)

    Dz_fetch_keys = ['Dz_loss', 'D_loss_z0', 'D_loss_z1_gen',
                     'D_avg_prob_z0', 'D_avg_prob_z1_gen', 'gp0_z']
    D_fetch_keys = Dz_fetch_keys
    AE_fetch_keys = ['AE_loss', 'rec_x', 'G_loss_z1_gen']

    global_step = 0
    for epoch in range(args.epochs):
        for batch_ids in iterate_data(num_train, args.batch_size, shuffle=True):
            global_step += 1

            x = x_train[batch_ids]
            z = np.random.normal(size=[len(x), args.z_dim])
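            # z ~ N(0, I) are samples from the prior; Dz is trained to separate
            # them (D_loss_z0) from encoder outputs (D_loss_z1_gen).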

            # Alternate Dz_steps discriminator updates with AE_steps
            # autoencoder updates on the same mini-batch.
            for i in range(args.Dz_steps):
                _, Dm = sess.run(
                    [train_op_D, model.get_output(D_fetch_keys, as_dict=True)],
                    feed_dict={model.is_train: True, model.x_ph: x, model.z_ph: z})

            for i in range(args.AE_steps):
                _, AEm = sess.run(
                    [train_op_AE, model.get_output(AE_fetch_keys, as_dict=True)],
                    feed_dict={model.is_train: True, model.x_ph: x, model.z_ph: z})

            if global_step % args.save_freq == 0:
                train_helper.save(sess, global_step)

            if global_step % args.log_freq == 0:
                log_str = "\nEpoch {}/{}, Step {}, Dz_loss: {:.4f}, AE_loss: {:.4f}".format(
                              epoch, args.epochs, global_step, Dm['Dz_loss'], AEm['AE_loss']) + \
                          "\nrec_x: {:.4f}".format(AEm['rec_x']) + \
                          "\nD_loss_z0: {:.4f}, D_loss_z1_gen: {:.4f}, G_loss_z1_gen: {:.4f}".format(
                              Dm['D_loss_z0'], Dm['D_loss_z1_gen'], AEm['G_loss_z1_gen']) + \
                          "\nD_avg_prob_z0: {:.4f}, D_avg_prob_z1_gen: {:.4f}".format(
                              Dm['D_avg_prob_z0'], Dm['D_avg_prob_z1_gen']) + \
                          "\ngp0_z_coeff: {:.4f}, gp0_z: {:.4f}".format(args.gp0_z_coeff, Dm['gp0_z'])

                print(log_str)
                with open(train_log_file, "a") as f:
                    f.write(log_str)
                    f.write("\n")

                train_helper.add_summary(custom_tf_scalar_summary(
                    'AE_loss', AEm['AE_loss'], prefix='train'), global_step)
                train_helper.add_summary(custom_tf_scalar_summary(
                    'rec_x', AEm['rec_x'], prefix='train'), global_step)
                train_helper.add_summary(custom_tf_scalar_summary(
                    'G_loss_z1_gen', AEm['G_loss_z1_gen'], prefix='train'), global_step)

                train_helper.add_summary(custom_tf_scalar_summary(
                    'D_loss_z0', Dm['D_loss_z0'], prefix='train'), global_step)
                train_helper.add_summary(custom_tf_scalar_summary(
                    'D_loss_z1_gen', Dm['D_loss_z1_gen'], prefix='train'), global_step)

                train_helper.add_summary(custom_tf_scalar_summary(
                    'D_prob_z0', Dm['D_avg_prob_z0'], prefix='train'), global_step)
                train_helper.add_summary(custom_tf_scalar_summary(
                    'D_prob_z1_gen', Dm['D_avg_prob_z1_gen'], prefix='train'), global_step)

                train_helper.add_summary(custom_tf_scalar_summary(
                    'gp0_z', Dm['gp0_z'], prefix='train'), global_step)

            if global_step % args.viz_gen_freq == 0:
                # Generate images
                # ------------------------- #
                z = np.random.normal(size=[64, args.z_dim])
                img_file = os.path.join(img_gen_dir, 'step[%d]_gen_test.png' % global_step)

                model.generate_images(img_file, sess, z, block_shape=[8, 8],
                                      batch_size=args.batch_size,
                                      dec_output_2_img_func=binary_float_to_uint8)
                # ------------------------- #

            if global_step % args.viz_rec_freq == 0:
                # Reconstruct images
                # ------------------------- #
                x = x_train[np.random.choice(num_train, size=64, replace=False)]
                img_file = os.path.join(img_rec_dir, 'step[%d]_rec_test.png' % global_step)

                model.reconstruct_images(img_file, sess, x, block_shape=[8, 8],
                                         batch_size=args.batch_size,
                                         dec_output_2_img_func=binary_float_to_uint8)
                # ------------------------- #

            if global_step % args.viz_itpl_freq == 0:
                # Interpolate images
                # ------------------------- #
                x1 = x_train[np.random.choice(num_train, size=12, replace=False)]
                x2 = x_train[np.random.choice(num_train, size=12, replace=False)]

                img_file = os.path.join(img_itpl_dir, 'step[%d]_itpl_test.png' % global_step)

                model.interpolate_images(img_file, sess, x1, x2, num_itpl_points=12,
                                         batch_on_row=True, batch_size=args.batch_size,
                                         dec_output_2_img_func=binary_float_to_uint8)
                # ------------------------- #

        if epoch % 100 == 0:
            train_helper.save_separately(sess, model_name="model_epoch{}".format(epoch),
                                         global_step=global_step)

    # Last save
    train_helper.save(sess, global_step)
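
# `custom_tf_scalar_summary` used in the logging block above is a repo
# utility; a plausible minimal sketch (TF1 API; the prefix handling is an
# assumption based on how it is called):
import tensorflow as tf

def custom_tf_scalar_summary(tag, value, prefix=None):
    # Wrap a Python float into a tf.Summary proto under "<prefix>/<tag>".
    full_tag = "{}/{}".format(prefix, tag) if prefix else tag
    return tf.Summary(value=[tf.Summary.Value(tag=full_tag, simple_value=value)])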