def main(args): # ===================================== # Load config # ===================================== with open(os.path.join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Preparation # ===================================== data_file = os.path.join(RAW_DATA_DIR, "ComputerVision", "dSprites", "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz") # It is already in the range [0, 1] with np.load(data_file, encoding="latin1") as f: x_train = f['imgs'] x_train = np.expand_dims(x_train.astype(np.float32), axis=-1) # ===================================== # Instantiate models # ===================================== if args.enc_dec_model == "1Konny": encoder = Encoder_1Konny(args.z_dim, stochastic=True) decoder = Decoder_1Konny() disc_z = DiscriminatorZ_1Konny() else: raise ValueError("Do not support enc_dec_model='{}'!".format( args.enc_dec_model)) model = AAE([64, 64, 1], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, stochastic_z=args.stochastic_z, use_gp0_z=True, gp0_z_mode=args.gp0_z_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'G_loss_z1_gen': args.G_loss_z1_gen_coeff, 'D_loss_z1_gen': args.D_loss_z1_gen_coeff, 'gp0_z': args.gp0_z_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_list() # ===================================== # TF Graph Handler asset_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "asset")) img_eval = remove_dir_if_exist(os.path.join(asset_dir, "img_eval"), ask_4_permission=False) img_eval = make_dir_if_not_exist(img_eval) img_x_rec = make_dir_if_not_exist(os.path.join(img_eval, "x_rec")) img_z_rand_2_traversal = make_dir_if_not_exist( os.path.join(img_eval, "z_rand_2_traversal")) img_z_cond_all_traversal = make_dir_if_not_exist( os.path.join(img_eval, "z_cond_all_traversal")) img_z_cond_1_traversal = make_dir_if_not_exist( os.path.join(img_eval, "z_cond_1_traversal")) img_z_corr = make_dir_if_not_exist(os.path.join(img_eval, "z_corr")) img_z_dist = make_dir_if_not_exist(os.path.join(img_eval, "z_dist")) img_z_stat_dist = make_dir_if_not_exist( os.path.join(img_eval, "z_stat_dist")) img_rec_error_dist = make_dir_if_not_exist( os.path.join(img_eval, "rec_error_dist")) model_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # ===================================== # ===================================== # Training Loop # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) # Load model train_helper.load(sess, load_step=args.load_step) #''' # Reconstruction # ======================================= # seed = 389 x = x_train[np.arange(seed, seed + 64)] img_file = os.path.join(img_x_rec, 'x_rec_train.png') model.reconstruct_images(img_file, sess, x, block_shape=[8, 8], batch_size=-1, dec_output_2_img_func=binary_float_to_uint8) # ======================================= # # z random/conditional traversal # ======================================= # # Plot z cont with z cont z_zero = np.zeros([args.z_dim], dtype=np.float32) z_rand = np.random.randn(args.z_dim) z_start, z_stop = -4, 4 num_points = 8 for i in range(args.z_dim): for j in range(i + 1, args.z_dim): print("Plot random 2 comps z traverse with {} and {} components!". 
format(i, j)) img_file = os.path.join(img_z_rand_2_traversal, 'z[{},{},zero].png'.format(i, j)) model.rand_2_latents_traverse( img_file, sess, default_z=z_zero, z_comp1=i, start1=z_start, stop1=z_stop, num_points1=num_points, z_comp2=j, start2=z_start, stop2=z_stop, num_points2=num_points, batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) img_file = os.path.join(img_z_rand_2_traversal, 'z[{},{},rand].png'.format(i, j)) model.rand_2_latents_traverse( img_file, sess, default_z=z_rand, z_comp1=i, start1=z_start, stop1=z_stop, num_points1=num_points, z_comp2=j, start2=z_start, stop2=z_stop, num_points2=num_points, batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) seed = 389 z_start, z_stop = -4, 4 num_itpl_points = 8 for n in range(seed, seed + 30): print("Plot conditional all comps z traverse with test sample {}!". format(n)) x = x_train[n] img_file = os.path.join(img_z_cond_all_traversal, 'x_train{}.png'.format(n)) model.cond_all_latents_traverse( img_file, sess, x, start=z_start, stop=z_stop, num_itpl_points=num_itpl_points, batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) seed = 64 z_start, z_stop = -4, 4 num_itpl_points = 8 print("Plot conditional 1 comp z traverse!") for i in range(args.z_dim): x = x_train[seed:seed + 64] img_file = os.path.join( img_z_cond_1_traversal, 'x_train[{},{}]_z{}.png'.format(seed, seed + 64, i)) model.cond_1_latent_traverse( img_file, sess, x, z_comp=i, start=z_start, stop=z_stop, num_itpl_points=num_itpl_points, batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) # ======================================= # # ''' # z correlation matrix # ======================================= # data = x_train all_z = [] for batch_ids in iterate_data(len(data), args.batch_size, shuffle=False): x = data[batch_ids] z = model.encode(sess, x) assert len( z.shape) == 2 and z.shape[1] == args.z_dim, "z.shape: {}".format( z.shape) all_z.append(z) all_z = np.concatenate(all_z, axis=0) print("Start plotting!") plot_corrmat_with_histogram(os.path.join(img_z_corr, "corr_mat.png"), all_z) plot_comp_dist(os.path.join(img_z_dist, 'z_{}'), all_z, x_lim=(-5, 5)) print("Done!") # ======================================= # # ''' # z gaussian stddev # ======================================= # print("\nPlot z mean and stddev!") data = x_train all_z_mean = [] all_z_stddev = [] for batch_ids in iterate_data(len(data), args.batch_size, shuffle=False): x = data[batch_ids] z_mean, z_stddev = sess.run(model.get_output(['z_mean', 'z_stddev']), feed_dict={ model.is_train: False, model.x_ph: x }) all_z_mean.append(z_mean) all_z_stddev.append(z_stddev) all_z_mean = np.concatenate(all_z_mean, axis=0) all_z_stddev = np.concatenate(all_z_stddev, axis=0) plot_comp_dist(os.path.join(img_z_stat_dist, 'z_mean_{}.png'), all_z_mean, x_lim=(-5, 5)) plot_comp_dist(os.path.join(img_z_stat_dist, 'z_stddev_{}.png'), all_z_stddev, x_lim=(0, 3)) # ======================================= # # ''' # Decoder sensitivity # ======================================= # z_start = -3 z_stop = 3 for i in range(args.z_dim): print("\nPlot rec error distribution for z component {}!".format(i)) all_z1 = np.array(all_z, copy=True, dtype=np.float32) all_z2 = np.array(all_z, copy=True, dtype=np.float32) all_z1[:, i] = z_start all_z2[:, i] = z_stop all_x_rec1 = [] all_x_rec2 = [] for batch_ids in iterate_data(len(x_train), args.batch_size, shuffle=False): z1 = all_z1[batch_ids] z2 = all_z2[batch_ids] x1 = model.decode(sess, z1) x2 = model.decode(sess, z2)
all_x_rec1.append(x1) all_x_rec2.append(x2) all_x_rec1 = np.concatenate(all_x_rec1, axis=0) all_x_rec2 = np.concatenate(all_x_rec2, axis=0) rec_errors = np.sum(np.reshape((all_x_rec1 - all_x_rec2)**2, [len(x_train), 64 * 64]), axis=1) plot_comp_dist( os.path.join( img_rec_error_dist, 'rec_error[zi={},{},{}].png'.format(i, z_start, z_stop)), rec_errors)
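# --------------------------------------------------------------------------- #
# NOTE: `plot_corrmat_with_histogram` above is a repo-internal helper. The
# function below is only a minimal stand-in sketch, assuming the helper
# visualizes the latent-latent correlation matrix of `all_z` (shape
# (num_samples, z_dim)); the per-component histograms its name suggests are
# not reproduced here.
# --------------------------------------------------------------------------- #
import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless rendering, like the batch scripts in this file
import matplotlib.pyplot as plt


def plot_corrmat_sketch(img_file, all_z):
    # Columns of all_z are the latent components, hence rowvar=False.
    corr = np.corrcoef(all_z, rowvar=False)  # (z_dim, z_dim)
    fig, ax = plt.subplots(figsize=(6, 5))
    im = ax.imshow(np.abs(corr), vmin=0.0, vmax=1.0, cmap="viridis")
    ax.set_xlabel("z component")
    ax.set_ylabel("z component")
    fig.colorbar(im, ax=ax)
    fig.savefig(img_file, dpi=150, bbox_inches="tight")
    plt.close(fig)


# Usage example, with random latents standing in for model.encode(sess, x):
# plot_corrmat_sketch("corr_mat_sketch.png", np.random.randn(1000, 10))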
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== celebA_loader = TFCelebALoader(root_dir=args.celebA_root_dir) img_height, img_width = args.celebA_resize_size, args.celebA_resize_size celebA_loader.build_transformation_flow_tf( *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size)) num_train = celebA_loader.num_train_data # ===================================== # Instantiate model # ===================================== if args.activation == "relu": activation = tf.nn.relu elif args.activation == "leaky_relu": activation = tf.nn.leaky_relu else: raise ValueError("Do not support '{}' activation!".format(args.activation)) if args.enc_dec_model == "1Konny": # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim) encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation) decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation, output_activation=tf.nn.sigmoid) disc_z = DiscriminatorZ_1Konny(num_outputs=2) else: raise ValueError("Do not support encoder/decoder model '{}'!".format(args.enc_dec_model)) model = FactorVAE([img_height, img_width, 3], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'kld_loss': args.kld_loss_coeff, 'tc_loss': args.tc_loss_coeff, 'gp0_z_tc': args.gp0_z_tc_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments # save_dir = remove_dir_if_exist(join(args.save_dir, "FactorVAE_{}".format(args.run)), ask_4_permission=True) # save_dir = make_dir_if_not_exist(save_dir) save_dir = make_dir_if_not_exist(join(args.save_dir, "FactorVAE_{}".format(args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=3, suppress=True) num_bins = args.num_bins data_proportion = args.data_proportion bin_limits = tuple([float(s) for s in args.bin_limits.split(";")]) top_k = args.top_k f = open(join(save_dir, 'log[bins={},bin_limits={},data={}].txt'. format(num_bins, bin_limits, data_proportion)), mode='w') print_ = functools.partial(print_both, file=f) result_file = join(args.informativeness_metrics_dir, "FactorVAE_{}".format(args.run), 'results[bins={},bin_limits={},data={}].npz'. 
format(num_bins, bin_limits, data_proportion)) results = np.load(result_file, "r") print_("") print_("num_bins: {}".format(num_bins)) print_("bin_limits: {}".format(bin_limits)) print_("data_proportion: {}".format(data_proportion)) print_("top_k: {}".format(top_k)) # Plotting # =========================================== # # seed = 389 # num_samples = 30 seed = 398 num_samples = 1 ids = list(range(seed, seed + num_samples)) print_("\nids: {}".format(ids)) data = celebA_loader.sample_images_from_dataset(sess, 'train', ids) span = 3 points_one_side = 5 print_("sorted_MI: {}".format(results["sorted_MI_z_x"])) print_("sorted_z_ids: {}".format(results["sorted_z_comps"])) print_("sorted_norm_MI: {}".format(results["sorted_norm_MI_z_x"])) print_("sorted_norm_z_ids: {}".format(results["sorted_norm_z_comps"])) top_MI = results["sorted_MI_z_x"][:top_k] top_z_ids = results["sorted_z_comps"][:top_k] top_norm_MI = results["sorted_norm_MI_z_x"][:top_k] top_norm_z_ids = results["sorted_norm_z_comps"][:top_k] print("Matplotlib font size: {}".format(matplotlib.rcParams['font.size'],)) for n in range(len(ids)): if top_k == 10: print("Plot conditional all comps z traverse with train sample {}!".format(ids[n])) img_file = join(save_dir, "x_train[{}][bins={},bin_limits={},data={}].png". format(ids[n], num_bins, bin_limits, data_proportion)) model.cond_all_latents_traverse_v2(img_file, sess, data[n], z_comps=top_z_ids, z_comp_labels=["z[{}] ({:.2f})".format(comp, mi) for comp, mi in zip(top_z_ids, top_MI)], span=span, points_1_side=points_one_side, hl_x=True, font_size=matplotlib.rcParams['font.size'], subplot_adjust={'left': 0.15, 'right': 0.99, 'bottom': 0.01, 'top': 0.99}, size_inches=(6.3, 4.9), batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) img_file = join(save_dir, "x_train[{}][bins={},bin_limits={},data={},norm].png". format(ids[n], num_bins, bin_limits, data_proportion)) model.cond_all_latents_traverse_v2(img_file, sess, data[n], z_comps=top_norm_z_ids, z_comp_labels=["z[{}] ({:.2f})".format(comp, mi) for comp, mi in zip(top_norm_z_ids, top_norm_MI)], span=span, points_1_side=points_one_side, hl_x=True, font_size=matplotlib.rcParams['font.size'], subplot_adjust={'left': 0.15, 'right': 0.99, 'bottom': 0.01, 'top': 0.99}, size_inches=(6.3, 4.9), batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) else: print("Plot conditional all comps z traverse with train sample {}!".format(ids[n])) img_file = join(save_dir, "x_train[{}][bins={},bin_limits={},data={}].png". format(ids[n], num_bins, bin_limits, data_proportion)) model.cond_all_latents_traverse_v2(img_file, sess, data[n], z_comps=top_z_ids, z_comp_labels=["z[{}] ({:.2f})".format(comp, mi) for comp, mi in zip(top_z_ids, top_MI)], span=span, points_1_side=points_one_side, hl_x=True, font_size=5, subplot_adjust={'left': 0.19, 'right': 0.99, 'bottom': 0.01, 'top': 0.99}, size_inches=(2.98, 9.85), batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) img_file = join(save_dir, "x_train[{}][bins={},bin_limits={},data={},norm].png". 
format(ids[n], num_bins, bin_limits, data_proportion)) model.cond_all_latents_traverse_v2(img_file, sess, data[n], z_comps=top_norm_z_ids, z_comp_labels=["z[{}] ({:.2f})".format(comp, mi) for comp, mi in zip(top_norm_z_ids, top_norm_MI)], span=span, points_1_side=points_one_side, hl_x=True, font_size=5, subplot_adjust={'left': 0.19, 'right': 0.99, 'bottom': 0.01, 'top': 0.99}, size_inches=(2.98, 9.85), batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) # =========================================== # f.close()
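# --------------------------------------------------------------------------- #
# NOTE: every script in this file builds its logger via
# `print_ = functools.partial(print_both, file=f)`. `print_both` is a repo
# helper; a minimal equivalent is sketched below under the assumption that it
# simply echoes the message to stdout and to the already opened log file.
# --------------------------------------------------------------------------- #
def print_both_sketch(*args, file=None, **kwargs):
    # Print to the console ...
    print(*args, **kwargs)
    # ... and, when a log file handle is supplied, to that file as well.
    if file is not None:
        print(*args, file=file, **kwargs)
        file.flush()


# Usage example:
#   import functools
#   f = open("log.txt", mode="w")
#   print_ = functools.partial(print_both_sketch, file=f)
#   print_("num_bins: {}".format(100))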
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites", "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz") # It is already in the range [0, 1] with np.load(data_file, encoding="latin1") as f: x_train = f['imgs'] # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y y_train = f['latents_classes'][:, 1:] x_train = np.expand_dims(x_train.astype(np.float32), axis=-1) num_train = len(x_train) print("num_train: {}".format(num_train)) print("y_train[:10]: {}".format(y_train[:10])) # ===================================== # Instantiate model # ===================================== if args.enc_dec_model == "1Konny": encoder = Encoder_1Konny(args.z_dim, stochastic=True) decoder = Decoder_1Konny() disc_z = DiscriminatorZ_1Konny(num_outputs=2) else: raise ValueError("Do not support enc_dec_model='{}'!".format(args.enc_dec_model)) model = FactorVAE([64, 64, 1], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'kld_loss': args.kld_loss_coeff, 'tc_loss': args.tc_loss_coeff, 'gp0_z_tc': args.gp0_z_tc_coeff, 'Dz_tc_loss_coeff': args.Dz_tc_loss_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments save_dir = make_dir_if_not_exist(join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=5, suppress=True) num_bins = args.num_bins data_proportion = args.data_proportion num_data = int(data_proportion * num_train) assert num_data == num_train, "For dSprites, you must use all data!" # file f = open(join(save_dir, 'log[bins={},data={}].txt'. 
format(num_bins, data_proportion)), mode='w') # print function print_ = functools.partial(print_both, file=f) print_("num_bins: {}".format(num_bins)) print_("data_proportion: {}".format(data_proportion)) # Compute representations # ================================= # z_data_file = join(save_dir, "z_data[data={}].npz".format(data_proportion)) if not exists(z_data_file): all_z_mean = [] all_z_stddev = [] print("") print("Compute all_z_mean, all_z_stddev and all_attrs!") count = 0 for batch_ids in iterate_data(num_data, 10 * args.batch_size, shuffle=False): x = x_train[batch_ids] z_mean, z_stddev = sess.run( model.get_output(['z_mean', 'z_stddev']), feed_dict={model.is_train: False, model.x_ph: x}) all_z_mean.append(z_mean) all_z_stddev.append(z_stddev) count += len(batch_ids) print("\rProcessed {} samples!".format(count), end="") print() all_z_mean = np.concatenate(all_z_mean, axis=0) all_z_stddev = np.concatenate(all_z_stddev, axis=0) np.savez_compressed(z_data_file, all_z_mean=all_z_mean, all_z_stddev=all_z_stddev) else: print("{} exists. Load data from file!".format(z_data_file)) with np.load(z_data_file, "r") as f: all_z_mean = f['all_z_mean'] all_z_stddev = f['all_z_stddev'] print_("") print_("all_z_mean.shape: {}".format(all_z_mean.shape)) print_("all_z_stddev.shape: {}".format(all_z_stddev.shape)) # ================================= # # Transpose and compute MIG score # ================================= # assert len(all_z_mean) == len(y_train) # (num_latents, num_samples) all_z_mean = np.transpose(all_z_mean, [1, 0]) print_("") print_("all_z_mean.shape: {}".format(all_z_mean.shape)) y_train = np.transpose(y_train, [1, 0]) print_("") print_("y_train.shape: {}".format(y_train.shape)) # All # --------------------------------- # result_all = compute_mig(all_z_mean, y_train, is_discrete_z=False, is_discrete_y=True, num_bins=num_bins) # (num_latents, num_factors) MI_gap_y = result_all['MI_gap_y'] attr_ids_sorted = np.argsort(MI_gap_y, axis=0)[::-1].tolist() MI_gap_y_sorted = MI_gap_y[attr_ids_sorted].tolist() print_("") print_("MIG: {}".format(result_all['MIG'])) print_("Sorted factors:\n{}".format(list(zip(attr_ids_sorted, MI_gap_y_sorted)))) save_file = join(save_dir, "results[bins={},data={}].npz".format(num_bins, data_proportion)) np.savez_compressed(save_file, **result_all) # --------------------------------- # # ================================= # f.close()
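# --------------------------------------------------------------------------- #
# NOTE: `compute_mig` above is a repo function. For a self-contained reference,
# the sketch below computes MIG in the usual way: discretize each latent mean,
# estimate I(z_i; y_k) from joint histograms, and average the normalized gap
# between the two largest informations over factors. Equal-width binning and
# natural-log entropies are assumptions; the repo implementation may differ.
# --------------------------------------------------------------------------- #
import numpy as np


def _discrete_entropy(labels):
    # Entropy (in nats) of a 1-D array of integer labels.
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return -np.sum(p * np.log(p))


def _discrete_mi(a, b):
    # I(a; b) from the joint histogram of two 1-D integer label arrays.
    joint = np.zeros((a.max() + 1, b.max() + 1), dtype=np.float64)
    np.add.at(joint, (a, b), 1.0)
    joint /= joint.sum()
    pa = joint.sum(axis=1, keepdims=True)
    pb = joint.sum(axis=0, keepdims=True)
    nz = joint > 0
    return np.sum(joint[nz] * (np.log(joint[nz]) - np.log((pa * pb)[nz])))


def mig_sketch(z_mean, y, num_bins=20):
    # z_mean: (z_dim, num_samples) continuous codes.
    # y: (num_factors, num_samples) integer-coded ground-truth factors.
    z_binned = np.stack([
        np.digitize(zi, np.histogram(zi, bins=num_bins)[1][:-1]) for zi in z_mean
    ])
    mi = np.array([[_discrete_mi(z_binned[i], y[k]) for k in range(y.shape[0])]
                   for i in range(z_binned.shape[0])])  # (z_dim, num_factors)
    h_y = np.array([_discrete_entropy(y[k]) for k in range(y.shape[0])])
    mi_sorted = np.sort(mi, axis=0)[::-1]  # descending over latents
    mi_gap_y = (mi_sorted[0] - mi_sorted[1]) / h_y  # per-factor normalized gap
    return float(np.mean(mi_gap_y)), mi_gap_y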
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites", "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz") # It is already in the range [0, 1] with np.load(data_file, encoding="latin1") as f: x_train = f['imgs'] x_train = np.reshape(x_train, [3, 6, 40, 32, 32, 64, 64, 1]) # ===================================== # Instantiate model # ===================================== if args.enc_dec_model == "1Konny": encoder = Encoder_1Konny(args.z_dim, stochastic=True) decoder = Decoder_1Konny() disc_z = DiscriminatorZ_1Konny(num_outputs=2) else: raise ValueError("Do not support enc_dec_model='{}'!".format( args.enc_dec_model)) model = FactorVAE([64, 64, 1], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'kld_loss': args.kld_loss_coeff, 'tc_loss': args.tc_loss_coeff, 'gp0_z_tc': args.gp0_z_tc_coeff, 'Dz_tc_loss_coeff': args.Dz_tc_loss_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments save_dir = make_dir_if_not_exist( join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=3, suppress=True) result_file = join(args.JEMMIG_sampling_dir, "{}_{}".format(args.enc_dec_model, args.run), "results[num_samples={}].npz".format(args.num_samples)) results = np.load(result_file, "r") print("results.keys: {}".format(list(results.keys()))) # Plotting # =========================================== # data = [ x_train[0, 3, 20, 16, 16], x_train[1, 3, 20, 16, 16], x_train[2, 3, 20, 16, 16] ] gt_factors = ['Shape', 'Scale', 'Rotation', 'Pos_x', 'Pos_y'] ids_sorted = results['id_sorted'] MI_zi_yk_sorted = results['MI_zi_yk_sorted'] H_zi_yk_sorted = results['H_zi_yk_sorted'] H_yk = results['H_yk'] RMIG_yk = results['RMIG_yk'] RMIG_norm_yk = results['RMIG_norm_yk'] JEMMIG_yk = results['JEMMIG_yk'] print("MI_zi_yk_sorted:\n{}".format(MI_zi_yk_sorted)) print("\nShow RMIG!") for k in range(len(gt_factors)): print( "{}, RMIG: {:.4f}, RMIG (norm): {:.4f}, H: {:.4f}, I1: {:.4f}, I2: {:.4f}" .format(gt_factors[k], RMIG_yk[k], RMIG_norm_yk[k], H_yk[k], MI_zi_yk_sorted[0, k], MI_zi_yk_sorted[1, k])) print("\nShow JEMMIG!") for k in range(len(gt_factors)): print( "{}, JEMMIG: {:.4f}, H1: {:.4f}, H1-I1: {:.4f}, I2: {:.4f}, top2 ids: z{}, z{}" .format(gt_factors[k], JEMMIG_yk[k], H_zi_yk_sorted[0, k], H_zi_yk_sorted[0, k] - MI_zi_yk_sorted[0, k], MI_zi_yk_sorted[1, k], ids_sorted[0, k], ids_sorted[1, k])) span = 3 points_one_side = 5 for n in range(len(data)): for k in range(len(gt_factors)): 
print("x={}, y={}!".format(n, gt_factors[k])) img_file = join( save_dir, "{}-x{}_num_samples={}].png".format(gt_factors[k], n, args.num_samples)) ids_top3 = ids_sorted[:3, k] MI_top3 = MI_zi_yk_sorted[:3, k] model.cond_all_latents_traverse_v2( img_file, sess, data[n], z_comps=ids_top3, z_comp_labels=[ "z[{}] ({:.4f})".format(comp, mi) for comp, mi in zip(ids_top3, MI_top3) ], span=span, points_1_side=points_one_side, hl_x=True, font_size=9, title="{} (RMIG={:.4f}, JEMMIG={:.4f}, H={:.4f})".format( gt_factors[k], RMIG_yk[k], JEMMIG_yk[k], H_yk[k]), title_font_scale=1.5, subplot_adjust={ 'left': 0.16, 'right': 0.99, 'bottom': 0.01, 'top': 0.88 }, size_inches=(6.2, 1.7), batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) f.close()
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites", "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz") # It is already in the range [0, 1] with np.load(data_file, encoding="latin1") as f: x_train = f['imgs'] # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y y_train = f['latents_classes'] x_train = np.expand_dims(x_train.astype(np.float32), axis=-1) num_train = len(x_train) print("num_train: {}".format(num_train)) print("y_train[:10]: {}".format(y_train[:10])) # ===================================== # Instantiate model # ===================================== if args.enc_dec_model == "1Konny": encoder = Encoder_1Konny(args.z_dim, stochastic=True) decoder = Decoder_1Konny() disc_z = DiscriminatorZ_1Konny() else: raise ValueError("Do not support enc_dec_model='{}'!".format(args.enc_dec_model)) model = AAE([64, 64, 1], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, stochastic_z=args.stochastic_z, use_gp0_z=True, gp0_z_mode=args.gp0_z_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'G_loss_z1_gen': args.G_loss_z1_gen_coeff, 'D_loss_z1_gen': args.D_loss_z1_gen_coeff, 'gp0_z': args.gp0_z_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments save_dir = make_dir_if_not_exist(join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=5, suppress=True) num_bins = args.num_bins bin_limits = tuple([float(s) for s in args.bin_limits.split(";")]) data_proportion = args.data_proportion num_data = int(data_proportion * num_train) assert num_data == num_train, "For dSprites, you must use all data!" top_k = args.top_k eps = 1e-8 # file f = open(join(save_dir, 'log[bins={},bin_limits={},data={}].txt'. 
format(num_bins, bin_limits, data_proportion)), mode='w') # print function print_ = functools.partial(print_both, file=f) print_("num_bins: {}".format(num_bins)) print_("bin_limits: {}".format(bin_limits)) print_("data_proportion: {}".format(data_proportion)) print_("top_k: {}".format(top_k)) # Compute bins # ================================= # print_("") print_("bin_limits: {}".format(bin_limits)) assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[1], "bin_limits={}".format(bin_limits) bins = np.linspace(bin_limits[0], bin_limits[1], num_bins + 1, endpoint=True) print_("bins: {}".format(bins)) assert len(bins) == num_bins + 1 bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))] print_("bin_widths: {}".format(bin_widths)) assert len(bin_widths) == num_bins, "len(bin_widths)={} while num_bins={}!".format(len(bin_widths), num_bins) assert np.all(np.greater(bin_widths, 0)), "bin_widths: {}".format(bin_widths) bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))] print_("bin_centers: {}".format(bin_centers)) assert len(bin_centers) == num_bins, "len(bin_centers)={} while num_bins={}!".format(len(bin_centers), num_bins) # ================================= # # Compute representations # ================================= # z_data_file = join(args.informativeness_metrics_dir, "{}_{}".format(args.enc_dec_model, args.run), "z_data[data={}].npz".format(data_proportion)) with np.load(z_data_file, "r") as f: all_z_mean = f['all_z_mean'] all_z_stddev = f['all_z_stddev'] print_("") print_("all_z_mean.shape: {}".format(all_z_mean.shape)) print_("all_z_stddev.shape: {}".format(all_z_stddev.shape)) # ================================= # # Compute the mutual information # ================================= # mi_file = join(args.informativeness_metrics_dir, "{}_{}".format(args.enc_dec_model, args.run), 'results[bins={},bin_limits={},data={}].npz'. 
format(num_bins, bin_limits, data_proportion)) with np.load(mi_file, "r") as f: sorted_MI_z_x = f['sorted_MI_z_x'] sorted_z_ids = f['sorted_z_comps'] H_z = f['H_z'] if top_k > 0: top_MI = sorted_MI_z_x[:top_k] top_z_ids = sorted_z_ids[:top_k] bot_MI = sorted_MI_z_x[-top_k:] bot_z_ids = sorted_z_ids[-top_k:] top_bot_MI = np.concatenate([top_MI, bot_MI], axis=0) top_bot_z_ids = np.concatenate([top_z_ids, bot_z_ids], axis=0) print_("top MI: {}".format(top_MI)) print_("top_z_ids: {}".format(top_z_ids)) print_("bot MI: {}".format(bot_MI)) print_("bot_z_ids: {}".format(bot_z_ids)) else: top_bot_MI = sorted_MI_z_x top_bot_z_ids = sorted_z_ids # ================================= # H_z1z2_mean_mat = np.full([len(top_bot_z_ids), len(top_bot_z_ids)], -1, dtype=np.float32) MI_z1z2_mean_mat = np.full([len(top_bot_z_ids), len(top_bot_z_ids)], -1, dtype=np.float32) H_z1z2_mean = [] MI_z1z2_mean = [] z1z2_ids = [] # Compute the mutual information # ================================= # for i in range(len(top_bot_z_ids)): z_idx1 = top_bot_z_ids[i] H_s1 = H_z[z_idx1] for j in range(i + 1, len(top_bot_z_ids)): z_idx2 = top_bot_z_ids[j] H_s2 = H_z[z_idx2] print_("") print_("Compute MI(z{}_mean, z{}_mean)!".format(z_idx1, z_idx2)) s1s2_mean_counter = np.zeros([num_bins, num_bins], dtype=np.int32) for batch_ids in iterate_data(len(all_z_mean), 100, shuffle=False, include_remaining=True): s1 = at_bin(all_z_mean[batch_ids, z_idx1], bins, one_hot=False) s2 = at_bin(all_z_mean[batch_ids, z_idx2], bins, one_hot=False) for s1_, s2_ in zip(s1, s2): s1s2_mean_counter[s1_, s2_] += 1 # I(s1, s2) = Q(s1, s2) * (log Q(s1, s2) - log Q(s1) log Q(s2)) # ---------------------------------- # Q_s1s2_mean = (s1s2_mean_counter * 1.0) / np.sum(s1s2_mean_counter).astype(np.float32) log_Q_s1s2_mean = np.log(np.maximum(Q_s1s2_mean, eps)) H_s1s2_mean = -np.sum(Q_s1s2_mean * log_Q_s1s2_mean) MI_s1s2_mean = H_s1 + H_s2 - H_s1s2_mean print_("H_s1: {}".format(H_s1)) print_("H_s2: {}".format(H_s2)) print_("H_s1s2_mean: {}".format(H_s1s2_mean)) print_("MI_s1s2_mean: {}".format(MI_s1s2_mean)) H_z1z2_mean.append(H_s1s2_mean) MI_z1z2_mean.append(MI_s1s2_mean) z1z2_ids.append((z_idx1, z_idx2)) H_z1z2_mean_mat[i, j] = H_s1s2_mean H_z1z2_mean_mat[j, i] = H_s1s2_mean MI_z1z2_mean_mat[i, j] = MI_s1s2_mean MI_z1z2_mean_mat[j, i] = MI_s1s2_mean H_z1z2_mean = np.asarray(H_z1z2_mean, dtype=np.float32) MI_z1z2_mean = np.asarray(MI_z1z2_mean, dtype=np.float32) z1z2_ids = np.asarray(z1z2_ids, dtype=np.int32) result_file = join(save_dir, "results[bins={},bin_limits={},data={},k={}].npz". format(num_bins, bin_limits, data_proportion, top_k)) results = { 'H_z1z2_mean': H_z1z2_mean, 'MI_z1z2_mean': MI_z1z2_mean, 'H_z1z2_mean_mat': H_z1z2_mean_mat, 'MI_z1z2_mean_mat': MI_z1z2_mean_mat, 'z1z2_ids': z1z2_ids, } np.savez_compressed(result_file, **results) # ================================= # f.close()
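# --------------------------------------------------------------------------- #
# NOTE: the nested loop above evaluates the identity
# I(z1; z2) = H(z1) + H(z2) - H(z1, z2) on binned latent means, taking the
# joint entropy from the bin-count matrix and the marginal entropies from the
# precomputed H_z. The sketch below does the same computation for one pair;
# the repo's `at_bin` helper is approximated here with np.digitize, which is
# an assumption about how out-of-range values are handled.
# --------------------------------------------------------------------------- #
import numpy as np


def pairwise_mi_from_bins(z1_mean, z2_mean, bins, H_z1, H_z2, eps=1e-8):
    # Assign every sample of each latent mean to a bin index in [0, num_bins).
    s1 = np.clip(np.digitize(z1_mean, bins) - 1, 0, len(bins) - 2)
    s2 = np.clip(np.digitize(z2_mean, bins) - 1, 0, len(bins) - 2)
    # Joint bin-count matrix, normalized to a joint probability table Q(s1, s2).
    counts = np.zeros((len(bins) - 1, len(bins) - 1), dtype=np.float64)
    np.add.at(counts, (s1, s2), 1.0)
    Q = counts / counts.sum()
    H_joint = -np.sum(Q * np.log(np.maximum(Q, eps)))
    return H_z1 + H_z2 - H_joint, H_joint


# Usage example with synthetic, correlated latents (H_z1/H_z2 illustrative only):
#   bins = np.linspace(-4, 4, 101)
#   z1 = np.random.randn(10000); z2 = 0.8 * z1 + 0.2 * np.random.randn(10000)
#   mi, h12 = pairwise_mi_from_bins(z1, z2, bins, H_z1=1.4, H_z2=1.4)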
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites", "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz") # It is already in the range [0, 1] with np.load(data_file, encoding="latin1") as f: x_train = f['imgs'] # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y y_train = f['latents_classes'] x_train = np.expand_dims(x_train.astype(np.float32), axis=-1) num_train = len(x_train) print("num_train: {}".format(num_train)) print("y_train[:10]: {}".format(y_train[:10])) # ===================================== # Instantiate model # ===================================== if args.enc_dec_model == "1Konny": encoder = Encoder_1Konny(args.z_dim, stochastic=True) decoder = Decoder_1Konny() disc_z = DiscriminatorZ_1Konny() else: raise ValueError("Do not support enc_dec_model='{}'!".format( args.enc_dec_model)) model = AAE([64, 64, 1], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, stochastic_z=args.stochastic_z, use_gp0_z=True, gp0_z_mode=args.gp0_z_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'G_loss_z1_gen': args.G_loss_z1_gen_coeff, 'D_loss_z1_gen': args.D_loss_z1_gen_coeff, 'gp0_z': args.gp0_z_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments save_dir = make_dir_if_not_exist( join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=5, suppress=True) num_bins = args.num_bins bin_limits = tuple([float(s) for s in args.bin_limits.split(";")]) data_proportion = args.data_proportion num_data = int(data_proportion * num_train) assert num_data == num_train, "For dSprites, you must use all data!" 
eps = 1e-8 # file f = open(join( save_dir, 'log[bins={},bin_limits={},data={}].txt'.format( num_bins, bin_limits, data_proportion)), mode='w') # print function print_ = functools.partial(print_both, file=f) print_("num_bins: {}".format(num_bins)) print_("bin_limits: {}".format(bin_limits)) print_("data_proportion: {}".format(data_proportion)) # Compute bins # ================================= # print_("") print_("bin_limits: {}".format(bin_limits)) assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[ 1], "bin_limits={}".format(bin_limits) bins = np.linspace(bin_limits[0], bin_limits[1], num_bins + 1, endpoint=True) print_("bins: {}".format(bins)) assert len(bins) == num_bins + 1 bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))] print_("bin_widths: {}".format(bin_widths)) assert len(bin_widths ) == num_bins, "len(bin_widths)={} while num_bins={}!".format( len(bin_widths), num_bins) assert np.all(np.greater(bin_widths, 0)), "bin_widths: {}".format(bin_widths) bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))] print_("bin_centers: {}".format(bin_centers)) assert len(bin_centers ) == num_bins, "len(bin_centers)={} while num_bins={}!".format( len(bin_centers), num_bins) # ================================= # # Compute representations # ================================= # z_data_file = join(save_dir, "z_data[data={}].npz".format(data_proportion)) if not exists(z_data_file): all_z_mean = [] all_z_stddev = [] print("") print("Compute all_z_mean, all_z_stddev and all_attrs!") count = 0 for batch_ids in iterate_data(num_data, 10 * args.batch_size, shuffle=False): x = x_train[batch_ids] z_mean, z_stddev = sess.run(model.get_output( ['z_mean', 'z_stddev']), feed_dict={ model.is_train: False, model.x_ph: x }) all_z_mean.append(z_mean) all_z_stddev.append(z_stddev) count += len(batch_ids) print("\rProcessed {} samples!".format(count), end="") print() all_z_mean = np.concatenate(all_z_mean, axis=0) all_z_stddev = np.concatenate(all_z_stddev, axis=0) np.savez_compressed(z_data_file, all_z_mean=all_z_mean, all_z_stddev=all_z_stddev) else: print("{} exists. 
Load data from file!".format(z_data_file)) with np.load(z_data_file, "r") as f: all_z_mean = f['all_z_mean'] all_z_stddev = f['all_z_stddev'] # ================================= # print_("") all_Q_z_cond_x = [] for i in range(args.z_dim): print_("\nCompute all_Q_z{}_cond_x!".format(i)) all_Q_s_cond_x = [] for batch_ids in iterate_data(len(all_z_mean), 500, shuffle=False, include_remaining=True): # (batch_size, num_bins) q_s_cond_x = normal_density( np.expand_dims(bin_centers, axis=0), mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1), stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1)) # (batch_size, num_bins) max_q_s_cond_x = np.max(q_s_cond_x, axis=-1) # print("\nmax_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x))) # (batch_size, num_bins) deter_s_cond_x = at_bin(all_z_mean[batch_ids, i], bins).astype(np.float32) # (batch_size, num_bins) Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0) Q_s_cond_x = Q_s_cond_x / np.maximum( np.sum(Q_s_cond_x, axis=1, keepdims=True), eps) # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1)))) Q_s_cond_x = np.where( np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1), deter_s_cond_x, Q_s_cond_x) # print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1)))) all_Q_s_cond_x.append(Q_s_cond_x) # (num_samples, num_bins) all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0) assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \ "sorted_all_Q_s_cond_x[:30]:\n{}!".format(np.sort(all_Q_s_cond_x[:30], axis=None)) assert len(all_Q_s_cond_x) == num_train all_Q_z_cond_x.append(all_Q_s_cond_x) # (z_dim, num_samples, num_bins) all_Q_z_cond_x = np.asarray(all_Q_z_cond_x, dtype=np.float32) print_("all_Q_z_cond_x.shape: {}".format(all_Q_z_cond_x.shape)) print_("sum(all_Q_z_cond_x)[:, :10]:\n{}".format( np.sum(all_Q_z_cond_x, axis=-1)[:, :10])) # (z_dim, num_bins) Q_z = np.mean(all_Q_z_cond_x, axis=1) log_Q_z = np.log(np.clip(Q_z, eps, 1 - eps)) print_("Q_z.shape: {}".format(Q_z.shape)) print_("sum(Q_z): {}".format(np.sum(Q_z, axis=-1))) # (z_dim, ) H_z = -np.sum(Q_z * log_Q_z, axis=-1) # Factors gt_factors = ['shape', 'scale', 'rotation', 'pos_x', 'pos_y'] gt_num_values = [3, 6, 40, 32, 32] MI_z_y = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32) H_z_y = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32) ids_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.int32) MI_z_y_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32) H_z_y_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32) H_y = [] RMIG = [] JEMMI = [] for k, (factor, num_values) in enumerate(zip(gt_factors, gt_num_values)): print_("\n#" + "=" * 50 + "#") print_("The {}-th gt factor '{}' has {} values!".format( k, factor, num_values)) print_("") # (num_samples, num_categories) # NOTE: We must use k+1 to account for the 'color' attribute, which is always white all_Q_yk_cond_x = one_hot(y_train[:, k + 1], num_categories=num_values, dtype=np.float32) print_("all_Q_yk_cond_x.shape: {}".format(all_Q_yk_cond_x.shape)) # (num_categories) Q_yk = np.mean(all_Q_yk_cond_x, axis=0) log_Q_yk = np.log(np.clip(Q_yk, eps, 1 - eps)) print_("Q_yk.shape: {}".format(Q_yk.shape)) H_yk = -np.sum(Q_yk * log_Q_yk) print_("H_yk: {}".format(H_yk)) H_y.append(H_yk) Q_z_yk = np.zeros([args.z_dim, num_bins, num_values], dtype=np.float32) # Compute I(zi, yk) for i in range(args.z_dim): print_("\n#" + "-" * 50 + "#") all_Q_zi_cond_x = all_Q_z_cond_x[i] assert 
len(all_Q_zi_cond_x) == len(all_Q_yk_cond_x) == num_train, \ "all_Q_zi_cond_x.shape: {}, all_Q_yk_cond_x.shape: {}".format( all_Q_zi_cond_x.shape, all_Q_yk_cond_x.shape) # (num_bins, num_categories) Q_zi_yk = np.matmul(np.transpose(all_Q_zi_cond_x, axes=[1, 0]), all_Q_yk_cond_x) Q_zi_yk = Q_zi_yk / num_train print_("np.sum(Q_zi_yk): {}".format(np.sum(Q_zi_yk))) Q_zi_yk = Q_zi_yk / np.maximum(np.sum(Q_zi_yk), eps) print_("np.sum(Q_zi_yk) (normalized): {}".format(np.sum(Q_zi_yk))) assert np.all(Q_zi_yk >= 0), "'Q_zi_yk' contains negative values. " \ "sorted_Q_zi_yk[:10]:\n{}!".format(np.sort(Q_zi_yk, axis=None)) # (num_bins, num_categories) log_Q_zi_yk = np.log(np.clip(Q_zi_yk, eps, 1 - eps)) print_("") print_("Q_zi (default): {}".format(Q_z[i])) print_("Q_zi (sum of Q_zi_yk over yk): {}".format( np.sum(Q_zi_yk, axis=-1))) print_("") print_("Q_yk (default): {}".format(Q_yk)) print_("Q_yk (sum of Q_zi_yk over zi): {}".format( np.sum(Q_zi_yk, axis=0))) MI_zi_yk = Q_zi_yk * (log_Q_zi_yk - np.expand_dims( log_Q_z[i], axis=-1) - np.expand_dims(log_Q_yk, axis=0)) MI_zi_yk = np.sum(MI_zi_yk) H_zi_yk = -np.sum(Q_zi_yk * log_Q_zi_yk) Q_z_yk[i] = Q_zi_yk MI_z_y[i, k] = MI_zi_yk H_z_y[i, k] = H_zi_yk print_("#" + "-" * 50 + "#") # Print statistics for all z print_("") print_("MI_z_yk:\n{}".format(MI_z_y[:, k])) print_("H_z_yk:\n{}".format(H_z_y[:, k])) print_("H_z:\n{}".format(H_z)) print_("H_yk:\n{}".format(H_yk)) # Compute RMIG and JEMMI ids_yk_sorted = np.argsort(MI_z_y[:, k], axis=0)[::-1] MI_z_yk_sorted = np.take_along_axis(MI_z_y[:, k], ids_yk_sorted, axis=0) H_z_yk_sorted = np.take_along_axis(H_z_y[:, k], ids_yk_sorted, axis=0) RMIG_yk = np.divide(MI_z_yk_sorted[0] - MI_z_yk_sorted[1], H_yk) JEMMI_yk = np.divide( H_z_yk_sorted[0] - MI_z_yk_sorted[0] + MI_z_yk_sorted[1], H_yk + np.log(num_bins)) ids_sorted[:, k] = ids_yk_sorted MI_z_y_sorted[:, k] = MI_z_yk_sorted H_z_y_sorted[:, k] = H_z_yk_sorted RMIG.append(RMIG_yk) JEMMI.append(JEMMI_yk) print_("") print_("ids_sorted: {}".format(ids_sorted)) print_("MI_z_yk_sorted: {}".format(MI_z_yk_sorted)) print_("RMIG_yk: {}".format(RMIG_yk)) print_("JEMMI_yk: {}".format(JEMMI_yk)) z_yk_prob_file = join( save_dir, "z_yk_prob_4_{}[bins={},bin_limits={},data={}].npz".format( factor, num_bins, bin_limits, data_proportion)) np.savez_compressed(z_yk_prob_file, Q_z_yk=Q_z_yk) print_("#" + "=" * 50 + "#") results = { "MI_z_y": MI_z_y, "H_z_y": H_z_y, "ids_sorted": ids_sorted, "MI_z_y_sorted": MI_z_y_sorted, "H_z_y_sorted": H_z_y_sorted, "H_z": H_z, "H_y": np.asarray(H_y, dtype=np.float32), "RMIG": np.asarray(RMIG, dtype=np.float32), "JEMMI": np.asarray(JEMMI, dtype=np.float32), } result_file = join( save_dir, "results[bins={},bin_limits={},data={}].npz".format( num_bins, bin_limits, data_proportion)) np.savez_compressed(result_file, **results) f.close()
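# --------------------------------------------------------------------------- #
# NOTE: the script above relies on repo helpers whose behavior is only implied
# by their call sites. First, `normal_density` + `at_bin` are used to
# discretize the Gaussian posterior q(z_i | x) over a fixed grid of bins:
# evaluate the density at the bin centers, weight by the bin widths,
# renormalize, and fall back to a one-hot assignment of the mean when the
# density underflows everywhere. The sketch below reproduces that step under
# those assumptions.
# --------------------------------------------------------------------------- #
import numpy as np


def discretize_gaussian_posterior(z_mean, z_stddev, bins, eps=1e-8):
    # z_mean, z_stddev: (batch,) posterior parameters of one latent component.
    bin_centers = 0.5 * (bins[1:] + bins[:-1])  # (num_bins,)
    bin_widths = bins[1:] - bins[:-1]           # (num_bins,)
    # Gaussian density at every bin center: (batch, num_bins).
    diff = (bin_centers[None, :] - z_mean[:, None]) / z_stddev[:, None]
    q = np.exp(-0.5 * diff ** 2) / (np.sqrt(2.0 * np.pi) * z_stddev[:, None])
    # Probability mass per bin, renormalized over the truncated support.
    Q = q * bin_widths[None, :]
    Q = Q / np.maximum(Q.sum(axis=1, keepdims=True), eps)
    # Fallback: if the density underflows in every bin, put all the mass into
    # the bin containing the mean (the role of `at_bin(..., one_hot=True)`).
    idx = np.clip(np.digitize(z_mean, bins) - 1, 0, len(bins) - 2)
    one_hot = np.eye(len(bin_centers), dtype=np.float64)[idx]
    return np.where(q.max(axis=1, keepdims=True) < 1e-5, one_hot, Q)


# Second, the per-factor scores computed at the end of the loop above reduce to
# the closed forms below, where I1 >= I2 are the two largest values of
# I(z_i; y_k) over latents and H1 = H(z_{i*}, y_k) is the joint entropy of the
# top latent with the factor. This helper only restates the code above.
def rmig_jemmi_for_factor(MI_z_yk, H_z_yk, H_yk, num_bins):
    # MI_z_yk, H_z_yk: (z_dim,) arrays holding I(z_i; y_k) and H(z_i, y_k).
    order = np.argsort(MI_z_yk)[::-1]  # latents sorted by decreasing MI
    I1, I2 = MI_z_yk[order[0]], MI_z_yk[order[1]]
    H1 = H_z_yk[order[0]]
    rmig_yk = (I1 - I2) / H_yk  # robust MI gap, normalized by H(y_k)
    jemmi_yk = (H1 - I1 + I2) / (H_yk + np.log(num_bins))
    return rmig_yk, jemmi_yk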
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== celebA_loader = TFCelebALoader(root_dir=args.celebA_root_dir) img_height, img_width = args.celebA_resize_size, args.celebA_resize_size celebA_loader.build_transformation_flow_tf( *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size)) # ===================================== # Instantiate model # ===================================== if args.activation == "relu": activation = tf.nn.relu elif args.activation == "leaky_relu": activation = tf.nn.leaky_relu else: raise ValueError("Do not support '{}' activation!".format( args.activation)) if args.enc_dec_model == "1Konny": assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format( args.z_dim) encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation) decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation, output_activation=tf.nn.sigmoid) disc_z = DiscriminatorZ_1Konny(num_outputs=2) else: raise ValueError("Do not support encoder/decoder model '{}'!".format( args.enc_dec_model)) model = FactorVAE([img_height, img_width, 3], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'kld_loss': args.kld_loss_coeff, 'tc_loss': args.tc_loss_coeff, 'gp0_z_tc': args.gp0_z_tc_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments # ===================================== # Reconstruct # ======================================= # seed = 341 rs = np.random.RandomState(seed) ids = rs.choice(celebA_loader.num_test_data, size=15) x = celebA_loader.sample_images_from_dataset(sess, 'test', ids) save_dir = make_dir_if_not_exist(join(args.save_dir, args.run)) img_file = join(save_dir, 'x_test.png') save_img_block(img_file, binary_float_to_uint8(np.expand_dims(x, axis=0))) img_file = join(save_dir, 'recx_test_1.png') model.reconstruct_images(img_file, sess, x, block_shape=[1, len(ids)], batch_size=-1, show_original_images=False, dec_output_2_img_func=binary_float_to_uint8) img_file = join(save_dir, 'recx_test_2.png') model.reconstruct_images(img_file, sess, x, block_shape=[1, len(ids)], batch_size=-1, show_original_images=True, dec_output_2_img_func=binary_float_to_uint8)
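# --------------------------------------------------------------------------- #
# NOTE: every plotting call in this file passes
# dec_output_2_img_func=binary_float_to_uint8. That helper lives in the repo;
# it is assumed here to map decoder outputs in [0, 1] to uint8 pixel values,
# i.e. something equivalent to the sketch below.
# --------------------------------------------------------------------------- #
import numpy as np


def binary_float_to_uint8_sketch(x):
    # Clip to the valid range first, then rescale to 8-bit pixels.
    return (np.clip(x, 0.0, 1.0) * 255.0).astype(np.uint8)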
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== celebA_loader = TFCelebAWithAttrLoader(root_dir=args.celebA_root_dir) img_height, img_width = args.celebA_resize_size, args.celebA_resize_size celebA_loader.build_transformation_flow_tf( *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size)) # ===================================== # Instantiate model # ===================================== if args.activation == "relu": activation = tf.nn.relu elif args.activation == "leaky_relu": activation = tf.nn.leaky_relu else: raise ValueError("Do not support '{}' activation!".format( args.activation)) if args.enc_dec_model == "1Konny": # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim) encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation) decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation, output_activation=tf.nn.sigmoid) disc_z = DiscriminatorZ_1Konny() else: raise ValueError("Do not support encoder/decoder model '{}'!".format( args.enc_dec_model)) model = AAE([img_height, img_width, 3], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, stochastic_z=args.stochastic_z, use_gp0_z=True, gp0_z_mode=args.gp0_z_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'G_loss_z1_gen': args.G_loss_z1_gen_coeff, 'D_loss_z1_gen': args.D_loss_z1_gen_coeff, 'gp0_z': args.gp0_z_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments # save_dir = remove_dir_if_exist(join(args.save_dir, "AAE_{}".format(args.run)), ask_4_permission=False) # save_dir = make_dir_if_not_exist(save_dir) save_dir = make_dir_if_not_exist( join(args.save_dir, "AAE_{}".format(args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=3, suppress=True) num_bins = args.num_bins bin_limits = tuple([float(s) for s in args.bin_limits.split(";")]) data_proportion = args.data_proportion num_data = int(data_proportion * celebA_loader.num_train_data) eps = 1e-8 # file f = open(join( save_dir, 'log[bins={},bin_limits={},data={}].txt'.format( num_bins, bin_limits, data_proportion)), mode='w') # print function print_ = functools.partial(print_both, file=f) ''' if attr_type == 0: attr_names = celebA_loader.attributes elif attr_type == 1: attr_names = ['Male', 'Black_Hair', 'Blond_Hair', 'Straight_Hair', 'Wavy_Hair', 'Bald', 'Oval_Face', 'Big_Nose', 'Chubby', 'Double_Chin', 'Goatee', 'No_Beard', 'Mouth_Slightly_Open', 'Smiling', 'Eyeglasses', 'Pale_Skin'] else: raise ValueError("Only support factor_type=0 or 1!") ''' print_("num_bins: {}".format(num_bins)) print_("bin_limits: {}".format(bin_limits)) print_("data_proportion: 
{}".format(data_proportion)) # Compute bins # ================================= # print_("") print_("bin_limits: {}".format(bin_limits)) assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[ 1], "bin_limits={}".format(bin_limits) bins = np.linspace(bin_limits[0], bin_limits[1], num_bins + 1, endpoint=True) print_("bins: {}".format(bins)) assert len(bins) == num_bins + 1 bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))] print_("bin_widths: {}".format(bin_widths)) assert len(bin_widths ) == num_bins, "len(bin_widths)={} while num_bins={}!".format( len(bin_widths), num_bins) assert np.all(np.greater(bin_widths, 0)), "bin_widths: {}".format(bin_widths) bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))] print_("bin_centers: {}".format(bin_centers)) assert len(bin_centers ) == num_bins, "len(bin_centers)={} while num_bins={}!".format( len(bin_centers), num_bins) # ================================= # # Compute representations # ================================= # z_data_attr_file = join(save_dir, "z_data[data={}].npz".format(data_proportion)) if not exists(z_data_attr_file): all_z_mean = [] all_z_stddev = [] all_attrs = [] print("") print("Compute all_z_mean, all_z_stddev and all_attrs!") count = 0 for batch_ids in iterate_data(num_data, 10 * args.batch_size, shuffle=False): x = celebA_loader.sample_images_from_dataset( sess, 'train', batch_ids) attrs = celebA_loader.sample_attrs_from_dataset('train', batch_ids) assert attrs.shape[1] == celebA_loader.num_attributes z_mean, z_stddev = sess.run(model.get_output( ['z_mean', 'z_stddev']), feed_dict={ model.is_train: False, model.x_ph: x }) all_z_mean.append(z_mean) all_z_stddev.append(z_stddev) all_attrs.append(attrs) count += len(batch_ids) print("\rProcessed {} samples!".format(count), end="") print() all_z_mean = np.concatenate(all_z_mean, axis=0) all_z_stddev = np.concatenate(all_z_stddev, axis=0) all_attrs = np.concatenate(all_attrs, axis=0) np.savez_compressed(z_data_attr_file, all_z_mean=all_z_mean, all_z_stddev=all_z_stddev, all_attrs=all_attrs) else: print("{} exists. 
Load data from file!".format(z_data_attr_file)) with np.load(z_data_attr_file, "r") as f: all_z_mean = f['all_z_mean'] all_z_stddev = f['all_z_stddev'] all_attrs = f['all_attrs'] print_("") print_("all_z_mean.shape: {}".format(all_z_mean.shape)) print_("all_z_stddev.shape: {}".format(all_z_stddev.shape)) print_("all_attrs.shape: {}".format(all_attrs.shape)) # ================================= # # Compute the probability mass function for ground truth factors # ================================= # num_attrs = all_attrs.shape[1] assert all_attrs.dtype == np.bool all_attrs = all_attrs.astype(np.int32) # (num_samples, num_attrs, 2) # The first component is 1 and the last component is 0 all_Q_y_cond_x = np.stack([all_attrs, 1 - all_attrs], axis=-1) # ================================= # # Compute Q(zi|x) # Compute I(zi, yk) # ================================= # Q_z_y = np.zeros([args.z_dim, num_attrs, num_bins, 2], dtype=np.float32) MI_z_y = np.zeros([args.z_dim, num_attrs], dtype=np.float32) H_z_y = np.zeros([args.z_dim, num_attrs], dtype=np.float32) H_z_4_diff_y = np.zeros([args.z_dim, num_attrs], dtype=np.float32) H_y_4_diff_z = np.zeros([num_attrs, args.z_dim], dtype=np.float32) for i in range(args.z_dim): print_("") print_("Compute all_Q_z{}_cond_x!".format(i)) # Q_s_cond_x all_Q_s_cond_x = [] for batch_ids in iterate_data(len(all_z_mean), 500, shuffle=False, include_remaining=True): # (batch_size, num_bins) q_s_cond_x = normal_density( np.expand_dims(bin_centers, axis=0), mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1), stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1)) # (batch_size, num_bins) max_q_s_cond_x = np.max(q_s_cond_x, axis=-1) # print("\nmax_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x))) # (batch_size, num_bins) deter_s_cond_x = at_bin(all_z_mean[batch_ids, i], bins).astype(np.float32) # (batch_size, num_bins) Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0) Q_s_cond_x = Q_s_cond_x / np.maximum( np.sum(Q_s_cond_x, axis=1, keepdims=True), eps) # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1)))) Q_s_cond_x = np.where( np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1), deter_s_cond_x, Q_s_cond_x) # print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1)))) all_Q_s_cond_x.append(Q_s_cond_x) # (num_samples, num_bins) all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0) assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \ "sorted_all_Q_s_cond_x[:30]:\n{}!".format( np.sort(all_Q_s_cond_x[:30], axis=None)) assert len(all_Q_s_cond_x) == len( all_attrs), "all_Q_s_cond_x.shape={}, all_attrs.shape={}".format( all_Q_s_cond_x.shape, all_attrs.shape) # I(z, y) for k in range(num_attrs): # Compute Q(zi, yk) # -------------------------------- # # (z_dim, 2) Q_zi_yk = np.matmul(np.transpose(all_Q_s_cond_x, axes=[1, 0]), all_Q_y_cond_x[:, k, :]) Q_zi_yk = Q_zi_yk / len(all_Q_y_cond_x) Q_zi_yk = Q_zi_yk / np.maximum(np.sum(Q_zi_yk), eps) assert np.all(Q_zi_yk >= 0), "'Q_zi_yk' contains negative values. 
" \ "sorted_Q_zi_yk[:10]:\n{}!".format(np.sort(Q_zi_yk, axis=None)) log_Q_zi_yk = np.log(np.clip(Q_zi_yk, eps, 1 - eps)) Q_z_y[i, k] = Q_zi_yk print_("sum(Q_zi_yk): {}".format(np.sum(Q_zi_yk))) # -------------------------------- # # Compute Q_z # -------------------------------- # Q_zi = np.sum(Q_zi_yk, axis=1) log_Q_zi = np.log(np.clip(Q_zi, eps, 1 - eps)) print_("sum(Q_z{}): {}".format(i, np.sum(Q_zi))) print_("Q_z{}: {}".format(i, Q_zi)) # -------------------------------- # # Compute Q_y # -------------------------------- # Q_yk = np.sum(Q_zi_yk, axis=0) log_Q_yk = np.log(np.clip(Q_yk, eps, 1 - eps)) print_("sum(Q_y{}): {}".format(k, np.sum(Q_yk))) print_("Q_y{}: {}".format(k, np.sum(Q_yk))) # -------------------------------- # MI_zi_yk = Q_zi_yk * (log_Q_zi_yk - np.expand_dims( log_Q_zi, axis=-1) - np.expand_dims(log_Q_yk, axis=0)) MI_zi_yk = np.sum(MI_zi_yk) H_zi_yk = -np.sum(Q_zi_yk * log_Q_zi_yk) H_zi = -np.sum(Q_zi * log_Q_zi) H_yk = -np.sum(Q_yk * log_Q_yk) MI_z_y[i, k] = MI_zi_yk H_z_y[i, k] = H_zi_yk H_z_4_diff_y[i, k] = H_zi H_y_4_diff_z[k, i] = H_yk # ================================= # print_("") print_("MI_z_y:\n{}".format(MI_z_y)) print_("H_z_y:\n{}".format(H_z_y)) print_("H_z_4_diff_y:\n{}".format(H_z_4_diff_y)) print_("H_y_4_diff_z:\n{}".format(H_y_4_diff_z)) # Compute metric # ================================= # # Sorted in decreasing order MI_ids_sorted = np.argsort(MI_z_y, axis=0)[::-1] MI_sorted = np.take_along_axis(MI_z_y, MI_ids_sorted, axis=0) MI_gap_y = np.divide(MI_sorted[0, :] - MI_sorted[1, :], H_y_4_diff_z[:, 0]) MIG = np.mean(MI_gap_y) print_("") print_("MI_sorted: {}".format(MI_sorted)) print_("MI_ids_sorted: {}".format(MI_ids_sorted)) print_("MI_gap_y: {}".format(MI_gap_y)) print_("MIG: {}".format(MIG)) results = { 'Q_z_y': Q_z_y, 'MI_z_y': MI_z_y, 'H_z_y': H_z_y, 'H_z_4_diff_y': H_z_4_diff_y, 'H_y_4_diff_z': H_y_4_diff_z, 'MI_sorted': MI_sorted, 'MI_ids_sorted': MI_ids_sorted, 'MI_gap_y': MI_gap_y, 'MIG': MIG, } result_file = join( save_dir, 'results[bins={},bin_limits={},data={}].npz'.format( num_bins, bin_limits, data_proportion)) np.savez_compressed(result_file, **results) # ================================= # f.close()
def main(args): # Load config # ===================================== # with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # # Load dataset # ===================================== # data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites", "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz") # It is already in the range [0, 1] with np.load(data_file, encoding="latin1") as f: x_train = f['imgs'] # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y y_train = f['latents_classes'][:, 1:] x_train = np.expand_dims(x_train.astype(np.float32), axis=-1) num_train = len(x_train) print("num_train: {}".format(num_train)) # ===================================== # # Build model # ===================================== # if args.enc_dec_model == "1Konny": encoder = Encoder_1Konny(args.z_dim, stochastic=True) decoder = Decoder_1Konny() disc_z = DiscriminatorZ_1Konny() else: raise ValueError("Do not support enc_dec_model='{}'!".format( args.enc_dec_model)) model = AAE([64, 64, 1], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, stochastic_z=args.stochastic_z, use_gp0_z=True, gp0_z_mode=args.gp0_z_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'G_loss_z1_gen': args.G_loss_z1_gen_coeff, 'D_loss_z1_gen': args.D_loss_z1_gen_coeff, 'gp0_z': args.gp0_z_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # # Initialize session # ===================================== # config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) train_helper.load(sess, load_step=args.load_step) # ===================================== # # Experiments # ===================================== # save_dir = make_dir_if_not_exist( join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run))) np.set_printoptions(threshold=np.nan, linewidth=1000, precision=5, suppress=True) # ===================================== # # Compute representations # ===================================== # z_data_file = join(save_dir, "z_data.npz") if not exists(z_data_file): all_z_mean = [] all_z_stddev = [] print("") print("Compute all_z_mean, all_z_stddev!") count = 0 for batch_ids in iterate_data(num_train, 10 * args.batch_size, shuffle=False): x = x_train[batch_ids] z_samples, z_mean, z_stddev = sess.run(model.get_output( ['z1_gen', 'z_mean', 'z_stddev']), feed_dict={ model.is_train: False, model.x_ph: x }) all_z_mean.append(z_mean) all_z_stddev.append(z_stddev) count += len(batch_ids) print("\rProcessed {} samples!".format(count), end="") print() all_z_mean = np.concatenate(all_z_mean, axis=0) all_z_stddev = np.concatenate(all_z_stddev, axis=0) np.savez_compressed(z_data_file, all_z_mean=all_z_mean, all_z_stddev=all_z_stddev) else: print("{} exists. 
Load data from file!".format(z_data_file)) with np.load(z_data_file, "r") as f: all_z_mean = f['all_z_mean'] all_z_stddev = f['all_z_stddev'] # ===================================== # cont_mask = [False, True, True, True, True ] if args.continuous_only else None if args.classifier == "LASSO": results = compute_metrics_with_LASSO(latents=all_z_mean, factors=y_train, params={ 'alpha': args.LASSO_alpha, 'max_iter': args.LASSO_iters }, cont_mask=cont_mask) result_file = join( save_dir, "results[LASSO,{},alpha={},iters={}].npz".format( "cont" if args.continuous_only else "all", args.LASSO_alpha, args.LASSO_iters)) else: results = compute_metrics_with_RandomForest(latents=all_z_mean, factors=y_train, params={ 'n_estimators': args.RF_trees, 'max_depth': args.RF_depth }) result_file = join( save_dir, "results[RF,{},trees={},depth={}].npz".format( "cont" if args.continuous_only else "all", args.RF_trees, args.RF_depth)) np.savez_compressed(result_file, **results)
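# ---------------------------------------------------------------------------
# Note: compute_metrics_with_LASSO / compute_metrics_with_RandomForest are
# defined elsewhere in the repo and their internals are not shown here. As an
# illustration only, a common way to obtain a latent-to-factor importance
# matrix of this kind (DCI-style) is to fit one predictor per factor and read
# off its feature importances. Everything below (the helper name, the choice
# of RandomForestClassifier and its defaults) is an assumption, not the
# repo's implementation.
# ---------------------------------------------------------------------------
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def factor_importance_matrix(latents, factors, n_estimators=10, max_depth=8):
    """Fit one classifier per ground-truth factor; return a (z_dim, num_factors) importance matrix."""
    z_dim, num_factors = latents.shape[1], factors.shape[1]
    R = np.zeros([z_dim, num_factors], dtype=np.float32)
    for k in range(num_factors):
        clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth)
        clf.fit(latents, factors[:, k])          # dSprites factors are discrete classes
        R[:, k] = clf.feature_importances_
    return R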
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    celebA_loader = TFCelebALoader(root_dir=args.celebA_root_dir)

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size))

    # =====================================
    # Instantiate model
    # =====================================
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(args.activation))

    if args.enc_dec_model == "1Konny":
        assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)
        encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny(num_outputs=2)
    elif args.enc_dec_model == "my":
        assert args.z_dim == 150, "For 'my', z_dim must be 150. Found {}!".format(args.z_dim)
        encoder = Encoder_My(args.z_dim, stochastic=True, activation=activation)
        decoder = Decoder_My([img_height, img_width, 3], activation=activation,
                             output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_My(num_outputs=2)
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(args.enc_dec_model))

    model = FactorVAE([img_height, img_width, 3], args.z_dim,
                      encoder=encoder, decoder=decoder,
                      discriminator_z=disc_z,
                      rec_x_mode=args.rec_x_mode,
                      use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'kld_loss': args.kld_loss_coeff,
        'tc_loss': args.tc_loss_coeff,
        'gp0_z_tc': args.gp0_z_tc_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    save_dir = remove_dir_if_exist(join(args.save_dir, "FactorVAE_{}".format(args.run)),
                                   ask_4_permission=False)
    save_dir = make_dir_if_not_exist(save_dir)
    # =====================================

    # z correlation matrix
    # ======================================= #
    for deterministic in [True, False]:
        all_z = []
        for batch_ids in iterate_data(celebA_loader.num_train_data, args.batch_size, shuffle=False):
            x = celebA_loader.sample_images_from_dataset(sess, 'train', batch_ids)

            z = model.encode(sess, x, deterministic=deterministic)
            assert len(z.shape) == 2 and z.shape[1] == args.z_dim, "z.shape: {}".format(z.shape)

            all_z.append(z)

        all_z = np.concatenate(all_z, axis=0)

        # plot_corrmat(join(save_dir, "corr_mat[deter={}].png".format(deterministic)), all_z,
        #              font={'size': 14},
        #              subplot_adjust={'left': 0.04, 'right': 0.96, 'bottom': 0.02, 'top': 0.98},
        #              size_inches=(7.2, 6))

        plot_corrmat_with_histogram(join(save_dir, "corr_mat_hist[deter={}].png".format(deterministic)),
                                    all_z, font={'size': 14},
                                    subplot_adjust={'left': 0.04, 'right': 0.96, 'bottom': 0.02, 'top': 0.98},
                                    size_inches=(10, 3))
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites", "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz") # It is already in the range [0, 1] with np.load(data_file, encoding="latin1") as f: x_train = f['imgs'] # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y y_train = f['latents_classes'] x_train = np.expand_dims(x_train.astype(np.float32), axis=-1) num_train = len(x_train) print("num_train: {}".format(num_train)) print("y_train[:10]: {}".format(y_train[:10])) # ===================================== # Instantiate model # ===================================== if args.enc_dec_model == "1Konny": encoder = Encoder_1Konny(args.z_dim, stochastic=True) decoder = Decoder_1Konny() disc_z = DiscriminatorZ_1Konny(num_outputs=2) else: raise ValueError("Do not support enc_dec_model='{}'!".format( args.enc_dec_model)) model = FactorVAE([64, 64, 1], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'kld_loss': args.kld_loss_coeff, 'tc_loss': args.tc_loss_coeff, 'gp0_z_tc': args.gp0_z_tc_coeff, 'Dz_tc_loss_coeff': args.Dz_tc_loss_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments save_dir = make_dir_if_not_exist( join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=5, suppress=True) num_bins = args.num_bins bin_limits = tuple([float(s) for s in args.bin_limits.split(";")]) data_proportion = args.data_proportion num_data = int(data_proportion * num_train) assert num_data == num_train, "For dSprites, you must use all data!" 
eps = 1e-8 # file f = open(join( save_dir, 'log[bins={},bin_limits={},data={}].txt'.format( num_bins, bin_limits, data_proportion)), mode='w') # print function print_ = functools.partial(print_both, file=f) print_("num_bins: {}".format(num_bins)) print_("bin_limits: {}".format(bin_limits)) print_("data_proportion: {}".format(data_proportion)) # Compute bins # ================================= # print_("") print_("bin_limits: {}".format(bin_limits)) assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[ 1], "bin_limits={}".format(bin_limits) bins = np.linspace(bin_limits[0], bin_limits[1], num_bins + 1, endpoint=True) print_("bins: {}".format(bins)) assert len(bins) == num_bins + 1 bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))] print_("bin_widths: {}".format(bin_widths)) assert len(bin_widths ) == num_bins, "len(bin_widths)={} while num_bins={}!".format( len(bin_widths), num_bins) assert np.all(np.greater(bin_widths, 0)), "bin_widths: {}".format(bin_widths) bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))] print_("bin_centers: {}".format(bin_centers)) assert len(bin_centers ) == num_bins, "len(bin_centers)={} while num_bins={}!".format( len(bin_centers), num_bins) # ================================= # # Compute representations # ================================= # z_data_file = join(save_dir, "z_data[data={}].npz".format(data_proportion)) if not exists(z_data_file): all_z_mean = [] all_z_stddev = [] print("") print("Compute all_z_mean, all_z_stddev and all_attrs!") count = 0 for batch_ids in iterate_data(num_data, 10 * args.batch_size, shuffle=False): x = x_train[batch_ids] z_mean, z_stddev = sess.run(model.get_output( ['z_mean', 'z_stddev']), feed_dict={ model.is_train: False, model.x_ph: x }) all_z_mean.append(z_mean) all_z_stddev.append(z_stddev) count += len(batch_ids) print("\rProcessed {} samples!".format(count), end="") print() all_z_mean = np.concatenate(all_z_mean, axis=0) all_z_stddev = np.concatenate(all_z_stddev, axis=0) np.savez_compressed(z_data_file, all_z_mean=all_z_mean, all_z_stddev=all_z_stddev) else: print("{} exists. 
Load data from file!".format(z_data_file)) with np.load(z_data_file, "r") as f: all_z_mean = f['all_z_mean'] all_z_stddev = f['all_z_stddev'] # ================================= # # Compute mutual information # ================================= # H_z = [] H_z_cond_x = [] MI_z_x = [] norm_MI_z_x = [] Q_z_cond_x = [] for i in range(args.z_dim): print_("") print_("Compute I(z{}, x)!".format(i)) # Q_s_cond_x all_Q_s_cond_x = [] for batch_ids in iterate_data(len(all_z_mean), 500, shuffle=False, include_remaining=True): # (batch_size, num_bins) q_s_cond_x = normal_density( np.expand_dims(bin_centers, axis=0), mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1), stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1)) # (batch_size, num_bins) max_q_s_cond_x = np.max(q_s_cond_x, axis=-1) # print("max_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x))) # (batch_size, num_bins) deter_s_cond_x = at_bin(all_z_mean[batch_ids, i], bins).astype(np.float32) # (batch_size, num_bins) Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0) Q_s_cond_x = Q_s_cond_x / np.maximum( np.sum(Q_s_cond_x, axis=1, keepdims=True), eps) # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1)))) Q_s_cond_x = np.where( np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1), deter_s_cond_x, Q_s_cond_x) # print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1)))) all_Q_s_cond_x.append(Q_s_cond_x) all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0) print_("sort(sum(all_Q_s_cond_x))[:10]: {}".format( np.sort(np.sum(all_Q_s_cond_x, axis=-1), axis=0)[:100])) assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \ "sorted_all_Q_s_cond_x[:30]:\n{}!".format( np.sort(all_Q_s_cond_x[:30], axis=None)) Q_z_cond_x.append(all_Q_s_cond_x) H_zi_cond_x = -np.mean(np.sum( all_Q_s_cond_x * np.log(np.maximum(all_Q_s_cond_x, eps)), axis=1), axis=0) # Q_s Q_s = np.mean(all_Q_s_cond_x, axis=0) print_("Q_s: {}".format(Q_s)) print_("sum(Q_s): {}".format(sum(Q_s))) assert np.all(Q_s >= 0), "'Q_s' contains negative values. 
" \ "sorted_Q_s[:10]:\n{}!".format(np.sort(Q_s, axis=None)) Q_s = Q_s / np.sum(Q_s, axis=0) print_("sum(Q_s) (normalized): {}".format(sum(Q_s))) H_zi = -np.sum(Q_s * np.log(np.maximum(Q_s, eps)), axis=0) MI_zi_x = H_zi - H_zi_cond_x normalized_MI_zi_x = (1.0 * MI_zi_x) / (H_zi + eps) print_("H_zi: {}".format(H_zi)) print_("H_zi_cond_x: {}".format(H_zi_cond_x)) print_("MI_zi_x: {}".format(MI_zi_x)) print_("normalized_MI_zi_x: {}".format(normalized_MI_zi_x)) H_z.append(H_zi) H_z_cond_x.append(H_zi_cond_x) MI_z_x.append(MI_zi_x) norm_MI_z_x.append(normalized_MI_zi_x) H_z = np.asarray(H_z, dtype=np.float32) H_z_cond_x = np.asarray(H_z_cond_x, dtype=np.float32) MI_z_x = np.asarray(MI_z_x, dtype=np.float32) norm_MI_z_x = np.asarray(norm_MI_z_x, dtype=np.float32) print_("") print_("H_z: {}".format(H_z)) print_("H_z_cond_x: {}".format(H_z_cond_x)) print_("MI_z_x: {}".format(MI_z_x)) print_("norm_MI_z_x: {}".format(norm_MI_z_x)) sorted_z_comps = np.argsort(MI_z_x, axis=0)[::-1] sorted_MI_z_x = np.take_along_axis(MI_z_x, sorted_z_comps, axis=0) print_("sorted_MI_z_x: {}".format(sorted_MI_z_x)) print_("sorted_z_comps: {}".format(sorted_z_comps)) sorted_norm_z_comps = np.argsort(norm_MI_z_x, axis=0)[::-1] sorted_norm_MI_z_x = np.take_along_axis(norm_MI_z_x, sorted_norm_z_comps, axis=0) print_("sorted_norm_MI_z_x: {}".format(sorted_norm_MI_z_x)) print_("sorted_norm_z_comps: {}".format(sorted_norm_z_comps)) result_file = join( save_dir, 'results[bins={},bin_limits={},data={}].npz'.format( num_bins, bin_limits, data_proportion)) np.savez_compressed(result_file, H_z=H_z, H_z_cond_x=H_z_cond_x, MI_z_x=MI_z_x, norm_MI_z_x=norm_MI_z_x, sorted_MI_z_x=sorted_MI_z_x, sorted_z_comps=sorted_z_comps, sorted_norm_MI_z_x=sorted_norm_MI_z_x, sorted_norm_z_comps=sorted_norm_z_comps) Q_z_cond_x = np.asarray(Q_z_cond_x, dtype=np.float32) z_prob_file = join( save_dir, 'z_prob[bins={},bin_limits={},data={}].npz'.format( num_bins, bin_limits, data_proportion)) np.savez_compressed(z_prob_file, Q_z_cond_x=Q_z_cond_x) # ================================= # f.close()
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== celebA_loader = TFCelebAWithAttrLoader(root_dir=args.celebA_root_dir) img_height, img_width = args.celebA_resize_size, args.celebA_resize_size celebA_loader.build_transformation_flow_tf( *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size)) # ===================================== # Instantiate model # ===================================== if args.activation == "relu": activation = tf.nn.relu elif args.activation == "leaky_relu": activation = tf.nn.leaky_relu else: raise ValueError("Do not support '{}' activation!".format( args.activation)) if args.enc_dec_model == "1Konny": # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim) encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation) decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation, output_activation=tf.nn.sigmoid) disc_z = DiscriminatorZ_1Konny(num_outputs=2) else: raise ValueError("Do not support encoder/decoder model '{}'!".format( args.enc_dec_model)) model = FactorVAE([img_height, img_width, 3], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'kld_loss': args.kld_loss_coeff, 'tc_loss': args.tc_loss_coeff, 'gp0_z_tc': args.gp0_z_tc_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments # save_dir = remove_dir_if_exist(join(args.save_dir, "FactorVAE_{}".format(args.run)), ask_4_permission=False) # save_dir = make_dir_if_not_exist(save_dir) save_dir = make_dir_if_not_exist( join(args.save_dir, "FactorVAE_{}".format(args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=3, suppress=True) num_bins = args.num_bins bin_limits = tuple([float(s) for s in args.bin_limits.split(";")]) data_proportion = args.data_proportion num_data = int(data_proportion * celebA_loader.num_train_data) top_k = args.top_k eps = 1e-8 # file f = open(join( save_dir, 'log[bins={},bin_limits={},data={}].txt'.format( num_bins, bin_limits, data_proportion)), mode='w') # print function print_ = functools.partial(print_both, file=f) print_("num_bins: {}".format(num_bins)) print_("bin_limits: {}".format(bin_limits)) print_("data_proportion: {}".format(data_proportion)) print_("top_k: {}".format(top_k)) # Compute bins # ================================= # print_("") print_("bin_limits: {}".format(bin_limits)) assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[ 1], "bin_limits={}".format(bin_limits) bins = np.linspace(bin_limits[0], bin_limits[1], num_bins + 1, endpoint=True) print_("bins: 
{}".format(bins)) assert len(bins) == num_bins + 1 bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))] print_("bin_widths: {}".format(bin_widths)) assert len(bin_widths ) == num_bins, "len(bin_widths)={} while num_bins={}!".format( len(bin_widths), num_bins) assert np.all(np.greater(bin_widths, 0)), "bin_widths: {}".format(bin_widths) bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))] print_("bin_centers: {}".format(bin_centers)) assert len(bin_centers ) == num_bins, "len(bin_centers)={} while num_bins={}!".format( len(bin_centers), num_bins) # ================================= # # Compute representations # ================================= # z_data_file = join(args.informativeness_metrics_dir, "FactorVAE_{}".format(args.run), "z_data[data={}].npz".format(data_proportion)) with np.load(z_data_file, "r") as f: all_z_mean = f['all_z_mean'] all_z_stddev = f['all_z_stddev'] print_("") print_("all_z_mean.shape: {}".format(all_z_mean.shape)) print_("all_z_stddev.shape: {}".format(all_z_stddev.shape)) # ================================= # # Compute the mutual information # ================================= # mi_file = join( args.informativeness_metrics_dir, "FactorVAE_{}".format(args.run), 'results[bins={},bin_limits={},data={}].npz'.format( num_bins, bin_limits, data_proportion)) with np.load(mi_file, "r") as f: sorted_MI_z_x = f['sorted_MI_z_x'] sorted_z_ids = f['sorted_z_comps'] H_z = f['H_z'] if top_k > 0: top_MI = sorted_MI_z_x[:top_k] top_z_ids = sorted_z_ids[:top_k] bot_MI = sorted_MI_z_x[-top_k:] bot_z_ids = sorted_z_ids[-top_k:] top_bot_MI = np.concatenate([top_MI, bot_MI], axis=0) top_bot_z_ids = np.concatenate([top_z_ids, bot_z_ids], axis=0) print_("top MI: {}".format(top_MI)) print_("top_z_ids: {}".format(top_z_ids)) print_("bot MI: {}".format(bot_MI)) print_("bot_z_ids: {}".format(bot_z_ids)) else: top_bot_MI = sorted_MI_z_x top_bot_z_ids = sorted_z_ids # ================================= # H_z1z2_mean_mat = np.full( [len(top_bot_z_ids), len(top_bot_z_ids)], -1, dtype=np.float32) MI_z1z2_mean_mat = np.full( [len(top_bot_z_ids), len(top_bot_z_ids)], -1, dtype=np.float32) H_z1z2_mean = [] MI_z1z2_mean = [] z1z2_ids = [] # Compute the mutual information # ================================= # for i in range(len(top_bot_z_ids)): z_idx1 = top_bot_z_ids[i] H_s1 = H_z[z_idx1] for j in range(i + 1, len(top_bot_z_ids)): z_idx2 = top_bot_z_ids[j] H_s2 = H_z[z_idx2] print_("") print_("Compute MI(z{}_mean, z{}_mean)!".format(z_idx1, z_idx2)) s1s2_mean_counter = np.zeros([num_bins, num_bins], dtype=np.int32) for batch_ids in iterate_data(len(all_z_mean), 100, shuffle=False, include_remaining=True): s1 = at_bin(all_z_mean[batch_ids, z_idx1], bins, one_hot=False) s2 = at_bin(all_z_mean[batch_ids, z_idx2], bins, one_hot=False) for s1_, s2_ in zip(s1, s2): s1s2_mean_counter[s1_, s2_] += 1 # I(s1, s2) = Q(s1, s2) * (log Q(s1, s2) - log Q(s1) log Q(s2)) # ---------------------------------- # Q_s1s2_mean = (s1s2_mean_counter * 1.0) / np.sum(s1s2_mean_counter).astype(np.float32) log_Q_s1s2_mean = np.log(np.maximum(Q_s1s2_mean, eps)) H_s1s2_mean = -np.sum(Q_s1s2_mean * log_Q_s1s2_mean) MI_s1s2_mean = H_s1 + H_s2 - H_s1s2_mean print_("H_s1: {}".format(H_s1)) print_("H_s2: {}".format(H_s2)) print_("H_s1s2_mean: {}".format(H_s1s2_mean)) print_("MI_s1s2_mean: {}".format(MI_s1s2_mean)) H_z1z2_mean.append(H_s1s2_mean) MI_z1z2_mean.append(MI_s1s2_mean) z1z2_ids.append((z_idx1, z_idx2)) H_z1z2_mean_mat[i, j] = H_s1s2_mean H_z1z2_mean_mat[j, i] = H_s1s2_mean MI_z1z2_mean_mat[i, 
j] = MI_s1s2_mean MI_z1z2_mean_mat[j, i] = MI_s1s2_mean H_z1z2_mean = np.asarray(H_z1z2_mean, dtype=np.float32) MI_z1z2_mean = np.asarray(MI_z1z2_mean, dtype=np.float32) z1z2_ids = np.asarray(z1z2_ids, dtype=np.int32) result_file = join( save_dir, "results[bins={},bin_limits={},data={},k={}].npz".format( num_bins, bin_limits, data_proportion, top_k)) results = { 'H_z1z2_mean': H_z1z2_mean, 'MI_z1z2_mean': MI_z1z2_mean, 'H_z1z2_mean_mat': H_z1z2_mean_mat, 'MI_z1z2_mean_mat': MI_z1z2_mean_mat, 'z1z2_ids': z1z2_ids, } np.savez_compressed(result_file, **results) # ================================= # f.close()
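# ---------------------------------------------------------------------------
# Note: the pairwise term above is I(z_i; z_j) = H(z_i) + H(z_j) - H(z_i, z_j),
# with the joint entropy estimated from a 2-D histogram of the binned
# posterior means. The sketch below recomputes the marginal entropies from
# the same histogram, whereas the script above reuses the precomputed H_z;
# the helper name is hypothetical.
# ---------------------------------------------------------------------------
import numpy as np

def pairwise_mi(z_i, z_j, bins, eps=1e-8):
    """I(z_i; z_j) from a joint histogram of two latent components over shared bin edges."""
    counts, _, _ = np.histogram2d(z_i, z_j, bins=[bins, bins])
    Q_joint = counts / np.sum(counts)
    Q_i, Q_j = np.sum(Q_joint, axis=1), np.sum(Q_joint, axis=0)
    H = lambda p: -np.sum(p * np.log(np.maximum(p, eps)))
    return H(Q_i) + H(Q_j) - H(Q_joint)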
def main(args): # ===================================== # Load config # ===================================== with open(os.path.join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Preparation # ===================================== celebA_loader = TFCelebALoader(root_dir=args.celebA_root_dir) num_train = celebA_loader.num_train_data num_test = celebA_loader.num_test_data img_height, img_width = args.celebA_resize_size, args.celebA_resize_size celebA_loader.build_transformation_flow_tf( *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size)) # ===================================== # Instantiate models # ===================================== # Only use activation for encoder and decoder if args.activation == "relu": activation = tf.nn.relu elif args.activation == "leaky_relu": activation = tf.nn.leaky_relu else: raise ValueError("Do not support '{}' activation!".format( args.activation)) if args.enc_dec_model == "1Konny": # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim) encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation) decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation, output_activation=tf.nn.sigmoid) disc_z = DiscriminatorZ_1Konny() else: raise ValueError("Do not support encoder/decoder model '{}'!".format( args.enc_dec_model)) model = AAE([img_height, img_width, 3], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, stochastic_z=args.stochastic_z, use_gp0_z=True, gp0_z_mode=args.gp0_z_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'G_loss_z1_gen': args.G_loss_z1_gen_coeff, 'D_loss_z1_gen': args.D_loss_z1_gen_coeff, 'gp0_z': args.gp0_z_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_list() # ===================================== # TF Graph Handler asset_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "asset")) img_eval = remove_dir_if_exist(os.path.join(asset_dir, "img_eval"), ask_4_permission=False) img_eval = make_dir_if_not_exist(img_eval) img_x_gen = make_dir_if_not_exist(os.path.join(img_eval, "x_gen")) img_x_rec = make_dir_if_not_exist(os.path.join(img_eval, "x_rec")) img_z_rand_2_traversal = make_dir_if_not_exist( os.path.join(img_eval, "z_rand_2_traversal")) img_z_cond_all_traversal = make_dir_if_not_exist( os.path.join(img_eval, "z_cond_all_traversal")) img_z_cond_1_traversal = make_dir_if_not_exist( os.path.join(img_eval, "z_cond_1_traversal")) img_z_corr = make_dir_if_not_exist(os.path.join(img_eval, "z_corr")) img_z_dist = make_dir_if_not_exist(os.path.join(img_eval, "z_dist")) img_z_stat_dist = make_dir_if_not_exist( os.path.join(img_eval, "z_stat_dist")) # img_rec_error_dist = make_dir_if_not_exist(os.path.join(img_eval, "rec_error_dist")) model_dir = make_dir_if_not_exist(os.path.join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # ===================================== # ===================================== # Training Loop # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) # Load model train_helper.load(sess, load_step=args.load_step) # ''' # Generation # ======================================= # z = np.random.randn(64, args.z_dim) img_file = 
os.path.join(img_x_gen, 'x_gen_test.png') model.generate_images(img_file, sess, z, block_shape=[8, 8], batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) # ======================================= # # ''' # ''' # Reconstruction # ======================================= # seed = 389 x = celebA_loader.sample_images_from_dataset(sess, 'test', list(range(seed, seed + 64))) img_file = os.path.join(img_x_rec, 'x_rec_test.png') model.reconstruct_images(img_file, sess, x, block_shape=[8, 8], batch_size=-1, dec_output_2_img_func=binary_float_to_uint8) # ======================================= # # ''' # ''' # z random traversal # ======================================= # if args.z_dim <= 5: print("z_dim = {}, perform random traversal!".format(args.z_dim)) # Plot z cont with z cont z_zero = np.zeros([args.z_dim], dtype=np.float32) z_rand = np.random.randn(args.z_dim) z_start, z_stop = -4, 4 num_points = 8 for i in range(args.z_dim): for j in range(i + 1, args.z_dim): print( "Plot random 2 comps z traverse with {} and {} components!" .format(i, j)) img_file = os.path.join(img_z_rand_2_traversal, 'z[{},{},zero].png'.format(i, j)) model.rand_2_latents_traverse( img_file, sess, default_z=z_zero, z_comp1=i, start1=z_start, stop1=z_stop, num_points1=num_points, z_comp2=j, start2=z_start, stop2=z_stop, num_points2=num_points, batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) img_file = os.path.join(img_z_rand_2_traversal, 'z[{},{},rand].png'.format(i, j)) model.rand_2_latents_traverse( img_file, sess, default_z=z_rand, z_comp1=i, start1=z_start, stop1=z_stop, num_points1=num_points, z_comp2=j, start2=z_stop, stop2=z_stop, num_points2=num_points, batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) # ======================================= # # ''' # z conditional traversal (all features + one feature) # ======================================= # seed = 389 num_samples = 30 data = celebA_loader.sample_images_from_dataset( sess, 'train', list(range(seed, seed + num_samples))) z_start, z_stop = -4, 4 num_itpl_points = 8 for n in range(num_samples): print("Plot conditional all comps z traverse with test sample {}!". 
format(n)) img_file = os.path.join(img_z_cond_all_traversal, 'x_train{}.png'.format(n)) model.cond_all_latents_traverse( img_file, sess, data[n], start=z_start, stop=z_stop, num_itpl_points=num_itpl_points, batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) z_start, z_stop = -4, 4 num_itpl_points = 8 for i in range(args.z_dim): print("Plot conditional z traverse with comp {}!".format(i)) img_file = os.path.join( img_z_cond_1_traversal, 'x_train[{},{}]_z{}.png'.format(seed, seed + num_samples, i)) model.cond_1_latent_traverse( img_file, sess, data, z_comp=i, start=z_start, stop=z_stop, num_itpl_points=num_itpl_points, batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) # ======================================= # # ''' # ''' # z correlation matrix # ======================================= # all_z = [] for batch_ids in iterate_data(num_train, args.batch_size, shuffle=False): x = celebA_loader.sample_images_from_dataset(sess, 'train', batch_ids) z = model.encode(sess, x) assert len( z.shape) == 2 and z.shape[1] == args.z_dim, "z.shape: {}".format( z.shape) all_z.append(z) all_z = np.concatenate(all_z, axis=0) print("Start plotting!") plot_corrmat_with_histogram(os.path.join(img_z_corr, "corr_mat.png"), all_z) plot_comp_dist(os.path.join(img_z_dist, 'z_{}'), all_z, x_lim=(-5, 5)) print("Done!") # ======================================= # # ''' # ''' # z gaussian stddev # ======================================= # print("\nPlot z mean and stddev!") all_z_mean = [] all_z_stddev = [] for batch_ids in iterate_data(num_train, args.batch_size, shuffle=False): x = celebA_loader.sample_images_from_dataset(sess, 'train', batch_ids) z_mean, z_stddev = sess.run(model.get_output(['z_mean', 'z_stddev']), feed_dict={ model.is_train: False, model.x_ph: x }) all_z_mean.append(z_mean) all_z_stddev.append(z_stddev) all_z_mean = np.concatenate(all_z_mean, axis=0) all_z_stddev = np.concatenate(all_z_stddev, axis=0) plot_comp_dist(os.path.join(img_z_stat_dist, 'z_mean_{}.png'), all_z_mean, x_lim=(-5, 5)) plot_comp_dist(os.path.join(img_z_stat_dist, 'z_stddev_{}.png'), all_z_stddev, x_lim=(-0.5, 3))
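# ---------------------------------------------------------------------------
# Note: cond_all_latents_traverse / cond_1_latent_traverse are model methods
# from the repo. Conceptually, a single-latent traversal encodes one image and
# decodes copies of its code with one component swept over a range. The sketch
# below only illustrates that idea; encode_fn / decode_fn are placeholders
# (e.g., thin wrappers around model.encode / model.decode), not the repo's API.
# ---------------------------------------------------------------------------
import numpy as np

def traverse_one_latent(encode_fn, decode_fn, x, comp, start=-4.0, stop=4.0, num_points=8):
    """Decode copies of z(x) with component `comp` swept from `start` to `stop`."""
    z = encode_fn(x[None])                        # (1, z_dim)
    zs = np.repeat(z, num_points, axis=0)         # (num_points, z_dim), independent copies
    zs[:, comp] = np.linspace(start, stop, num_points)
    return decode_fn(zs)                          # (num_points, H, W, C)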
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== celebA_loader = TFCelebAWithAttrLoader(root_dir=args.celebA_root_dir) img_height, img_width = args.celebA_resize_size, args.celebA_resize_size celebA_loader.build_transformation_flow_tf( *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size)) num_train = celebA_loader.num_train_data # ===================================== # Instantiate model # ===================================== if args.activation == "relu": activation = tf.nn.relu elif args.activation == "leaky_relu": activation = tf.nn.leaky_relu else: raise ValueError("Do not support '{}' activation!".format( args.activation)) if args.enc_dec_model == "1Konny": # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim) encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation) decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation, output_activation=tf.nn.sigmoid) disc_z = DiscriminatorZ_1Konny() else: raise ValueError("Do not support encoder/decoder model '{}'!".format( args.enc_dec_model)) model = AAE([img_height, img_width, 3], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, stochastic_z=args.stochastic_z, use_gp0_z=True, gp0_z_mode=args.gp0_z_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'G_loss_z1_gen': args.G_loss_z1_gen_coeff, 'D_loss_z1_gen': args.D_loss_z1_gen_coeff, 'gp0_z': args.gp0_z_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments # save_dir = remove_dir_if_exist(join(args.save_dir, "AAE_{}".format(args.run)), ask_4_permission=True) # save_dir = make_dir_if_not_exist(save_dir) save_dir = make_dir_if_not_exist( join(args.save_dir, "AAE_{}".format(args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=3, suppress=True) num_bins = args.num_bins bin_limits = tuple([float(s) for s in args.bin_limits.split(";")]) data_proportion = args.data_proportion f = open(join( save_dir, 'log[bins={},bin_limits={},data={}].txt'.format( num_bins, bin_limits, data_proportion)), mode='w') print_ = functools.partial(print_both, file=f) result_file = join( args.interpretability_metrics_dir, "AAE_{}".format(args.run), "results[bins={},bin_limits={},data={}].npz".format( num_bins, bin_limits, data_proportion)) results = np.load(result_file, "r") print_("") print_("num_bins: {}".format(num_bins)) print_("bin_limits: {}".format(bin_limits)) print_("data_proportion: {}".format(data_proportion)) # Plotting # =========================================== # # seed = 389 # num_samples = 30 seed = 398 num_samples = 1 ids = list(range(seed, seed + num_samples)) print_("\nids: 
{}".format(ids)) data = celebA_loader.sample_images_from_dataset(sess, 'train', ids) span = 3 points_one_side = 5 attr_names = celebA_loader.attributes print_("attr_names: {}".format(attr_names)) print_("results.keys: {}".format(list(results.keys()))) # (z_dim, num_attrs) MI_ids_sorted = results['MI_ids_sorted'] MI_sorted = results['MI_sorted'] MI_gap_y = results['MI_gap_y'] H_y = results['H_y_4_diff_z'][:, 0] assert MI_ids_sorted.shape[1] == len(attr_names) == len(MI_gap_y) == len(H_y), \ "MI_ids_sorted.shape: {}, len(attr_names): {}, len(MI_gap_y): {}, len(H_y): {}".format( MI_ids_sorted.shape, len(attr_names), len(MI_gap_y), len(H_y)) print_("\nShow RMIG!") for i in range(len(attr_names)): print("{}: RMIG: {:.4f}, RMIG (unnorm): {:.4f}, H: {:.4f}".format( attr_names[i], MI_gap_y[i], MI_gap_y[i] * H_y[i], H_y[i])) print_("\nShow JEMMI!") H_z_y = results['H_z_y'] MI_z_y = results['MI_z_y'] ids_sorted_by_MI = np.argsort(MI_z_y, axis=0)[::-1] MI_z_y_sorted = np.take_along_axis(MI_z_y, ids_sorted_by_MI, axis=0) H_z_y_sorted = np.take_along_axis(H_z_y, ids_sorted_by_MI, axis=0) H_diff = H_z_y_sorted[0, :] - MI_z_y_sorted[0, :] JEMMI_unnorm = H_diff + MI_z_y_sorted[1, :] JEMMI_norm = JEMMI_unnorm / (np.log(num_bins) + H_y) for i in range(len(attr_names)): print( "{}: JEMMI: {:.4f}, JEMMI (unnorm): {:.4f}, H_diff: {:.4f}, I2: {:.4f}, top 2 latents: z{}, z{}" .format(attr_names[i], JEMMI_norm[i], JEMMI_unnorm[i], H_diff[i], MI_z_y_sorted[1, i], ids_sorted_by_MI[0, i], ids_sorted_by_MI[1, i])) # Uncomment if you want ''' for n in range(len(ids)): for k in range(len(attr_names)): MI_ids_top10 = MI_ids_sorted[:10, k] MI_top10 = MI_sorted[:10, k] print("Plot top 10 latents for factor '{}'!".format(attr_names[k])) img_file = join(save_dir, "x_train[{}][attr={}][bins={},bin_limits={},data={}].png". format(ids[n], attr_names[k], num_bins, bin_limits, data_proportion)) model.cond_all_latents_traverse_v2(img_file, sess, data[n], z_comps=MI_ids_top10, z_comp_labels=["z[{}] ({:.4f})".format(comp, mi) for comp, mi in zip(MI_ids_top10, MI_top10)], span=span, points_1_side=points_one_side, hl_x=True, font_size=9, title="{} (MI gap={:.4f}, H={:.4f})".format( attr_names[k], MI_gap_y[k], H_y[k]), title_font_scale=1.5, subplot_adjust={'left': 0.16, 'right': 0.99, 'bottom': 0.01, 'top': 0.95}, size_inches=(6.5, 5.2), batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) ''' # Top 5 only for n in range(len(ids)): for k in range(len(attr_names)): MI_ids_top10 = MI_ids_sorted[:5, k] MI_top10 = MI_sorted[:5, k] print("Plot top 5 latents for factor '{}'!".format(attr_names[k])) img_file = join( save_dir, "train{}_attr={}_bins={}_data={}.png".format( ids[n], attr_names[k], num_bins, data_proportion)) model.cond_all_latents_traverse_v2( img_file, sess, data[n], z_comps=MI_ids_top10, z_comp_labels=[ "z[{}] ({:.4f})".format(comp, mi) for comp, mi in zip(MI_ids_top10, MI_top10) ], span=span, points_1_side=points_one_side, hl_x=True, font_size=9, title="{} (MI gap={:.4f}, H={:.4f})".format( attr_names[k], MI_gap_y[k], H_y[k]), title_font_scale=1.5, subplot_adjust={ 'left': 0.16, 'right': 0.99, 'bottom': 0.005, 'top': 0.93 }, size_inches=(6.5, 2.8), batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) '''
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== celebA_loader = TFCelebAWithAttrLoader(root_dir=args.celebA_root_dir) img_height, img_width = args.celebA_resize_size, args.celebA_resize_size celebA_loader.build_transformation_flow_tf( *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size)) # ===================================== # Instantiate model # ===================================== if args.activation == "relu": activation = tf.nn.relu elif args.activation == "leaky_relu": activation = tf.nn.leaky_relu else: raise ValueError("Do not support '{}' activation!".format(args.activation)) if args.enc_dec_model == "1Konny": # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim) encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation) decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation, output_activation=tf.nn.sigmoid) disc_z = DiscriminatorZ_1Konny() else: raise ValueError("Do not support encoder/decoder model '{}'!".format(args.enc_dec_model)) model = AAE([img_height, img_width, 3], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, stochastic_z=args.stochastic_z, use_gp0_z=True, gp0_z_mode=args.gp0_z_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'G_loss_z1_gen': args.G_loss_z1_gen_coeff, 'D_loss_z1_gen': args.D_loss_z1_gen_coeff, 'gp0_z': args.gp0_z_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments # save_dir = remove_dir_if_exist(join(args.save_dir, "AAE_{}".format(args.run)), ask_4_permission=True) # save_dir = make_dir_if_not_exist(save_dir) save_dir = make_dir_if_not_exist(join(args.save_dir, "AAE_{}".format(args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=4, suppress=True) num_bins = args.num_bins bin_limits = tuple([float(s) for s in args.bin_limits.split(";")]) data_proportion = args.data_proportion num_data = int(data_proportion * celebA_loader.num_train_data) eps = 1e-8 # file f = open(join(save_dir, 'log[bins={},bin_limits={},data={}].txt'. 
format(num_bins, bin_limits, data_proportion)), mode='w') # print function print_ = functools.partial(print_both, file=f) print_("num_bins: {}".format(num_bins)) print_("bin_limits: {}".format(bin_limits)) print_("data_proportion: {}".format(data_proportion)) # Compute representations # ================================= # z_data_file = join(save_dir, "z_data[data={}].npz".format(data_proportion)) if not exists(z_data_file): all_z_mean = [] all_z_stddev = [] print("") print("Compute all_z_mean and all_z_stddev!") count = 0 for batch_ids in iterate_data(num_data, 10 * args.batch_size, shuffle=False): x = celebA_loader.sample_images_from_dataset(sess, 'train', batch_ids) z_mean, z_stddev = sess.run( model.get_output(['z_mean', 'z_stddev']), feed_dict={model.is_train: False, model.x_ph: x}) all_z_mean.append(z_mean) all_z_stddev.append(z_stddev) count += len(batch_ids) print("\rProcessed {} samples!".format(count), end="") print() all_z_mean = np.concatenate(all_z_mean, axis=0) all_z_stddev = np.concatenate(all_z_stddev, axis=0) np.savez_compressed(z_data_file, all_z_mean=all_z_mean, all_z_stddev=all_z_stddev) else: print("{} exists. Load data from file!".format(z_data_file)) with np.load(z_data_file, "r") as f: all_z_mean = f['all_z_mean'] all_z_stddev = f['all_z_stddev'] print_("") print_("all_z_mean.shape: {}".format(all_z_mean.shape)) print_("all_z_stddev.shape: {}".format(all_z_stddev.shape)) # ================================= # # Compute bins # ================================= # print_("") print_("bin_limits: {}".format(bin_limits)) assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[1], "bin_limits={}".format(bin_limits) bins = np.linspace(bin_limits[0], bin_limits[1], num_bins + 1, endpoint=True) print_("bins: {}".format(bins)) assert len(bins) == num_bins + 1 bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))] print_("bin_widths: {}".format(bin_widths)) assert len(bin_widths) == num_bins, "len(bin_widths)={} while num_bins={}!".format(len(bin_widths), num_bins) assert np.all(np.greater(bin_widths, 0)), "bin_widths: {}".format(bin_widths) bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))] print_("bin_centers: {}".format(bin_centers)) assert len(bin_centers) == num_bins, "len(bin_centers)={} while num_bins={}!".format(len(bin_centers), num_bins) # ================================= # # Compute mutual information # ================================= # H_z = [] H_z_cond_x = [] MI_z_x = [] norm_MI_z_x = [] Q_z_cond_x = [] Q_z = [] for i in range(args.z_dim): print_("") print_("Compute I(z{}, x)!".format(i)) # Q_s_cond_x all_Q_s_cond_x = [] for batch_ids in iterate_data(len(all_z_mean), 500, shuffle=False, include_remaining=True): # (batch_size, num_bins) q_s_cond_x = normal_density(np.expand_dims(bin_centers, axis=0), mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1), stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1)) # (batch_size, num_bins) max_q_s_cond_x = np.max(q_s_cond_x, axis=-1) # print("\nmax_q_s_cond_x: {}".format(np.sort(max_q_s_cond_x))) # (batch_size, num_bins) deter_s_cond_x = at_bin(all_z_mean[batch_ids, i], bins).astype(np.float32) # (batch_size, num_bins) Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0) Q_s_cond_x = Q_s_cond_x / np.maximum(np.sum(Q_s_cond_x, axis=1, keepdims=True), eps) # print("sort(sum(Q_s_cond_x)) (before): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1)))) Q_s_cond_x = np.where(np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1), deter_s_cond_x, Q_s_cond_x) # 
print("sort(sum(Q_s_cond_x)) (after): {}".format(np.sort(np.sum(Q_s_cond_x, axis=-1)))) all_Q_s_cond_x.append(Q_s_cond_x) all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0) print_("sort(sum(all_Q_s_cond_x))[:10]: {}".format( np.sort(np.sum(all_Q_s_cond_x, axis=-1), axis=0)[:100])) assert np.all(all_Q_s_cond_x >= 0), "'all_Q_s_cond_x' contains negative values. " \ "sorted_all_Q_s_cond_x[:30]:\n{}!".format(np.sort(all_Q_s_cond_x[:30], axis=None)) Q_z_cond_x.append(all_Q_s_cond_x) H_zi_cond_x = -np.mean(np.sum(all_Q_s_cond_x * np.log(np.maximum(all_Q_s_cond_x, eps)), axis=1), axis=0) # Q_s Q_s = np.mean(all_Q_s_cond_x, axis=0) print_("Q_s: {}".format(Q_s)) print_("sum(Q_s): {}".format(sum(Q_s))) assert np.all(Q_s >= 0), "'Q_s' contains negative values. " \ "sorted_Q_s[:10]:\n{}!".format(np.sort(Q_s, axis=None)) Q_s = Q_s / np.sum(Q_s, axis=0) print_("sum(Q_s) (normalized): {}".format(sum(Q_s))) Q_z.append(Q_s) H_zi = -np.sum(Q_s * np.log(np.maximum(Q_s, eps)), axis=0) MI_zi_x = H_zi - H_zi_cond_x normalized_MI_zi_x = (1.0 * MI_zi_x) / (H_zi + eps) print_("H_zi: {}".format(H_zi)) print_("H_zi_cond_x: {}".format(H_zi_cond_x)) print_("MI_zi_x: {}".format(MI_zi_x)) print_("normalized_MI_zi_x: {}".format(normalized_MI_zi_x)) H_z.append(H_zi) H_z_cond_x.append(H_zi_cond_x) MI_z_x.append(MI_zi_x) norm_MI_z_x.append(normalized_MI_zi_x) H_z = np.asarray(H_z, dtype=np.float32) H_z_cond_x = np.asarray(H_z_cond_x, dtype=np.float32) MI_z_x = np.asarray(MI_z_x, dtype=np.float32) norm_MI_z_x = np.asarray(norm_MI_z_x, dtype=np.float32) print_("") print_("H_z: {}".format(H_z)) print_("H_z_cond_x: {}".format(H_z_cond_x)) print_("MI_z_x: {}".format(MI_z_x)) print_("norm_MI_z_x: {}".format(norm_MI_z_x)) sorted_z_comps = np.argsort(MI_z_x, axis=0)[::-1] sorted_MI_z_x = np.take_along_axis(MI_z_x, sorted_z_comps, axis=0) print_("sorted_MI_z_x: {}".format(sorted_MI_z_x)) print_("sorted_z_comps: {}".format(sorted_z_comps)) sorted_norm_z_comps = np.argsort(norm_MI_z_x, axis=0)[::-1] sorted_norm_MI_z_x = np.take_along_axis(norm_MI_z_x, sorted_norm_z_comps, axis=0) print_("sorted_norm_MI_z_x: {}".format(sorted_norm_MI_z_x)) print_("sorted_norm_z_comps: {}".format(sorted_norm_z_comps)) result_file = join(save_dir, 'results[bins={},bin_limits={},data={}].npz'. format(num_bins, bin_limits, data_proportion)) np.savez_compressed(result_file, H_z=H_z, H_z_cond_x=H_z_cond_x, MI_z_x=MI_z_x, norm_MI_z_x=norm_MI_z_x, sorted_MI_z_x=sorted_MI_z_x, sorted_z_comps=sorted_z_comps, sorted_norm_MI_z_x=sorted_norm_MI_z_x, sorted_norm_z_comps=sorted_norm_z_comps) Q_z_cond_x = np.asarray(Q_z_cond_x, dtype=np.float32) Q_z = np.asarray(Q_z, dtype=np.float32) z_prob_file = join(save_dir, 'z_prob[bins={},bin_limits={},data={}].npz'. format(num_bins, bin_limits, data_proportion)) np.savez_compressed(z_prob_file, Q_z_cond_x=Q_z_cond_x, Q_z=Q_z)
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites", "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz") # It is already in the range [0, 1] with np.load(data_file, encoding="latin1") as f: x_train = f['imgs'] # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y y_train = f['latents_classes'] x_train = np.expand_dims(x_train.astype(np.float32), axis=-1) num_train = len(x_train) print("num_train: {}".format(num_train)) # ===================================== # Instantiate model # ===================================== if args.enc_dec_model == "1Konny": encoder = Encoder_1Konny(args.z_dim, stochastic=True) decoder = Decoder_1Konny() disc_z = DiscriminatorZ_1Konny(num_outputs=2) else: raise ValueError("Do not support enc_dec_model='{}'!".format( args.enc_dec_model)) model = FactorVAE([64, 64, 1], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'kld_loss': args.kld_loss_coeff, 'tc_loss': args.tc_loss_coeff, 'gp0_z_tc': args.gp0_z_tc_coeff, 'Dz_tc_loss_coeff': args.Dz_tc_loss_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments save_dir = make_dir_if_not_exist( join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=5, suppress=True) num_samples = args.num_samples print("num_samples: {}".format(num_samples)) # Compute representations # ================================= # z_data_file = join(save_dir, "z_data.npz") if not exists(z_data_file): all_z_samples = [] all_z_mean = [] all_z_stddev = [] print("") print("Compute all_z_mean, all_z_stddev and all_attrs!") count = 0 for batch_ids in iterate_data(num_train, 10 * args.batch_size, shuffle=False): x = x_train[batch_ids] z_samples, z_mean, z_stddev = sess.run(model.get_output( ['z1_gen', 'z_mean', 'z_stddev']), feed_dict={ model.is_train: False, model.x_ph: x }) all_z_samples.append(z_samples) all_z_mean.append(z_mean) all_z_stddev.append(z_stddev) count += len(batch_ids) print("\rProcessed {} samples!".format(count), end="") print() all_z_samples = np.concatenate(all_z_samples, axis=0) all_z_mean = np.concatenate(all_z_mean, axis=0) all_z_stddev = np.concatenate(all_z_stddev, axis=0) np.savez_compressed(z_data_file, all_z_samples=all_z_samples, all_z_mean=all_z_mean, all_z_stddev=all_z_stddev) else: print("{} exists. 
Load data from file!".format(z_data_file)) with np.load(z_data_file, "r") as f: all_z_samples = f['all_z_samples'] all_z_mean = f['all_z_mean'] all_z_stddev = f['all_z_stddev'] # ================================= # all_z_samples = np.reshape(all_z_samples, [3, 6, 40, 32, 32, -1]) all_z_mean = np.reshape(all_z_mean, [3, 6, 40, 32, 32, -1]) all_z_stddev = np.reshape(all_z_stddev, [3, 6, 40, 32, 32, -1]) if args.gpu_support == 'cupy': print("Use cupy instead of numpy!") results = MIG_4_dSprites_cupy(all_z_samples, all_z_mean, all_z_stddev, version=1, batch_size=10, num_samples=num_samples, gpu=args.gpu_id) else: results = MIG_4_dSprites(all_z_samples, all_z_mean, all_z_stddev, num_samples=num_samples, version=1, batch_size=200) result_file = join(save_dir, "results[num_samples={}].npz".format(num_samples)) np.savez_compressed(result_file, **results) f.close()
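# ---------------------------------------------------------------------------
# Note: MIG_4_dSprites / MIG_4_dSprites_cupy are defined elsewhere in the
# repo. The reshape to [3, 6, 40, 32, 32, z_dim] above lays the codes out on
# the dSprites factor grid (shape x scale x rotation x pos_x x pos_y), so
# per-factor-value statistics reduce to axis means. The sketch below shows
# only that marginalization idea; the helper name is hypothetical.
# ---------------------------------------------------------------------------
import numpy as np

def per_factor_value_means(all_z_mean, factor_axis):
    """Mean latent code for each value of one dSprites factor.

    all_z_mean: array of shape (3, 6, 40, 32, 32, z_dim);
    factor_axis: 0..4 selecting shape / scale / rotation / pos_x / pos_y.
    """
    reduce_axes = tuple(ax for ax in range(5) if ax != factor_axis)
    return np.mean(all_z_mean, axis=reduce_axes)  # (num_values_of_factor, z_dim)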
def main(args): # ===================================== # Load config # ===================================== with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # ===================================== # Dataset # ===================================== data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites", "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz") # It is already in the range [0, 1] with np.load(data_file, encoding="latin1") as f: x_train = f['imgs'] x_train = np.reshape(x_train, [3, 6, 40, 32, 32, 64, 64, 1]) # ===================================== # Instantiate model # ===================================== if args.enc_dec_model == "1Konny": encoder = Encoder_1Konny(args.z_dim, stochastic=True) decoder = Decoder_1Konny() disc_z = DiscriminatorZ_1Konny() else: raise ValueError("Do not support enc_dec_model='{}'!".format( args.enc_dec_model)) model = AAE([64, 64, 1], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, stochastic_z=args.stochastic_z, use_gp0_z=True, gp0_z_mode=args.gp0_z_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'G_loss_z1_gen': args.G_loss_z1_gen_coeff, 'D_loss_z1_gen': args.D_loss_z1_gen_coeff, 'gp0_z': args.gp0_z_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # ===================================== # Load model # ===================================== config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) # Load model train_helper.load(sess, load_step=args.load_step) # ===================================== # Experiments # save_dir = remove_dir_if_exist(join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run)), ask_4_permission=True) # save_dir = make_dir_if_not_exist(save_dir) save_dir = make_dir_if_not_exist( join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run))) # ===================================== np.set_printoptions(threshold=np.nan, linewidth=1000, precision=3, suppress=True) num_bins = args.num_bins bin_limits = tuple([float(s) for s in args.bin_limits.split(";")]) data_proportion = args.data_proportion # Logs f = open(join( save_dir, 'log[bins={},bin_limits={},data={}].txt'.format( num_bins, bin_limits, data_proportion)), mode='w') print_ = functools.partial(print_both, file=f) print_("") print_("num_bins: {}".format(num_bins)) print_("bin_limits: {}".format(bin_limits)) print_("data_proportion: {}".format(data_proportion)) # Results result_file = join( args.interpretability_metrics_dir, "{}_{}".format(args.enc_dec_model, args.run), "results[bins={},bin_limits={},data={}].npz".format( num_bins, bin_limits, data_proportion)) results = np.load(result_file, "r") print_("results.keys: {}".format(list(results.keys()))) # Plotting # =========================================== # data = [ x_train[0, 3, 20, 16, 16], x_train[1, 3, 20, 16, 16], x_train[2, 3, 20, 16, 16] ] gt_factors = ['Shape', 'Scale', 'Rotation', 'Pos_x', 'Pos_y'] ids_sorted = results['ids_sorted'] MI_z_y_sorted = results['MI_z_y_sorted'] H_z_y_sorted = results['H_z_y_sorted'] H_y = results['H_y'] RMIG = results['RMIG'] JEMMI = results['JEMMI'] print_("MI_z_y_sorted:\n{}".format(MI_z_y_sorted)) print_("\nShow RMIG!") for k in range(len(gt_factors)): 
print_( "{}, RMIG: {:.4f}, RMIG (unnorm): {:.4f}, H: {:.4f}, I1: {:.4f}, I2: {:.4f}" .format(gt_factors[k], RMIG[k], RMIG[k] * H_y[k], H_y[k], MI_z_y_sorted[0, k], MI_z_y_sorted[1, k])) print_("\nShow JEMMI!") for k in range(len(gt_factors)): print_( "{}, JEMMI: {:.4f}, JEMMI (unnorm): {:.4f}, H1: {:.4f}, H1-I1: {:.4f}, I2: {:.4f}, " "top2 ids: z{}, z{}".format( gt_factors[k], JEMMI[k], JEMMI[k] * (H_y[k] + np.log(num_bins)), H_z_y_sorted[0, k], H_z_y_sorted[0, k] - MI_z_y_sorted[0, k], MI_z_y_sorted[1, k], ids_sorted[0, k], ids_sorted[1, k])) span = 3 points_one_side = 5 for n in range(len(data)): for k in range(len(gt_factors)): print("x={}, y={}!".format(n, gt_factors[k])) img_file = join( save_dir, "{}[x={},bins={},bin_limits={},data={}].png".format( gt_factors[k], n, num_bins, bin_limits, data_proportion)) ''' ids_top10 = ids_sorted[:10, k] MI_top10 = MI_z_y_sorted[:10, k] model.cond_all_latents_traverse_v2(img_file, sess, data[n], z_comps=ids_top10, z_comp_labels=["z[{}] ({:.4f})".format(comp, mi) for comp, mi in zip(ids_top10, MI_top10)], span=span, points_1_side=points_one_side, hl_x=True, font_size=9, title="{} (RMIG={:.4f}, JEMMI={:.4f}, H={:.4f})".format( gt_factors[k], RMIG[k], JEMMI[k], H_y[k]), title_font_scale=1.5, subplot_adjust={'left': 0.16, 'right': 0.99, 'bottom': 0.01, 'top': 0.95}, size_inches=(6.5, 5.2), batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) ''' ids_top3 = ids_sorted[:3, k] MI_top3 = MI_z_y_sorted[:3, k] model.cond_all_latents_traverse_v2( img_file, sess, data[n], z_comps=ids_top3, z_comp_labels=[ "z[{}] ({:.4f})".format(comp, mi) for comp, mi in zip(ids_top3, MI_top3) ], span=span, points_1_side=points_one_side, hl_x=True, font_size=9, title="{} (RMIG={:.4f}, JEMMI={:.4f}, H={:.4f})".format( gt_factors[k], RMIG[k], JEMMI[k], H_y[k]), title_font_scale=1.5, subplot_adjust={ 'left': 0.16, 'right': 0.99, 'bottom': 0.01, 'top': 0.88 }, size_inches=(6.2, 1.7), batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) f.close()
def main(args): # Load config # =========================================== # with open(join(args.output_dir, 'config.json')) as f: config = json.load(f) args.__dict__.update(config) # =========================================== # # Load dataset # =========================================== # data_file = join(RAW_DATA_DIR, "ComputerVision", "dSprites", "dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz") # It is already in the range [0, 1] with np.load(data_file, encoding="latin1") as f: x_train = f['imgs'] x_train = np.reshape(x_train, [3, 6, 40, 32, 32, 64, 64, 1]) # =========================================== # # Build model # =========================================== # if args.enc_dec_model == "1Konny": encoder = Encoder_1Konny(args.z_dim, stochastic=True) decoder = Decoder_1Konny() disc_z = DiscriminatorZ_1Konny(num_outputs=2) else: raise ValueError("Do not support enc_dec_model='{}'!".format( args.enc_dec_model)) model = FactorVAE([64, 64, 1], args.z_dim, encoder=encoder, decoder=decoder, discriminator_z=disc_z, rec_x_mode=args.rec_x_mode, use_gp0_z_tc=True, gp0_z_tc_mode=args.gp0_z_tc_mode) loss_coeff_dict = { 'rec_x': args.rec_x_coeff, 'kld_loss': args.kld_loss_coeff, 'tc_loss': args.tc_loss_coeff, 'gp0_z_tc': args.gp0_z_tc_coeff, 'Dz_tc_loss_coeff': args.Dz_tc_loss_coeff, } model.build(loss_coeff_dict) SimpleParamPrinter.print_all_params_tf_slim() # =========================================== # # Initialize session # =========================================== # config_proto = tf.ConfigProto(allow_soft_placement=True) config_proto.gpu_options.allow_growth = True config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9 sess = tf.Session(config=config_proto) model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf")) train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir) train_helper.load(sess, load_step=args.load_step) save_dir = make_dir_if_not_exist( join(args.save_dir, "{}_{}".format(args.enc_dec_model, args.run))) # =========================================== # # Load result file # =========================================== # result_file = join(args.SEPIN_dir, "{}_{}".format(args.enc_dec_model, args.run), "results[num_samples={}].npz".format(args.num_samples)) results = np.load(result_file, "r") print("results.keys: {}".format(list(results.keys()))) np.set_printoptions(threshold=np.nan, linewidth=1000, precision=3, suppress=True) # =========================================== # # Plotting # =========================================== # data = [ x_train[0, 3, 20, 16, 16], x_train[1, 3, 20, 16, 16], x_train[2, 3, 20, 16, 16] ] gt_factors = ['Shape', 'Scale', 'Rotation', 'Pos_x', 'Pos_y'] # (num_latents,) MI_zi_x = results['MI_zi_x'] SEP_zi = results['SEP_zi'] ids_sorted = np.argsort(SEP_zi, axis=0)[::-1] print("") print("MI_zi_x: {}".format(MI_zi_x)) print("SEP_zi: {}".format(SEP_zi)) print("ids_sorted: {}".format(ids_sorted)) span = 3 points_one_side = 5 for n in range(len(data)): img_file = join( save_dir, "sep_x{}_num_samples={}.png".format(n, args.num_samples)) model.cond_all_latents_traverse_v2( img_file, sess, data[n], z_comps=ids_sorted, z_comp_labels=[ "z[{}] (SEP={:.4f}, INFO={:.4f})".format( idx, SEP_zi[idx], MI_zi_x[idx]) for idx in ids_sorted ], span=span, points_1_side=points_one_side, hl_x=True, font_size=9, title_font_scale=1.5, subplot_adjust={ 'left': 0.55, 'right': 0.99, 'bottom': 0.01, 'top': 0.99 }, size_inches=(4.0, 1.7), batch_size=args.batch_size, dec_output_2_img_func=binary_float_to_uint8) f.close()
def main(args):
    # =====================================
    # Load config
    # =====================================
    with open(join(args.output_dir, 'config.json')) as f:
        config = json.load(f)
    args.__dict__.update(config)

    # =====================================
    # Dataset
    # =====================================
    celebA_loader = TFCelebALoader(root_dir=args.celebA_root_dir)

    img_height, img_width = args.celebA_resize_size, args.celebA_resize_size
    celebA_loader.build_transformation_flow_tf(
        *celebA_loader.get_transform_fns("1Konny", resize_size=args.celebA_resize_size))

    num_train = celebA_loader.num_train_data

    # =====================================
    # Instantiate model
    # =====================================
    if args.activation == "relu":
        activation = tf.nn.relu
    elif args.activation == "leaky_relu":
        activation = tf.nn.leaky_relu
    else:
        raise ValueError("Do not support '{}' activation!".format(args.activation))

    if args.enc_dec_model == "1Konny":
        # assert args.z_dim == 65, "For 1Konny, z_dim must be 65. Found {}!".format(args.z_dim)
        encoder = Encoder_1Konny(args.z_dim, stochastic=True, activation=activation)
        decoder = Decoder_1Konny([img_height, img_width, 3], activation=activation,
                                 output_activation=tf.nn.sigmoid)
        disc_z = DiscriminatorZ_1Konny(num_outputs=2)
    else:
        raise ValueError("Do not support encoder/decoder model '{}'!".format(
            args.enc_dec_model))

    model = FactorVAE([img_height, img_width, 3], args.z_dim,
                      encoder=encoder, decoder=decoder,
                      discriminator_z=disc_z,
                      rec_x_mode=args.rec_x_mode,
                      use_gp0_z_tc=True,
                      gp0_z_tc_mode=args.gp0_z_tc_mode)

    loss_coeff_dict = {
        'rec_x': args.rec_x_coeff,
        'kld_loss': args.kld_loss_coeff,
        'tc_loss': args.tc_loss_coeff,
        'gp0_z_tc': args.gp0_z_tc_coeff,
    }

    model.build(loss_coeff_dict)
    SimpleParamPrinter.print_all_params_tf_slim()

    # =====================================
    # Load model
    # =====================================
    config_proto = tf.ConfigProto(allow_soft_placement=True)
    config_proto.gpu_options.allow_growth = True
    config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=config_proto)

    model_dir = make_dir_if_not_exist(join(args.output_dir, "model_tf"))
    train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)

    # Load model
    train_helper.load(sess, load_step=args.load_step)

    # =====================================
    # Experiments
    save_dir = remove_dir_if_exist(join(args.save_dir, "FactorVAE_{}".format(args.run)),
                                   ask_4_permission=False)
    save_dir = make_dir_if_not_exist(save_dir)
    # save_dir = make_dir_if_not_exist(join(args.save_dir, "FactorVAE_{}".format(args.run)))
    # =====================================

    np.set_printoptions(threshold=np.nan, linewidth=1000, precision=3, suppress=True)

    f = open(join(save_dir, 'log.txt'), mode='w')
    print_ = functools.partial(print_both, file=f)

    # z gaussian stddev
    # ======================================= #
    all_z_mean = []
    all_z_stddev = []

    count = 0
    for batch_ids in iterate_data(int(0.05 * num_train), 10 * args.batch_size, shuffle=False):
        x = celebA_loader.sample_images_from_dataset(sess, 'train', batch_ids)

        z_mean, z_stddev = sess.run(
            model.get_output(['z_mean', 'z_stddev']),
            feed_dict={model.is_train: False, model.x_ph: x})

        all_z_mean.append(z_mean)
        all_z_stddev.append(z_stddev)

        count += len(batch_ids)
        print("\rProcessed {} samples!".format(count), end="")
    print()

    all_z_mean = np.concatenate(all_z_mean, axis=0)
    all_z_stddev = np.concatenate(all_z_stddev, axis=0)
    # ======================================= #

    z_std_error = np.std(all_z_mean, axis=0, ddof=0)
    z_sorted_comps = np.argsort(z_std_error)[::-1]
    top10_z_comps = z_sorted_comps[:10]

    print_("")
    print_("z_std_error: {}".format(z_std_error))
    print_("z_sorted_std_error: {}".format(z_std_error[z_sorted_comps]))
    print_("z_sorted_comps: {}".format(z_sorted_comps))
    print_("top10_z_comps: {}".format(top10_z_comps))

    z_stddev_mean = np.mean(all_z_stddev, axis=0)
    info_z_comps = [idx for idx in range(len(z_stddev_mean)) if z_stddev_mean[idx] < 0.4]
    print_("info_z_comps: {}".format(info_z_comps))
    print_("len(info_z_comps): {}".format(len(info_z_comps)))

    # Plotting
    # =========================================== #
    seed = 389
    num_samples = 30

    ids = list(range(seed, seed + num_samples))
    print("\nids: {}".format(ids))

    data = celebA_loader.sample_images_from_dataset(sess, 'train', ids)

    span = 3
    points_one_side = 5
    # span = 8
    # points_one_side = 12

    for n in range(len(ids)):
        print("Plot conditional all comps z traverse with train sample {}!".format(ids[n]))

        img_file = join(save_dir, "x_train[{}]_[span={}]_hl.png".format(ids[n], span))
        # model.cond_all_latents_traverse_v2(img_file, sess, data[n],
        #                                    z_comps=top10_z_comps,
        #                                    z_comp_labels=None,
        #                                    span=span, points_1_side=points_one_side,
        #                                    hl_x=True,
        #                                    batch_size=args.batch_size,
        #                                    dec_output_2_img_func=binary_float_to_uint8)

        img_file = join(save_dir, "x_train[{}]_[span={}]_hl_labeled.png".format(ids[n], span))
        model.cond_all_latents_traverse_v2(
            img_file, sess, data[n],
            z_comps=top10_z_comps,
            z_comp_labels=["z[{}]".format(comp) for comp in top10_z_comps],
            span=span, points_1_side=points_one_side,
            hl_x=True,
            subplot_adjust={'left': 0.09, 'right': 0.98, 'bottom': 0.02, 'top': 0.98},
            size_inches=(6, 5),
            batch_size=args.batch_size,
            dec_output_2_img_func=binary_float_to_uint8)

        # img_file = join(save_dir, "x_train[{}]_[span={}].png".format(ids[n], span))
        # model.cond_all_latents_traverse_v2(img_file, sess, data[n],
        #                                    z_comps=top10_z_comps,
        #                                    z_comp_labels=None,
        #                                    span=span, points_1_side=points_one_side,
        #                                    hl_x=False,
        #                                    batch_size=args.batch_size,
        #                                    dec_output_2_img_func=binary_float_to_uint8)
        #
        # img_file = join(save_dir, "x_train[{}]_[span={}]_labeled.png".format(ids[n], span))
        # model.cond_all_latents_traverse_v2(img_file, sess, data[n],
        #                                    z_comps=top10_z_comps,
        #                                    z_comp_labels=["z[{}]".format(comp) for comp in top10_z_comps],
        #                                    span=span, points_1_side=points_one_side,
        #                                    hl_x=False,
        #                                    subplot_adjust={'left': 0.09, 'right': 0.98, 'bottom': 0.02, 'top': 0.98},
        #                                    size_inches=(6, 5),
        #                                    batch_size=args.batch_size,
        #                                    dec_output_2_img_func=binary_float_to_uint8)

        img_file = join(save_dir, "x_train[{}]_[span={}]_info_hl.png".format(ids[n], span))
        model.cond_all_latents_traverse_v2(
            img_file, sess, data[n],
            z_comps=info_z_comps,
            z_comp_labels=None,
            span=span, points_1_side=points_one_side,
            hl_x=True,
            batch_size=args.batch_size,
            dec_output_2_img_func=binary_float_to_uint8)
    # =========================================== #

    f.close()
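
# The 'info_z_comps' selection above treats a latent component as informative when its
# average posterior stddev stays well below 1; a collapsed component matches the prior
# (stddev ~= 1, mean ~= 0) and carries almost no information about x. An alternative,
# sketched here under the same Gaussian-posterior assumption, is to rank components by
# their average per-component KL to the N(0, 1) prior. The helper name and the 0.01-nat
# threshold are illustrative only, not part of the original script; 'np' is the
# module-level NumPy import.
def rank_latents_by_kl(all_z_mean, all_z_stddev, min_kl=0.01):
    # all_z_mean, all_z_stddev: (num_samples, z_dim) posterior statistics
    # KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * (mu^2 + sigma^2 - 2*log(sigma) - 1)
    kl_per_comp = 0.5 * (np.square(all_z_mean) + np.square(all_z_stddev)
                         - 2.0 * np.log(all_z_stddev + 1e-8) - 1.0)
    kl_mean = np.mean(kl_per_comp, axis=0)        # (z_dim,)
    comps_sorted = np.argsort(kl_mean)[::-1]      # most informative first
    info_comps = [int(i) for i in comps_sorted if kl_mean[i] > min_kl]
    return kl_mean, comps_sorted, info_comps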