def fit(self, sess, local_):
    for _ in range(local_):
        sess.run(self.inc_step)
        x_real, _ = next_batch_(FLAGS.bz)
        z = gaussian(FLAGS.bz, FLAGS.z_dim)
        # Update the discriminator three times per generator update.
        for _ in range(3):
            sess.run(self.d_optim,
                     feed_dict={self.real: x_real, self.z: z, is_training: True})
        sess.run(self.g_optim,
                 feed_dict={self.real: x_real, self.z: z, is_training: True})
    # Report losses on a fresh batch with training mode off.
    x_real, _ = next_batch_(FLAGS.bz)
    return sess.run(
        [self.d_loss, self.g_loss, self.fit_summary],
        feed_dict={
            self.real: x_real,
            self.z: gaussian(FLAGS.bz, FLAGS.z_dim),
            is_training: False
        })
def fit(self, sess, local_):
    for _ in range(local_):
        x, _ = next_batch_(FLAGS.bz)
        sess.run(self.optim, {self.x: x, self.z: gaussian(FLAGS.bz, FLAGS.z_dim)})
    # Report losses on a larger batch for a smoother estimate.
    x, _ = next_batch_(FLAGS.bz * 5)
    return sess.run(
        [self.loss, self.loss_nll, self.loss_mmd, self.fit_summary],
        {self.x: x, self.z: gaussian(FLAGS.bz, FLAGS.z_dim)})
def fit(self, sess, local_):
    for _ in range(local_):
        x_real, _ = next_batch_(FLAGS.bz)
        z = gaussian(FLAGS.bz, FLAGS.z_dim)
        # Update the discriminator three times per generator update.
        for _ in range(3):
            sess.run(self.d_adam, feed_dict={self.real: x_real, self.z: z})
        sess.run(self.g_adam, feed_dict={self.real: x_real, self.z: z})
    # Report losses on a fresh batch.
    x_real, _ = next_batch_(FLAGS.bz)
    return sess.run(
        [self.d_loss, self.g_loss, self.fit_summary],
        feed_dict={self.real: x_real, self.z: gaussian(FLAGS.bz, FLAGS.z_dim)})
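# Note: the fit routines above assume a `gaussian` helper returning a
# (batchsize, ndim) matrix of latent codes. A minimal NumPy sketch, assuming
# the optional third/fourth arguments are mean and variance (hypothetical
# signature inferred from the calls in this file):
import numpy as np

def gaussian(batchsize, ndim, mean=0.0, var=1.0):
    return np.random.normal(mean, np.sqrt(var), (batchsize, ndim)).astype(np.float32)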
def z_real_(bz):
    # Draw "real" latent codes from the prior selected by FLAGS.z_dist:
    # 'g' -> single Gaussian, 'mg' -> mixture of 10 Gaussians, else swiss roll.
    if FLAGS.z_dist == 'g':
        return gaussian(bz, FLAGS.z_dim, 0, 2)
    elif FLAGS.z_dist == 'mg':
        return gaussian_mixture(bz, FLAGS.z_dim, 10)
    else:
        return swiss_roll(bz, FLAGS.z_dim, 10)
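# The 'mg' and swiss-roll branches above assume mixture/swiss-roll priors.
# Hedged sketches following the common adversarial-autoencoder priors (a 2-D
# mixture of Gaussians arranged on a circle, and a 2-D swiss roll, tiled
# across coordinate pairs); the real samplers may differ in scale constants:
import numpy as np

def gaussian_mixture(batchsize, ndim, num_labels, x_std=0.5, y_std=0.1, shift=1.4):
    assert ndim % 2 == 0
    z = np.empty((batchsize, ndim), dtype=np.float32)
    for b in range(batchsize):
        for d in range(0, ndim, 2):
            label = np.random.randint(0, num_labels)
            angle = 2.0 * np.pi * label / num_labels
            x = np.random.normal(0.0, x_std)
            y = np.random.normal(0.0, y_std)
            # rotate the component by its angle and shift it away from the origin
            z[b, d] = x * np.cos(angle) - y * np.sin(angle) + shift * np.cos(angle)
            z[b, d + 1] = x * np.sin(angle) + y * np.cos(angle) + shift * np.sin(angle)
    return z

def swiss_roll(batchsize, ndim, num_labels):
    assert ndim % 2 == 0
    z = np.empty((batchsize, ndim), dtype=np.float32)
    for b in range(batchsize):
        for d in range(0, ndim, 2):
            # each label owns a contiguous arc of the spiral
            label = np.random.randint(0, num_labels)
            u = np.random.uniform(0.0, 1.0) / num_labels + label / float(num_labels)
            r = np.sqrt(u) * 3.0
            rad = np.pi * 4.0 * np.sqrt(u)
            z[b, d], z[b, d + 1] = r * np.cos(rad), r * np.sin(rad)
    return z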
def sample_unlabeled_data():
    x = util.sample_x_variable(batchsize, conf.ndim_x, unlabeled_dataset,
                               gpu_enabled=conf.gpu_enabled)
    # Optional input-noise injection, currently disabled:
    noise = sampler.gaussian(batchsize, conf.ndim_x, mean=0, var=0.3,
                             gpu_enabled=conf.gpu_enabled)
    # x.data += noise.data
    return x
def sample_labeled_data():
    x, y_onehot, y_id = util.sample_x_and_label_variables(
        batchsize, conf.ndim_x, conf.ndim_y, labeled_dataset, labels,
        gpu_enabled=conf.gpu_enabled)
    # Optional input-noise injection, currently disabled:
    noise = sampler.gaussian(batchsize, conf.ndim_x, mean=0, var=0.3,
                             gpu_enabled=conf.gpu_enabled)
    # x.data += noise.data
    return x, y_onehot, y_id
for t in xrange(1, num_trains_per_epoch + 1):
    # reconstruction phase
    x = sample_unlabeled_data()
    aae.update_learning_rate(conf.learning_rate_for_reconstruction_cost)
    aae.update_momentum(conf.momentum_for_reconstruction_cost)
    sum_loss_autoencoder += aae.train_autoencoder_unsupervised(x)

    # regularization phase
    ## train discriminator (several steps per generator step)
    aae.update_learning_rate(conf.learning_rate_for_adversarial_cost)
    aae.update_momentum(conf.momentum_for_adversarial_cost)
    loss_discriminator = 0
    for k in xrange(n_steps_to_optimize_dis):
        if k > 0:
            x = sample_unlabeled_data()
        z_true = sampler.gaussian(batchsize, conf.ndim_z)
        y_true = sampler.onehot_categorical(batchsize, conf.ndim_y)
        loss_discriminator += aae.train_discriminator_yz(x, y_true, z_true)
    loss_discriminator /= n_steps_to_optimize_dis
    sum_loss_discriminator += loss_discriminator

    ## train generator
    sum_loss_generator += aae.train_generator_x_yz(x)

    # semi-supervised classification phase
    x_labeled, y_onehot, y_id = sample_labeled_data()
    aae.update_learning_rate(conf.learning_rate_for_semi_supervised_cost)
    aae.update_momentum(conf.momentum_for_semi_supervised_cost)
    sum_loss_classifier += aae.train_classifier(x_labeled, y_id)

    if t % 10 == 0:
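# sampler.onehot_categorical is not shown above; a minimal sketch, assuming
# it draws uniform class ids and one-hot encodes them (the usual prior over
# y in adversarial autoencoders):
import numpy as np

def onehot_categorical(batchsize, ndim_y):
    y = np.zeros((batchsize, ndim_y), dtype=np.float32)
    y[np.arange(batchsize), np.random.randint(0, ndim_y, batchsize)] = 1.0
    return y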
def main():
    # load MNIST images
    train_images, train_labels = dataset.load_train_images()

    # config
    config = aae.config

    # settings
    # _l -> labeled
    # _u -> unlabeled
    max_epoch = 1000
    num_trains_per_epoch = 5000
    batchsize = 100
    alpha = 1

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # classification
    # 0 -> true sample
    # 1 -> generated sample
    class_true = aae.to_variable(np.zeros(batchsize, dtype=np.int32))
    class_fake = aae.to_variable(np.ones(batchsize, dtype=np.int32))

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_reconstruction = 0
        sum_loss_discriminator = 0
        sum_loss_generator = 0

        for t in xrange(num_trains_per_epoch):
            # sample from data distribution
            images_l, label_onehot_l, label_ids_l = dataset.sample_labeled_data(
                train_images, train_labels, batchsize)

            # reconstruction phase
            z_l = aae.encode_x_z(images_l)
            reconstruction_l = aae.decode_yz_x(label_onehot_l, z_l)
            loss_reconstruction = F.mean_squared_error(
                aae.to_variable(images_l), reconstruction_l)
            aae.backprop_generator(loss_reconstruction)
            aae.backprop_decoder(loss_reconstruction)

            # adversarial phase: train the discriminator on true and fake z
            images_l = dataset.sample_labeled_data(train_images, train_labels,
                                                   batchsize)[0]
            z_fake_l = aae.encode_x_z(images_l)
            z_true_l = sampler.gaussian(batchsize, config.ndim_z, mean=0, var=1)
            dz_true = aae.discriminate_z(z_true_l, apply_softmax=False)
            dz_fake = aae.discriminate_z(z_fake_l, apply_softmax=False)
            loss_discriminator = F.softmax_cross_entropy(
                dz_true, class_true) + F.softmax_cross_entropy(dz_fake, class_fake)
            aae.backprop_discriminator(loss_discriminator)

            # adversarial phase: train the encoder to fool the discriminator
            images_l = dataset.sample_labeled_data(train_images, train_labels,
                                                   batchsize)[0]
            z_fake_l = aae.encode_x_z(images_l)
            dz_fake = aae.discriminate_z(z_fake_l, apply_softmax=False)
            loss_generator = F.softmax_cross_entropy(dz_fake, class_true)
            aae.backprop_generator(loss_generator)

            sum_loss_reconstruction += float(loss_reconstruction.data)
            sum_loss_discriminator += float(loss_discriminator.data)
            sum_loss_generator += float(loss_generator.data)

            if t % 10 == 0:
                progress.show(t, num_trains_per_epoch, {})

        aae.save(args.model_dir)
        progress.show(
            num_trains_per_epoch, num_trains_per_epoch, {
                "loss_r": sum_loss_reconstruction / num_trains_per_epoch,
                "loss_d": sum_loss_discriminator / num_trains_per_epoch,
                "loss_g": sum_loss_generator / num_trains_per_epoch,
            })
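# The adversarial phases above train 2-logit discriminators with softmax
# cross entropy against class 0 (true sample) and class 1 (generated sample),
# while the encoder is scored against class 0. A minimal single-sample NumPy
# check of those objectives (this softmax_cross_entropy is a hypothetical
# stand-in for chainer.functions.softmax_cross_entropy):
import numpy as np

def softmax_cross_entropy(logits, label):
    z = logits - logits.max()                 # numerically stable softmax
    log_probs = z - np.log(np.exp(z).sum())
    return -log_probs[label]

d_true = np.array([2.0, -1.0])   # discriminator logits on a true z
d_fake = np.array([-0.5, 1.5])   # discriminator logits on an encoded (fake) z
loss_discriminator = softmax_cross_entropy(d_true, 0) + softmax_cross_entropy(d_fake, 1)
loss_generator = softmax_cross_entropy(d_fake, 0)  # encoder wants fakes called "true"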
def main():
    # load MNIST images
    images, labels = dataset.load_train_images()

    # config
    config = aae.config

    # settings
    # _l -> labeled
    # _u -> unlabeled
    max_epoch = 1000
    num_trains_per_epoch = 5000
    batchsize_l = 100
    batchsize_u = 100
    alpha = 1

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # save validation accuracy per epoch
    csv_results = []

    # create semi-supervised split
    num_validation_data = 10000
    num_labeled_data = 100
    num_types_of_label = 10
    training_images_l, training_labels_l, training_images_u, validation_images, validation_labels = dataset.create_semisupervised(
        images, labels, num_validation_data, num_labeled_data,
        num_types_of_label, seed=args.seed)
    print training_labels_l

    # classification
    # 0 -> true sample
    # 1 -> generated sample
    class_true = aae.to_variable(np.zeros(batchsize_u, dtype=np.int32))
    class_fake = aae.to_variable(np.ones(batchsize_u, dtype=np.int32))

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_reconstruction = 0
        sum_loss_supervised = 0
        sum_loss_discriminator = 0
        sum_loss_generator = 0

        for t in xrange(num_trains_per_epoch):
            # sample from data distribution
            images_l, label_onehot_l, label_ids_l = dataset.sample_labeled_data(
                training_images_l, training_labels_l, batchsize_l,
                config.ndim_x, config.ndim_y)
            images_u = dataset.sample_unlabeled_data(training_images_u,
                                                     batchsize_u, config.ndim_x)

            # reconstruction phase
            q_y_x_u, z_u = aae.encode_x_yz(images_u, apply_softmax=True)
            reconstruction_u = aae.decode_yz_x(q_y_x_u, z_u)
            loss_reconstruction = F.mean_squared_error(
                aae.to_variable(images_u), reconstruction_u)
            aae.backprop_generator(loss_reconstruction)
            aae.backprop_decoder(loss_reconstruction)

            # adversarial phase: train the discriminators on true and fake y/z
            y_fake_u, z_fake_u = aae.encode_x_yz(images_u, apply_softmax=True)
            z_true_u = sampler.gaussian(batchsize_u, config.ndim_z, mean=0, var=1)
            y_true_u = sampler.onehot_categorical(batchsize_u, config.ndim_y)
            discrimination_z_true = aae.discriminate_z(z_true_u, apply_softmax=False)
            discrimination_y_true = aae.discriminate_y(y_true_u, apply_softmax=False)
            discrimination_z_fake = aae.discriminate_z(z_fake_u, apply_softmax=False)
            discrimination_y_fake = aae.discriminate_y(y_fake_u, apply_softmax=False)
            loss_discriminator_z = F.softmax_cross_entropy(
                discrimination_z_true, class_true) + F.softmax_cross_entropy(
                    discrimination_z_fake, class_fake)
            loss_discriminator_y = F.softmax_cross_entropy(
                discrimination_y_true, class_true) + F.softmax_cross_entropy(
                    discrimination_y_fake, class_fake)
            loss_discriminator = loss_discriminator_z + loss_discriminator_y
            aae.backprop_discriminator(loss_discriminator)

            # adversarial phase: train the encoder to fool both discriminators
            y_fake_u, z_fake_u = aae.encode_x_yz(images_u, apply_softmax=True)
            discrimination_z_fake = aae.discriminate_z(z_fake_u, apply_softmax=False)
            discrimination_y_fake = aae.discriminate_y(y_fake_u, apply_softmax=False)
            loss_generator_z = F.softmax_cross_entropy(discrimination_z_fake, class_true)
            loss_generator_y = F.softmax_cross_entropy(discrimination_y_fake, class_true)
            loss_generator = loss_generator_z + loss_generator_y
            aae.backprop_generator(loss_generator)

            # supervised phase
            unnormalized_q_y_x_l, z_l = aae.encode_x_yz(images_l, apply_softmax=False)
            loss_supervised = F.softmax_cross_entropy(
                unnormalized_q_y_x_l, aae.to_variable(label_ids_l))
            aae.backprop_generator(loss_supervised)

            sum_loss_reconstruction += float(loss_reconstruction.data)
            sum_loss_supervised += float(loss_supervised.data)
            sum_loss_discriminator += float(loss_discriminator.data)
            sum_loss_generator += float(loss_generator.data)

            if t % 10 == 0:
                progress.show(t, num_trains_per_epoch, {})

        aae.save(args.model_dir)

        # validation phase
        # split validation data to reduce gpu memory consumption
        images_v, _, label_ids_v = dataset.sample_labeled_data(
            validation_images, validation_labels, num_validation_data,
            config.ndim_x, config.ndim_y)
        images_v_segments = np.split(images_v, num_validation_data // 500)
        label_ids_v_segments = np.split(label_ids_v, num_validation_data // 500)
        num_correct = 0
        for images_v, labels_v in zip(images_v_segments, label_ids_v_segments):
            predicted_labels = aae.argmax_x_label(images_v, test=True)
            for i, label in enumerate(predicted_labels):
                if label == labels_v[i]:
                    num_correct += 1
        validation_accuracy = num_correct / float(num_validation_data)

        progress.show(
            num_trains_per_epoch, num_trains_per_epoch, {
                "loss_r": sum_loss_reconstruction / num_trains_per_epoch,
                "loss_s": sum_loss_supervised / num_trains_per_epoch,
                "loss_d": sum_loss_discriminator / num_trains_per_epoch,
                "loss_g": sum_loss_generator / num_trains_per_epoch,
                "accuracy": validation_accuracy
            })

        # write accuracy to csv
        csv_results.append([epoch, validation_accuracy])
        data = pd.DataFrame(csv_results)
        data.columns = ["epoch", "accuracy"]
        data.to_csv("{}/result.csv".format(args.model_dir))
def main():
    # load MNIST images
    images, labels = dataset.load_train_images()

    # config
    config = aae.config

    # settings
    max_epoch = 1000
    num_trains_per_epoch = 5000
    batchsize = 100
    alpha = 1

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # classification
    # 0 -> true sample
    # 1 -> generated sample
    class_true = aae.to_variable(np.zeros(batchsize, dtype=np.int32))
    class_fake = aae.to_variable(np.ones(batchsize, dtype=np.int32))

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_reconstruction = 0
        sum_loss_discriminator = 0
        sum_loss_generator = 0
        sum_loss_cluster_head = 0

        for t in xrange(num_trains_per_epoch):
            # sample from data distribution
            images_u = dataset.sample_unlabeled_data(images, batchsize)

            # reconstruction phase
            qy_x_u, z_u = aae.encode_x_yz(images_u, apply_softmax=True)
            representation = aae.encode_yz_representation(qy_x_u, z_u)
            reconstruction_u = aae.decode_representation_x(representation)
            loss_reconstruction = F.mean_squared_error(
                aae.to_variable(images_u), reconstruction_u)
            aae.backprop_generator(loss_reconstruction)
            aae.backprop_decoder(loss_reconstruction)

            # adversarial phase: train the discriminators on true and fake y/z
            y_fake_u, z_fake_u = aae.encode_x_yz(images_u, apply_softmax=True)
            z_true_u = sampler.gaussian(batchsize, config.ndim_z, mean=0, var=1)
            y_true_u = sampler.onehot_categorical(batchsize, config.ndim_y)
            discrimination_z_true = aae.discriminate_z(z_true_u, apply_softmax=False)
            discrimination_y_true = aae.discriminate_y(y_true_u, apply_softmax=False)
            discrimination_z_fake = aae.discriminate_z(z_fake_u, apply_softmax=False)
            discrimination_y_fake = aae.discriminate_y(y_fake_u, apply_softmax=False)
            loss_discriminator_z = F.softmax_cross_entropy(
                discrimination_z_true, class_true) + F.softmax_cross_entropy(
                    discrimination_z_fake, class_fake)
            loss_discriminator_y = F.softmax_cross_entropy(
                discrimination_y_true, class_true) + F.softmax_cross_entropy(
                    discrimination_y_fake, class_fake)
            loss_discriminator = loss_discriminator_z + loss_discriminator_y
            aae.backprop_discriminator(loss_discriminator)

            # adversarial phase: train the encoder to fool both discriminators
            y_fake_u, z_fake_u = aae.encode_x_yz(images_u, apply_softmax=True)
            discrimination_z_fake = aae.discriminate_z(z_fake_u, apply_softmax=False)
            discrimination_y_fake = aae.discriminate_y(y_fake_u, apply_softmax=False)
            loss_generator_z = F.softmax_cross_entropy(discrimination_z_fake, class_true)
            loss_generator_y = F.softmax_cross_entropy(discrimination_y_fake, class_true)
            loss_generator = loss_generator_z + loss_generator_y
            aae.backprop_generator(loss_generator)

            # additional cost defined on the Euclidean distance between every
            # pair of cluster heads (minimizing -sum pushes the heads apart)
            distance = aae.compute_distance_of_cluster_heads()
            loss_cluster_head = -F.sum(distance)
            aae.backprop_cluster_head(loss_cluster_head)

            sum_loss_reconstruction += float(loss_reconstruction.data)
            sum_loss_discriminator += float(loss_discriminator.data)
            sum_loss_generator += float(loss_generator.data)
            sum_loss_cluster_head += float(
                aae.nCr(config.ndim_y, 2) *
                config.cluster_head_distance_threshold + loss_cluster_head.data)

            if t % 10 == 0:
                progress.show(t, num_trains_per_epoch, {})

        aae.save(args.model_dir)
        progress.show(
            num_trains_per_epoch, num_trains_per_epoch, {
                "loss_r": sum_loss_reconstruction / num_trains_per_epoch,
                "loss_d": sum_loss_discriminator / num_trains_per_epoch,
                "loss_g": sum_loss_generator / num_trains_per_epoch,
                "loss_c": sum_loss_cluster_head / num_trains_per_epoch,
            })
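# aae.compute_distance_of_cluster_heads() is not shown; a hypothetical NumPy
# sketch of what it may compute, assuming cluster heads are the rows of an
# (ndim_y, ndim_z) matrix and each pairwise Euclidean distance is clipped at
# config.cluster_head_distance_threshold. Under that reading, minimizing
# -sum(distance) pushes heads apart only until they are `threshold` apart,
# and the logged nCr(ndim_y, 2) * threshold + loss_cluster_head is the total
# remaining shortfall below the threshold:
import numpy as np

def distance_of_cluster_heads(heads, threshold):
    n = heads.shape[0]
    distances = []
    for i in range(n):
        for j in range(i + 1, n):  # each of the nCr(n, 2) pairs once
            d = np.linalg.norm(heads[i] - heads[j])
            distances.append(min(d, threshold))
    return np.asarray(distances, dtype=np.float32)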
def main(run_load_from_file=False):
    # load MNIST images
    images, labels = dataset.load_train_images()

    # config
    opt = Operation()
    opt.check_dir(config.ckpt_dir, is_restart=False)
    opt.check_dir(config.log_dir, is_restart=True)

    # settings
    max_epoch = 510
    num_trains_per_epoch = 500
    batch_size_u = 100

    # training
    with tf.device(config.device):
        h = build_graph()

    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 log_device_placement=True)
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    saver = tf.train.Saver(max_to_keep=2)

    with tf.Session(config=sess_config) as sess:
        '''
        Load from checkpoint or start a new session
        '''
        if run_load_from_file:
            saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
            training_epoch_loss, _ = pickle.load(
                open(config.ckpt_dir + '/pickle.pkl', 'rb'))
        else:
            sess.run(tf.global_variables_initializer())
            training_epoch_loss = []

        # Recording loss per epoch
        process = Process()
        for epoch in range(max_epoch):
            process.start_epoch(epoch, max_epoch)

            '''
            Learning rate generator
            '''
            learning_rate = opt.ladder_learning_rate(epoch + len(training_epoch_loss))

            # Recording loss per iteration
            training_loss_set = []
            sum_loss_reconstruction = 0
            sum_loss_supervised = 0
            sum_loss_discriminator = 0
            sum_loss_generator = 0

            process_iteration = Process()
            for i in range(num_trains_per_epoch):
                process_iteration.start_epoch(i, num_trains_per_epoch)

                # sample from data distribution
                images_u = dataset.sample_unlabeled_data(images, batch_size_u)

                # reconstruction phase
                _, loss_reconstruction = sess.run(
                    [h.opt_r, h.loss_r],
                    feed_dict={h.x: images_u, h.lr: learning_rate})

                z_true_u = sampler.gaussian(batch_size_u, config.ndim_z, mean=0, var=1)
                y_true_u = sampler.onehot_categorical(batch_size_u, config.ndim_y)

                # adversarial phase for discriminator
                _, loss_discriminator_y = sess.run(
                    [h.opt_dy, h.loss_dy],
                    feed_dict={h.x: images_u, h.y: y_true_u, h.lr: learning_rate})
                _, loss_discriminator_z = sess.run(
                    [h.opt_dz, h.loss_dz],
                    feed_dict={h.x: images_u, h.z: z_true_u, h.lr: learning_rate})
                loss_discriminator = loss_discriminator_y + loss_discriminator_z

                # adversarial phase for generator
                _, loss_generator_y, loss_generator_z = sess.run(
                    [h.opt_e, h.loss_gy, h.loss_gz],
                    feed_dict={h.x: images_u, h.lr: learning_rate})
                loss_generator = loss_generator_y + loss_generator_z

                training_loss_set.append([
                    loss_reconstruction,
                    loss_discriminator,
                    loss_discriminator_y,
                    loss_discriminator_z,
                    loss_generator,
                    loss_generator_z,
                    loss_generator_y,
                ])
                sum_loss_reconstruction += loss_reconstruction
                sum_loss_discriminator += loss_discriminator
                sum_loss_generator += loss_generator

                if i % 1000 == 0:
                    process_iteration.show_table_2d(
                        i, num_trains_per_epoch, {
                            'reconstruction': sum_loss_reconstruction / (i + 1),
                            'discriminator': sum_loss_discriminator / (i + 1),
                            'generator': sum_loss_generator / (i + 1),
                        })

            # At the end of each epoch, summarize the losses
            average_training_loss_per_epoch = np.mean(
                np.array(training_loss_set), axis=0)
            training_epoch_loss.append(average_training_loss_per_epoch)
            loss_name_per_epoch = [
                'reconstruction',
                'discriminator',
                'discriminator_y',
                'discriminator_z',
                'generator',
                'generator_z',
                'generator_y',
            ]

            if epoch % 1 == 0:
                process.show_bar(
                    epoch, max_epoch, {
                        'loss_r': average_training_loss_per_epoch[0],
                        'loss_d': average_training_loss_per_epoch[1],
                        'loss_g': average_training_loss_per_epoch[4],
                    })
                plt.tile_images(sess.run(h.x_, feed_dict={h.x: images_u}),
                                dir=config.log_dir,
                                filename='x_rec_epoch_{}'.format(str(epoch).zfill(3)))

            if epoch % 10 == 0:
                saver.save(sess,
                           os.path.join(config.ckpt_dir, 'model_ckptpoint'),
                           global_step=epoch)
                pickle.dump((training_epoch_loss, loss_name_per_epoch),
                            open(config.ckpt_dir + '/pickle.pkl', 'wb'))
def gen(self, sess, bz):
    return sess.run(self.fake,
                    feed_dict={self.z: gaussian(bz, FLAGS.z_dim), is_training: False})
def gen(self, sess, bz):
    return sess.run([self.gen_x, self.gen_summary],
                    {self.z: gaussian(bz, FLAGS.z_dim)})
def main():
    # load MNIST images
    images, labels = dataset.load_train_images()

    # config
    config = aae.config

    # settings
    # _l -> labeled
    # _u -> unlabeled
    max_epoch = 1000
    num_trains_per_epoch = 5000
    batchsize_l = 100
    batchsize_u = 100
    alpha = 1

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # save validation accuracy per epoch
    csv_results = []

    # create semi-supervised split
    num_validation_data = 10000
    num_labeled_data = 100
    num_types_of_label = 10
    training_images_l, training_labels_l, training_images_u, validation_images, validation_labels = dataset.create_semisupervised(
        images, labels, num_validation_data, num_labeled_data,
        num_types_of_label)

    # classification
    # 0 -> true sample
    # 1 -> generated sample
    class_true = aae.to_variable(np.zeros(batchsize_u, dtype=np.int32))
    class_fake = aae.to_variable(np.ones(batchsize_u, dtype=np.int32))

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_reconstruction = 0
        sum_loss_supervised = 0
        sum_loss_discriminator = 0
        sum_loss_generator = 0
        sum_loss_cluster_head = 0

        for t in xrange(num_trains_per_epoch):
            # sample from data distribution
            images_l, label_onehot_l, label_ids_l = dataset.sample_labeled_data(
                training_images_l, training_labels_l, batchsize_l)
            images_u = dataset.sample_unlabeled_data(training_images_u, batchsize_u)

            # reconstruction phase
            qy_x_u, z_u = aae.encode_x_yz(images_u, apply_softmax=True)
            representation = aae.encode_yz_representation(qy_x_u, z_u)
            reconstruction_u = aae.decode_representation_x(representation)
            loss_reconstruction = F.mean_squared_error(
                aae.to_variable(images_u), reconstruction_u)
            aae.backprop_generator(loss_reconstruction)
            aae.backprop_decoder(loss_reconstruction)

            # adversarial phase: train the discriminators on true and fake y/z
            y_fake_u, z_fake_u = aae.encode_x_yz(images_u, apply_softmax=True)
            z_true_u = sampler.gaussian(batchsize_u, config.ndim_z, mean=0, var=1)
            y_true_u = sampler.onehot_categorical(batchsize_u, config.ndim_y)
            dz_true = aae.discriminate_z(z_true_u, apply_softmax=False)
            dy_true = aae.discriminate_y(y_true_u, apply_softmax=False)
            dz_fake = aae.discriminate_z(z_fake_u, apply_softmax=False)
            dy_fake = aae.discriminate_y(y_fake_u, apply_softmax=False)
            loss_discriminator_z = F.softmax_cross_entropy(
                dz_true, class_true) + F.softmax_cross_entropy(dz_fake, class_fake)
            loss_discriminator_y = F.softmax_cross_entropy(
                dy_true, class_true) + F.softmax_cross_entropy(dy_fake, class_fake)
            loss_discriminator = loss_discriminator_z + loss_discriminator_y
            aae.backprop_discriminator(loss_discriminator)

            # adversarial phase: train the encoder to fool both discriminators
            y_fake_u, z_fake_u = aae.encode_x_yz(images_u, apply_softmax=True)
            dz_fake = aae.discriminate_z(z_fake_u, apply_softmax=False)
            dy_fake = aae.discriminate_y(y_fake_u, apply_softmax=False)
            loss_generator_z = F.softmax_cross_entropy(dz_fake, class_true)
            loss_generator_y = F.softmax_cross_entropy(dy_fake, class_true)
            loss_generator = loss_generator_z + loss_generator_y
            aae.backprop_generator(loss_generator)

            # supervised phase
            log_qy_x_l, z_l = aae.encode_x_yz(images_l, apply_softmax=False)
            loss_supervised = F.softmax_cross_entropy(
                log_qy_x_l, aae.to_variable(label_ids_l))
            aae.backprop_generator(loss_supervised)

            # additional cost defined on the Euclidean distance between every
            # pair of cluster heads (minimizing -sum pushes the heads apart)
            distance = aae.compute_distance_of_cluster_heads()
            loss_cluster_head = -F.sum(distance)
            aae.backprop_cluster_head(loss_cluster_head)

            sum_loss_reconstruction += float(loss_reconstruction.data)
            sum_loss_supervised += float(loss_supervised.data)
            sum_loss_discriminator += float(loss_discriminator.data)
            sum_loss_generator += float(loss_generator.data)
            sum_loss_cluster_head += float(
                aae.nCr(config.ndim_y, 2) *
                config.cluster_head_distance_threshold + loss_cluster_head.data)

            if t % 10 == 0:
                progress.show(t, num_trains_per_epoch, {})

        aae.save(args.model_dir)

        # validation phase
        images_v_segments = np.split(validation_images, num_validation_data // 1000)
        labels_v_segments = np.split(validation_labels, num_validation_data // 1000)
        sum_accuracy = 0
        for images_v, labels_v in zip(images_v_segments, labels_v_segments):
            qy = aae.encode_x_yz(images_v, apply_softmax=True, test=True)[0]
            accuracy = F.accuracy(qy, aae.to_variable(labels_v))
            sum_accuracy += float(accuracy.data)
        validation_accuracy = sum_accuracy / len(images_v_segments)

        progress.show(
            num_trains_per_epoch, num_trains_per_epoch, {
                "loss_r": sum_loss_reconstruction / num_trains_per_epoch,
                "loss_s": sum_loss_supervised / num_trains_per_epoch,
                "loss_d": sum_loss_discriminator / num_trains_per_epoch,
                "loss_g": sum_loss_generator / num_trains_per_epoch,
                "loss_c": sum_loss_cluster_head / num_trains_per_epoch,
                "accuracy": validation_accuracy
            })

        # write accuracy to csv
        csv_results.append([epoch, validation_accuracy])
        data = pd.DataFrame(csv_results)
        data.columns = ["epoch", "accuracy"]
        data.to_csv("{}/result.csv".format(args.model_dir))
def gen(self, sess, bz):
    return sess.run(self.fake, feed_dict={self.z: gaussian(bz, FLAGS.z_dim)})
def main(run_load_from_file=False):
    # load MNIST images
    images, labels = dataset.load_test_images()

    # config
    opt = Operation()
    opt.check_dir(config.ckpt_dir, is_restart=False)
    opt.check_dir(config.log_dir, is_restart=True)

    # settings
    max_epoch = 510
    num_trains_per_epoch = 500
    batch_size_u = 100

    # training
    with tf.device(config.device):
        h = build_graph()

    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 log_device_placement=True)
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    saver = tf.train.Saver(max_to_keep=2)

    with tf.Session(config=sess_config) as sess:
        '''
        Load from checkpoint or start a new session
        '''
        if run_load_from_file:
            saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
            training_epoch_loss, _ = pickle.load(
                open(config.ckpt_dir + '/pickle.pkl', 'rb'))
        else:
            sess.run(tf.global_variables_initializer())
            training_epoch_loss = []

        # Recording loss per epoch
        process = Process()
        for epoch in range(max_epoch):
            process.start_epoch(epoch, max_epoch)

            '''
            Learning rate generator
            '''
            learning_rate = 0.0001

            # Recording loss per iteration
            sum_loss_reconstruction = 0
            sum_loss_discriminator_z = 0
            sum_loss_discriminator_img = 0
            sum_loss_generator_z = 0
            sum_loss_generator_img = 0

            process_iteration = Process()
            for i in range(num_trains_per_epoch):
                process_iteration.start_epoch(i, num_trains_per_epoch)

                # Inputs
                '''
                _l -> labeled
                _u -> unlabeled
                '''
                images_u = dataset.sample_unlabeled_data(images, batch_size_u)

                # draw "true" latent codes from the configured prior
                if config.distribution_sampler == 'swiss_roll':
                    z_true_u = sampler.swiss_roll(batch_size_u, config.ndim_z,
                                                  config.num_types_of_label)
                elif config.distribution_sampler == 'gaussian_mixture':
                    z_true_u = sampler.gaussian_mixture(batch_size_u, config.ndim_z,
                                                        config.num_types_of_label)
                elif config.distribution_sampler == 'uniform_desk':
                    z_true_u = sampler.uniform_desk(batch_size_u, config.ndim_z,
                                                    radius=2)
                elif config.distribution_sampler == 'gaussian':
                    z_true_u = sampler.gaussian(batch_size_u, config.ndim_z, var=1)
                elif config.distribution_sampler == 'uniform':
                    z_true_u = sampler.uniform(batch_size_u, config.ndim_z,
                                               minv=-1, maxv=1)

                # reconstruction phase
                _, loss_reconstruction = sess.run(
                    [h.opt_r, h.loss_r],
                    feed_dict={h.x: images_u, h.lr: learning_rate})

                # adversarial phase for discriminator_z
                images_u_s = dataset.sample_unlabeled_data(images, batch_size_u)
                _, loss_discriminator_z = sess.run(
                    [h.opt_dz, h.loss_dz],
                    feed_dict={h.x: images_u, h.z: z_true_u, h.lr: learning_rate})
                _, loss_discriminator_img = sess.run(
                    [h.opt_dimg, h.loss_dimg],
                    feed_dict={h.x: images_u, h.x_s: images_u_s, h.lr: learning_rate})

                # adversarial phase for generator
                _, loss_generator_z = sess.run(
                    [h.opt_e, h.loss_e],
                    feed_dict={h.x: images_u, h.lr: learning_rate})
                _, loss_generator_img = sess.run(
                    [h.opt_d, h.loss_d],
                    feed_dict={h.x: images_u, h.lr: learning_rate})

                sum_loss_reconstruction += loss_reconstruction
                sum_loss_discriminator_z += loss_discriminator_z
                sum_loss_discriminator_img += loss_discriminator_img
                sum_loss_generator_z += loss_generator_z
                sum_loss_generator_img += loss_generator_img

                if i % 1000 == 0:
                    process_iteration.show_table_2d(
                        i, num_trains_per_epoch, {
                            'reconstruction': sum_loss_reconstruction / (i + 1),
                            'discriminator_z': sum_loss_discriminator_z / (i + 1),
                            'discriminator_img': sum_loss_discriminator_img / (i + 1),
                            'generator_z': sum_loss_generator_z / (i + 1),
                            'generator_img': sum_loss_generator_img / (i + 1),
                        })

            average_loss_per_epoch = [
                sum_loss_reconstruction / num_trains_per_epoch,
                sum_loss_discriminator_z / num_trains_per_epoch,
                sum_loss_discriminator_img / num_trains_per_epoch,
                sum_loss_generator_z / num_trains_per_epoch,
                sum_loss_generator_img / num_trains_per_epoch,
                (sum_loss_discriminator_z + sum_loss_discriminator_img) / num_trains_per_epoch,
                (sum_loss_generator_z + sum_loss_generator_img) / num_trains_per_epoch
            ]
            training_epoch_loss.append(average_loss_per_epoch)
            training_loss_name = [
                'reconstruction',
                'discriminator_z',
                'discriminator_img',
                'generator_z',
                'generator_img',
                'discriminator',
                'generator'
            ]

            if epoch % 1 == 0:
                process.show_bar(
                    epoch, max_epoch, {
                        'loss_r': average_loss_per_epoch[0],
                        'loss_d': average_loss_per_epoch[5],
                        'loss_g': average_loss_per_epoch[6]
                    })
                plt.scatter_labeled_z(
                    sess.run(h.z_r, feed_dict={h.x: images[:1000]}),
                    [int(var) for var in labels[:1000]],
                    dir=config.log_dir,
                    filename='z_representation-{}'.format(epoch))

            if epoch % 10 == 0:
                saver.save(sess,
                           os.path.join(config.ckpt_dir, 'model_ckptpoint'),
                           global_step=epoch)
                pickle.dump((training_epoch_loss, training_loss_name),
                            open(config.ckpt_dir + '/pickle.pkl', 'wb'))
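# sampler.uniform_desk ("desk" presumably meaning disk) and sampler.uniform
# are not shown above; minimal sketches under that assumption:
import numpy as np

def uniform_desk(batchsize, ndim, radius=2.0):
    assert ndim % 2 == 0
    z = np.empty((batchsize, ndim), dtype=np.float32)
    for d in range(0, ndim, 2):
        # sqrt makes the samples area-uniform over the disk
        r = radius * np.sqrt(np.random.uniform(0.0, 1.0, batchsize))
        theta = np.random.uniform(0.0, 2.0 * np.pi, batchsize)
        z[:, d], z[:, d + 1] = r * np.cos(theta), r * np.sin(theta)
    return z

def uniform(batchsize, ndim, minv=-1.0, maxv=1.0):
    return np.random.uniform(minv, maxv, (batchsize, ndim)).astype(np.float32)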
def main(run_load_from_file=False):
    # load MNIST images
    images, labels = dataset.load_train_images()

    # config
    opt = Operation()
    opt.check_dir(config.ckpt_dir, is_restart=False)
    opt.check_dir(config.log_dir, is_restart=True)

    # settings
    max_epoch = 510
    num_trains_per_epoch = 500
    batch_size_l = 100
    batch_size_u = 100

    # create semi-supervised split
    num_validation_data = 10000
    num_labeled_data = 100
    num_types_of_label = 10
    training_images_l, training_labels_l, training_images_u, validation_images, validation_labels = dataset.create_semisupervised(
        images, labels, num_validation_data, num_labeled_data,
        num_types_of_label)

    # training
    with tf.device(config.device):
        h = build_graph()

    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 log_device_placement=True)
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    saver = tf.train.Saver(max_to_keep=2)

    with tf.Session(config=sess_config) as sess:
        '''
        Load from checkpoint or start a new session
        '''
        if run_load_from_file:
            saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_dir))
            training_epoch_loss, _ = pickle.load(
                open(config.ckpt_dir + '/pickle.pkl', 'rb'))
        else:
            sess.run(tf.global_variables_initializer())
            training_epoch_loss = []

        # Recording loss per epoch
        process = Process()
        for epoch in range(max_epoch):
            process.start_epoch(epoch, max_epoch)

            '''
            Learning rate generator
            '''
            learning_rate = opt.ladder_learning_rate(epoch + len(training_epoch_loss))

            # Recording loss per iteration
            training_loss_set = []
            sum_loss_reconstruction = 0
            sum_loss_supervised = 0
            sum_loss_discriminator = 0
            sum_loss_generator = 0
            sum_loss_cluster_head = 0

            process_iteration = Process()
            for i in range(num_trains_per_epoch):
                process_iteration.start_epoch(i, num_trains_per_epoch)

                # sample from data distribution
                images_l, label_onehot_l, label_id_l = dataset.sample_labeled_data(
                    training_images_l, training_labels_l, batch_size_l)
                images_u = dataset.sample_unlabeled_data(training_images_u,
                                                         batch_size_u)

                # additional cost defined on the distances between every pair
                # of cluster heads; note it is only optimized during the first
                # epoch, so later iterations log the last value of loss_transform
                if epoch == 0:
                    for j in range(5):
                        starting_labels, ending_labels = dataset.cluster_create_dataset(
                            config.ndim_y)
                        _, loss_transform = sess.run(
                            [h.opt_t, h.loss_t],
                            feed_dict={
                                h.g_s: starting_labels,
                                h.g_e: ending_labels,
                                h.lr: learning_rate
                            })

                # reconstruction phase
                _, loss_reconstruction = sess.run(
                    [h.opt_r, h.loss_r],
                    feed_dict={h.x: images_u, h.lr: learning_rate})

                z_true_u = sampler.gaussian(batch_size_u, config.ndim_z, mean=0, var=1)
                y_true_u = sampler.onehot_categorical(batch_size_u, config.ndim_y)

                # adversarial phase for discriminator
                _, loss_discriminator_y = sess.run(
                    [h.opt_dy, h.loss_dy],
                    feed_dict={h.x: images_u, h.y: y_true_u, h.lr: learning_rate})
                _, loss_discriminator_z = sess.run(
                    [h.opt_dz, h.loss_dz],
                    feed_dict={h.x: images_u, h.z: z_true_u, h.lr: learning_rate})
                loss_discriminator = loss_discriminator_y + loss_discriminator_z

                # adversarial phase for generator
                _, loss_generator_y, loss_generator_z = sess.run(
                    [h.opt_e, h.loss_gy, h.loss_gz],
                    feed_dict={h.x: images_u, h.lr: learning_rate})
                loss_generator = loss_generator_y + loss_generator_z

                # supervised phase
                _, loss_generator_supervised = sess.run(
                    [h.opt_ey, h.loss_ey],
                    feed_dict={h.x: images_l, h.y_s: label_id_l, h.lr: learning_rate})

                training_loss_set.append([
                    loss_reconstruction,
                    loss_discriminator,
                    loss_discriminator_y,
                    loss_discriminator_z,
                    loss_generator,
                    loss_generator_z,
                    loss_generator_y,
                    loss_generator_supervised,
                    loss_transform
                ])
                sum_loss_reconstruction += loss_reconstruction
                sum_loss_discriminator += loss_discriminator
                sum_loss_generator += loss_generator
                sum_loss_supervised += loss_generator_supervised
                sum_loss_cluster_head += loss_transform

                if i % 1000 == 0:
                    process_iteration.show_table_2d(
                        i, num_trains_per_epoch, {
                            'reconstruction': sum_loss_reconstruction / (i + 1),
                            'discriminator': sum_loss_discriminator / (i + 1),
                            'generator': sum_loss_generator / (i + 1),
                            'supervise': sum_loss_supervised / (i + 1),
                            'cluster_head': sum_loss_cluster_head / (i + 1)
                        })

            # At the end of each epoch, summarize the losses
            average_training_loss_per_epoch = np.mean(
                np.array(training_loss_set), axis=0)

            # validation phase
            images_v_segments = np.split(validation_images,
                                         num_validation_data // 1000)
            labels_v_segments = np.split(validation_labels,
                                         num_validation_data // 1000)
            sum_accuracy = 0
            for images_v, labels_v in zip(images_v_segments, labels_v_segments):
                y_v = sess.run(h.y_r, feed_dict={h.x: images_v})
                accuracy = opt.compute_accuracy(y_v, labels_v)
                sum_accuracy += accuracy
            validation_accuracy = sum_accuracy / len(images_v_segments)

            # append validation accuracy to the training loss
            average_loss_per_epoch = np.append(average_training_loss_per_epoch,
                                               validation_accuracy)
            training_epoch_loss.append(average_loss_per_epoch)
            loss_name_per_epoch = [
                'reconstruction',
                'discriminator',
                'discriminator_y',
                'discriminator_z',
                'generator',
                'generator_z',
                'generator_y',
                'supervised',
                'transform',
                'validation_accuracy'
            ]

            if epoch % 1 == 0:
                process.show_bar(
                    epoch, max_epoch, {
                        'loss_r': average_loss_per_epoch[0],
                        'loss_d': average_loss_per_epoch[1],
                        'loss_g': average_loss_per_epoch[4],
                        'loss_v': average_loss_per_epoch[9],
                    })
                plt.tile_images(sess.run(h.x_, feed_dict={h.x: images_u}),
                                dir=config.log_dir,
                                filename='x_rec_epoch_{}'.format(str(epoch).zfill(3)))

            if epoch % 10 == 0:
                saver.save(sess,
                           os.path.join(config.ckpt_dir, 'model_ckptpoint'),
                           global_step=epoch)
                pickle.dump((training_epoch_loss, loss_name_per_epoch),
                            open(config.ckpt_dir + '/pickle.pkl', 'wb'))
                plt.plot_double_scale_trend(config.ckpt_dir)