def main():
    """Entry point: prepare directories, restore or persist the model
    version, build the GAN, and run the training loop with queue runners.

    Relies on module-level state defined elsewhere in this file:
    ``FLAGS``, ``k_p``, ``start_epoch``, ``GAN``, ``load_desc``,
    ``load_model``, ``save_model``, ``ProgressBar`` and ``tf``.
    """
    global k_p

    # Ensure the working/data directories exist.
    if not os.path.exists(FLAGS.working_directory):
        os.makedirs(FLAGS.working_directory)
    if not os.path.exists(FLAGS.data_directory):
        os.makedirs(FLAGS.data_directory)

    # Restore the persisted model version if present; otherwise create the
    # save directory and record the current FLAGS.version so later runs
    # resume consistently.  `with` guarantees the file is closed even on
    # error (the original closed handles manually).
    ver_path = os.path.join(FLAGS.working_directory, "save", "ver")
    if os.path.exists(ver_path):
        with open(ver_path) as ver_desc:
            FLAGS.version = int(ver_desc.readline())
    else:
        save_dir = os.path.join(FLAGS.working_directory, "save")
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        with open(ver_path, 'w') as ver_desc:
            ver_desc.write(str(FLAGS.version) + "\n")

    load_desc()
    gan = GAN(FLAGS.version, FLAGS.clip_abs, FLAGS.hidden_size,
              FLAGS.batch_size, FLAGS.learning_rate, FLAGS.data_directory,
              os.path.join(FLAGS.working_directory, "log"))
    saver = tf.train.Saver()
    load_model(gan.sess, saver)

    # Start the input-pipeline queue runners (TF1-style input queues).
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=gan.sess, coord=coord)

    for epoch in range(start_epoch, FLAGS.max_epoch):
        pbar = ProgressBar()
        for update in pbar(range(FLAGS.updates_per_epoch)):
            # k_p is threaded through successive updates (module global).
            k_p = gan.update_params(FLAGS.global_step, FLAGS.learning_rate, k_p)
            FLAGS.global_step = FLAGS.global_step + 1
            # Halve the learning rate every FLAGS.lr_update global steps.
            if FLAGS.global_step % FLAGS.lr_update == FLAGS.lr_update - 1:
                FLAGS.learning_rate = FLAGS.learning_rate * 0.5
                # Single-arg print(...) is valid in both Python 2 and 3.
                print("update learning rate:" + str(FLAGS.learning_rate))
        cm = gan.get_loss()
        print("loss: " + str(cm))
        gan.generate_and_save_images(64, FLAGS.working_directory)
        save_model(gan.sess, saver)

    # Ask the queue-runner threads to stop, wait for them, then close.
    coord.request_stop()
    coord.join(threads)
    gan.sess.close()
# One pass over MNIST: for each of 40 batches, build a 5-nearest-neighbour
# graph over the batch and feed each sample's neighbour flags/distances to
# the model, accumulating the reported loss.
training_loss = 0.0
batch_size = 128
for batch_idx in range(40):  # renamed from 'i': the inner loop shadowed it
    images, _ = mnist.train.next_batch(batch_size)
    # Binary adjacency (1 = is-neighbour) and pairwise-distance kNN graphs.
    flagReal = kneighbors_graph(images, 5, mode='connectivity',
                                include_self=True).toarray()
    disReal = kneighbors_graph(images, 5, mode='distance',
                               include_self=True).toarray()
    # (flag - 1)^2 is 1 for non-neighbours and 0 for neighbours, so this
    # assigns a large constant cost (100) to non-neighbour pairs while
    # keeping the flag weight (1) for neighbour pairs.
    flagReal2 = (flagReal - 1)
    flagReal2 = flagReal2 * flagReal2
    disReal = 100 * flagReal2 + flagReal * 1
    # NOTE(review): the original also built Xtemp2 = np.matlib.repmat(
    # images[sample_idx], batch_size, 1) here but never used it; the dead
    # computation has been removed.
    for sample_idx in range(batch_size):
        disTemp = disReal[sample_idx, :].reshape(1, batch_size)
        flagTemp = flagReal[sample_idx, :].reshape(1, batch_size)
        loss_value = model.update_params(images, images, flagTemp, disTemp)
        training_loss += loss_value
# NOTE(review): the divisor 1000*128 does not match the 40*128 updates
# actually performed above — confirm the intended normalization.
training_loss = training_loss / (1000 * 128)
print("Loss %f" % training_loss)
model.generate_and_save_images(128, "")
f_inputs=images[1], sum_epoch=epoch) del images if FLAGS.save_imgs: images = novaSet.get_data(which_config = ['data'],\ which_type = None,\ indices = image_indices,\ is_train=FLAGS.save_train) print("Generating and saving images for epoch {}...". format(epoch)) image_dir = join(FLAGS.working_directory, "") + join( config, config_type + "/imgs") model.generate_and_save_images(images[0], image_indices, join(image_dir, "fake"), epoch) print("Saving final model...") model.save_model(FLAGS.max_epoch + 1) print("Generating and saving final image summary") images = novaSet.next_batch(which_config=[config, 'data'], which_type=config_type, batch_size=5) model.save_image_summary(r_inputs=images[0], f_inputs=images[1], sum_epoch=epoch) del images if FLAGS.save_imgs: