sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
sess.run(tf.global_variables_initializer())

np.random.seed(int(time.time()))

# Build a fixed 16-image test set: scaled originals plus a reference digit and
# uniform noise, clipped to [0, 1].
test_images = mnist.test.images[0:16]
test_origin = test_images * 0.5
test_ref, _ = mnist.test.next_batch(16)
test_input = np.minimum(test_origin + test_ref * 0.7 + np.random.uniform(size=(16, 784)), 1.0)
test_ref = test_ref * 0.85
test_origin = test_origin / 0.5 * 0.9

# Save the noisy input, ground truth and reference grids for later comparison.
my_lib.mnist_4by4_save(np.reshape(test_input, (-1, 784)), file_name + '/input_noise.png')
my_lib.mnist_4by4_save(np.reshape(test_origin, (-1, 784)), file_name + '/ground_true.png')
my_lib.mnist_4by4_save(np.reshape(test_ref, (-1, 784)), file_name + '/input_ref.png')

log_txt = open(file_name + '/log.txt', 'w')
content_error = []
mse_error = []
cross_entropy_error = []

for i in range(1000000):
    # Each training batch is rescaled by a random brightness factor.
    train_images, _ = mnist.train.next_batch(100)
    scale = np.random.uniform(0.2, 1.0)
    train_origin = train_images * scale
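# The following evaluation snippet looks up tensors by name from sess.graph, which
# presupposes the trained graph has already been loaded into the session. A minimal
# sketch of the usual TF1 restore step it assumes (the checkpoint prefix
# file_name + '/model.ckpt' is a hypothetical name, not taken from this code):
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
saver = tf.train.import_meta_graph(file_name + '/model.ckpt.meta')
saver.restore(sess, file_name + '/model.ckpt')
# The placeholders u, ref, t, isTrain and the generator output G_y used below would
# be recovered the same way as mse and content_loss, via get_tensor_by_name, provided
# they were given explicit names when the training graph was built.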
# Look up the loss tensors and the optimizer op by the names they were given at training time.
mse = sess.graph.get_tensor_by_name("mse:0")
content_loss = sess.graph.get_tensor_by_name("content_loss:0")
content_optim = sess.graph.get_operation_by_name("content_optim")

# Rebuild the same fixed 16-image test set with a deterministic seed,
# then re-seed from the clock for the rest of the run.
np.random.seed(1)
test_images = mnist.test.images[0:16]
test_origin = test_images * 0.5
test_ref, _ = mnist.test.next_batch(16)
test_input = np.minimum(test_origin + test_ref * 0.7 + np.random.uniform(size=(16, 784)), 1.0)
np.random.seed(int(time.time()))

my_lib.mnist_4by4_save(np.reshape(test_input, (-1, 784)), file_name + '/1_input_noise.png')
my_lib.mnist_4by4_save(np.reshape(test_origin, (-1, 784)), file_name + '/1_ground_true.png')
my_lib.mnist_4by4_save(np.reshape(test_ref, (-1, 784)), file_name + '/1_input_ref.png')

# Run the restored generator on the noisy test batch and save its output.
r, val_e = sess.run(
    [G_y, content_loss],
    feed_dict={
        u: np.reshape(test_input, (-1, 28, 28, 1)),
        ref: np.reshape(test_ref, (-1, 28, 28, 1)),
        t: np.reshape(test_origin, (-1, 28, 28, 1)),
        isTrain: False
    })
my_lib.mnist_4by4_save(np.reshape(r, (-1, 784)), file_name + '/1_result.png')
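# my_lib is the project's own helper module and its implementation is not shown in
# this section. A minimal sketch of what mnist_4by4_save might look like, assuming it
# tiles 16 flattened 28x28 digits in [0, 1] into a 4x4 grid and writes a grayscale PNG:
import numpy as np
from PIL import Image

def mnist_4by4_save(images, path):
    imgs = np.reshape(images, (4, 4, 28, 28))                    # (row, col, y, x)
    grid = imgs.transpose(0, 2, 1, 3).reshape(4 * 28, 4 * 28)    # tile into 112x112
    Image.fromarray((grid * 255.0).astype(np.uint8)).save(path)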