def test(self, config):
    # NOTE: if train, the nx, ny are ignored
    # Read image files, build their sub-images, and save them in h5 file format
    nx, ny = input_setup_test(config)

    # Get the test.h5 file
    data_dir = checkpoint_dir(config)

    # Read the h5 format data file
    input_, label_ = read_data(data_dir)

    # Load the checkpoint used to test, or to pretrain and refine the model
    self.load(config.checkpoint_dir)

    print("Now Start Testing...")
    time_ = time.time()
    result = self.pred.eval({self.images: input_}) + input_

    # Image merge: only the Y channel
    image = merge(result, [nx, ny], self.c_dim)
    # Convert from the Y channel back to RGB
    image = Ycbcr2RGB(image, config)

    # Show image
    #checkimage(image)
    #merge(result, [nx, ny], self.c_dim)
    checkimage(image)
    imsave(image, config.result_dir + '/result.png', config)
    print("time: [%4.4f]" % (time.time() - time_))
def train(self, config):
    # NOTE: if train, the nx, ny are ignored
    input_setup(config)
    data_dir = checkpoint_dir(config)
    input_, label_ = read_data(data_dir)
    print(input_.shape, label_.shape)
    print(config.is_train)

    # Stochastic gradient descent with the standard backpropagation
    self.train_op = tf.train.AdamOptimizer(
        learning_rate=config.learning_rate).minimize(self.loss)
    tf.initialize_all_variables().run()

    counter = 0
    time_ = time.time()
    self.load(config.checkpoint_dir)

    # Train
    if config.is_train:
        print("Now Start Training...")
        for ep in range(config.epoch):
            # Run over the sub-images batch by batch
            batch_idxs = len(input_) // config.batch_size
            for idx in range(0, batch_idxs):
                batch_images = input_[idx * config.batch_size:(idx + 1) * config.batch_size]
                batch_labels = label_[idx * config.batch_size:(idx + 1) * config.batch_size]
                counter += 1
                _, err = self.sess.run([self.train_op, self.loss],
                                       feed_dict={
                                           self.images: batch_images,
                                           self.labels: batch_labels
                                       })
                if counter % 10 == 0:
                    print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                          % ((ep + 1), counter, time.time() - time_, err))
                    #print(label_[1] - self.pred.eval({self.images: input_})[1], 'loss:]', err)
                if counter % 500 == 0:
                    self.save(config.checkpoint_dir, counter)
    # Test
    else:
        print("Now Start Testing...")
        result = self.pred.eval({
            self.images: input_[0].reshape(1, self.h, self.w, self.c_dim)
        })
        x = np.squeeze(result)
        # checkimage(x)
        print(x.shape)
        imsave(x, 'result/result.png', config)
def train(self, config):
    err_li = []
    # NOTE: if train, the nx, ny are ignored
    nx, ny = input_setup(config)
    data_dir = checkpoint_dir(config)
    input_, label_ = read_data(data_dir)

    # Stochastic gradient descent with the standard backpropagation
    #self.train_op = tf.train.GradientDescentOptimizer(config.learning_rate).minimize(self.loss)
    self.train_op = tf.train.AdamOptimizer(
        learning_rate=config.learning_rate).minimize(self.loss)  # minimize the loss w.r.t. the weights and biases
    tf.initialize_all_variables().run()  # the session starts running here

    counter = 0
    time_ = time.time()
    self.load(config.checkpoint_dir)

    # Train
    if config.is_train:
        print("Now Start Training...")
        for ep in range(config.epoch):  # total number of epochs to run
            # Run over the sub-images batch by batch
            batch_idxs = len(input_) // config.batch_size
            for idx in range(0, batch_idxs):  # 128 samples per batch
                batch_images = input_[idx * config.batch_size:(idx + 1) * config.batch_size]
                batch_labels = label_[idx * config.batch_size:(idx + 1) * config.batch_size]
                counter += 1
                _, err = self.sess.run([self.train_op, self.loss],
                                       feed_dict={
                                           self.images: batch_images,
                                           self.labels: batch_labels
                                       })
                # feed_dict passes the batches into self.images and self.labels defined in build_model
                err_li.append(err)
                if counter % 10 == 0:
                    print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                          % ((ep + 1), counter, time.time() - time_, err))
                    #print(label_[1] - self.pred.eval({self.images: input_})[1], 'loss:]', err)
                if counter % 500 == 0:
                    self.save(config.checkpoint_dir, counter)
    # Test
    else:
        print("Now Start Testing...")
        #print("nx", "ny", nx, ny)
        result = self.pred.eval({self.images: input_})
        #print(label_[1] - result[1])
        image = merge(result, [nx, ny], self.c_dim)
        #checkimage(image)
        imsave(image, config.result_dir + '/result.png', config)
def test(self, config):
    print('Testing...')

    nx, ny = input_setup2(config)
    data_dir = checkpoint_dir(config)
    input_, label_ = read_data(data_dir)

    self.load(config.checkpoint_dir)

    # Test
    result = self.pred.eval({self.images: input_})
    image = merge(result, [nx, ny], self.c_dim)
    #image_LR = merge(input_, [nx, ny], self.c_dim)
    #checkimage(image_LR)

    # Saving Image
    base, ext = os.path.basename(config.test_img).split('.')
    imsave(image, os.path.join(config.result_dir, base + '.png'), config)
def test(self, config):
    input_setup(config)
    data_dir = checkpoint_dir(config)
    input_, label_ = read_data(data_dir)
    print(input_.shape, label_.shape)
    print(config.is_train)

    counter = 0
    time_ = time.time()
    self.load(config.checkpoint_dir)

    print("Now Start Testing...")
    result = self.pred.eval(
        {self.images: input_[0].reshape(1, self.h, self.w, self.c_dim)})
    x = np.squeeze(result)
    checkimage(x)
    print(x.shape)
    imsave(x, 'result/result2.png', config)
def train(self, config):
    # NOTE: if train, the nx, ny are ignored
    nx, ny = input_setup(config)
    data_dir = checkpoint_dir(config)
    input_, label_ = read_data(data_dir)

    # Stochastic gradient descent with the standard backpropagation
    # NOTE: learning rate decay
    global_step = tf.Variable(0, trainable=False)
    #learning_rate = tf.train.exponential_decay(config.learning_rate, global_step * config.batch_size, len(input_) * 100, 0.1, staircase=True)

    # NOTE: clip gradients
    opt = tf.train.AdamOptimizer(learning_rate=config.learning_rate)
    grad_and_value = opt.compute_gradients(self.loss)
    clip = tf.Variable(config.clip_grad, name='clip')
    capped_gvs = [(tf.clip_by_value(grad, -(clip), clip), var)
                  for grad, var in grad_and_value]
    self.train_op = opt.apply_gradients(capped_gvs, global_step=global_step)
    #self.train_op = tf.train.AdamOptimizer(learning_rate=config.learning_rate).minimize(self.loss)

    tf.initialize_all_variables().run()

    counter = 0
    time_ = time.time()
    self.load(config.checkpoint_dir)

    # Train
    if config.is_train:
        print("Now Start Training...")
        for ep in range(config.epoch):
            # Run over the sub-images batch by batch
            batch_idxs = len(input_) // config.batch_size
            for idx in range(0, batch_idxs):
                batch_images = input_[idx * config.batch_size:(idx + 1) * config.batch_size]
                batch_labels = label_[idx * config.batch_size:(idx + 1) * config.batch_size]
                counter += 1
                _, err = self.sess.run([self.train_op, self.loss],
                                       feed_dict={
                                           self.images: batch_images,
                                           self.labels: batch_labels
                                       })
                if counter % 10 == 0:
                    print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                          % ((ep + 1), counter, time.time() - time_, err))
                if counter % 500 == 0:
                    self.save(config.checkpoint_dir, counter)
    # Test
    else:
        print("Now Start Testing...")
        result = self.pred.eval({self.images: input_}) + input_
        image = merge(result, [nx, ny], self.c_dim)
        checkimage(merge(result, [nx, ny], self.c_dim))
        #checkimage(image_LR)
        imsave(image, config.result_dir + '/result.png', config)
def prepare_data(config):
    # Prepare data only if load_existing_data is False
    if not config.load_existing_data:
        input_setup(config)

    # Load data from data_dir
    print('Loading data...')
    data_dir = checkpoint_dir(config)
    input_, label_, paths_ = read_data(data_dir, config)

    # Shuffle training data
    print('Shuffling data...')
    numData = np.arange(input_.shape[0])
    np.random.shuffle(numData)
    input_ = input_[numData]
    label_ = label_[numData]

    # Prepare frame sets for feeding into different spatial
    # transformers if training mode is 2
    if FLAGS.train_mode == 2:
        print("Preparing frame sets for spatial transformers...")
        curr_prev_imgs = input_[:, :, :, 0:(2 * config.c_dim)]
        curr_next_imgs = np.concatenate(
            (input_[:, :, :, 0:config.c_dim],
             input_[:, :, :, (2 * config.c_dim):(3 * config.c_dim)]),
            axis=3)

        curr_prev_imgs = tf.cast(curr_prev_imgs, tf.float32)
        curr_next_imgs = tf.cast(curr_next_imgs, tf.float32)
        label_ = tf.cast(label_, tf.float32)

        # Provide data one batch at a time to tf.train.batch
        input_queue = tf.train.slice_input_producer(
            [curr_prev_imgs, curr_next_imgs, label_], shuffle=False)
        x1, x2, y = tf.train.batch(input_queue, batch_size=FLAGS.batch_size)
        return x1, x2, y
    elif FLAGS.train_mode == 4:
        # Upscale input data using bicubic interpolation
        print('Upscaling training data using Bicubic Interpolation...')
        input_new = []
        for i in range(len(input_)):
            input_new.append(
                sp.misc.imresize(input_[i],
                                 (config.image_size * config.scale,
                                  config.image_size * config.scale),
                                 interp='bicubic'))
        input_ = np.array(input_new)

        input_ = tf.cast(input_, tf.float32)
        label_ = tf.cast(label_, tf.float32)

        # Provide data one batch at a time to tf.train.batch
        input_queue = tf.train.slice_input_producer([input_, label_],
                                                     shuffle=False)
        x1, y = tf.train.batch(input_queue, batch_size=FLAGS.batch_size)
        return x1, y
    else:
        input_ = tf.cast(input_, tf.float32)
        label_ = tf.cast(label_, tf.float32)

        # Provide data one batch at a time to tf.train.batch
        input_queue = tf.train.slice_input_producer([input_, label_],
                                                     shuffle=False)
        x1, y = tf.train.batch(input_queue, batch_size=FLAGS.batch_size)
        return x1, y
def train(self, config):
    # NOTE: if train, the nx, ny are ignored
    nx, ny = input_setup(config)
    data_dir = checkpoint_dir(config)
    input_, label_ = read_data(data_dir)

    # Stochastic gradient descent with the standard backpropagation
    self.train_op = tf.train.AdamOptimizer(
        learning_rate=config.learning_rate).minimize(self.loss)
    tf.initialize_all_variables().run()

    counter = 0
    time_ = time.time()
    self.load(config.checkpoint_dir)

    # Train
    if config.is_train:
        print("Now Start Training...")
        for ep in range(config.epoch):
            # Run over the sub-images batch by batch
            batch_idxs = len(input_) // config.batch_size
            for idx in range(0, batch_idxs):
                batch_images = input_[idx * config.batch_size:(idx + 1) * config.batch_size]
                batch_labels = label_[idx * config.batch_size:(idx + 1) * config.batch_size]
                counter += 1
                _, err = self.sess.run(
                    [self.train_op, self.loss],
                    feed_dict={
                        self.images: batch_images,
                        self.labels: batch_labels,
                        self.batch: 1,
                        self.deconv_output: [
                            self.batch_size, self.label_size,
                            self.label_size, 256
                        ]
                    })
                if counter % 10 == 0:
                    print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                          % ((ep + 1), counter, time.time() - time_, err))
                if counter % 500 == 0:
                    self.save(config.checkpoint_dir, counter)
    # Test
    else:
        print("Now Start Testing...")
        res = list()
        for i in range(len(input_)):
            result = self.pred.eval({
                self.images:
                    input_[i].reshape(1, input_[i].shape[0], input_[i].shape[1], 3),
                self.deconv_output: [1, self.label_size, self.label_size, 256]
            })
            # Map back to the interval [0, 1]
            x = np.squeeze(result)
            x = (x + 1) / 2
            res.append(x)
        res = np.asarray(res)
        res = merge(res, [nx, ny], self.c_dim)

        if self.test_img == "":
            imsave(res, config.result_dir + '/result.png', config)
        else:
            string = self.test_img.split(".")
            print(string)
            imsave(res, config.result_dir + '/' + string[0] + '.png', config)
def train(self, config):
    # NOTE: if train, the nx, ny are ignored
    # Read image files, build their sub-images, and save them in h5 file format
    #nx, ny = input_setup(config)

    # Get the target (train/test) .h5 file
    data_dir = checkpoint_dir(config)

    # Read the h5 format data file
    #input_, label_ = read_data(data_dir)
    input_, label_ = read_data_test(data_dir)

    # Stochastic gradient descent with the standard backpropagation
    # NOTE: learning rate decay
    global_step = tf.Variable(0, trainable=False)
    #learning_rate = tf.train.exponential_decay(config.learning_rate, global_step * config.batch_size, len(input_) * 100, 0.1, staircase=True)

    # NOTE: clip gradients
    opt = tf.train.AdamOptimizer(learning_rate=config.learning_rate)
    grad_and_value = opt.compute_gradients(self.loss)
    clip = tf.Variable(config.clip_grad, name='clip')
    capped_gvs = [(tf.clip_by_value(grad, -(clip), clip), var)
                  for grad, var in grad_and_value]
    self.train_op = opt.apply_gradients(capped_gvs, global_step=global_step)
    #self.train_op = tf.train.AdamOptimizer(learning_rate=config.learning_rate).minimize(self.loss)

    # Initialize all variables
    tf.global_variables_initializer().run()

    counter = 0
    time_ = time.time()

    # Load the checkpoint used to test, or to pretrain and refine the model
    self.load(config.checkpoint_dir)

    # Train
    if config.is_train:
        print("Now Start Training...")
        for ep in range(config.epoch):
            # Run over the sub-images batch by batch
            batch_idxs = len(input_) // config.batch_size
            for idx in range(0, batch_idxs):
                batch_images = input_[idx * config.batch_size:(idx + 1) * config.batch_size]
                batch_labels = label_[idx * config.batch_size:(idx + 1) * config.batch_size]
                counter += 1

                # Transpose the tensors from (64, 1, 41, 41) to (batch, 41, 41, 1)
                batch_images = np.transpose(batch_images, (0, 2, 3, 1))
                batch_labels = np.transpose(batch_labels, (0, 2, 3, 1))

                _, err = self.sess.run([self.train_op, self.loss],
                                       feed_dict={
                                           self.images: batch_images,
                                           self.labels: batch_labels
                                       })
                if counter % 100 == 0:
                    print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                          % ((ep + 1), counter, time.time() - time_, err))
                if counter % 1000 == 0:
                    self.save(config.checkpoint_dir, counter)
def train(self, config):
    # NOTE: if train, the nx, ny are ignored
    #print("config.is_train:", config.is_train)
    nx, ny, original_shape = input_setup(config)
    #print("nx, ny, original_shape:", nx, ny, original_shape)
    data_dir = checkpoint_dir(config)

    print("reading data..")
    input_, label_ = read_data(data_dir)
    print("input_", input_.shape)

    merged_summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter("./log/train_300")  #, self.sess.graph)
    #self.summary_writer = tf.summary.FileWriter("./log/", tf.get_default_graph())

    # Stochastic gradient descent with the standard backpropagation
    #self.train_op = tf.train.GradientDescentOptimizer(config.learning_rate).minimize(self.loss)
    self.optimizer = tf.train.AdamOptimizer(learning_rate=config.learning_rate)
    self.train_op = self.optimizer.minimize(self.loss)
    #self.train_op = tf.train.AdamOptimizer(learning_rate=config.learning_rate).minimize(self.loss)
    tf.initialize_all_variables().run()

    counter = 0
    time_ = time.time()
    self.load(config.checkpoint_dir)

    # Train
    if config.is_train:
        print("Now Start Training...")
        #for ep in range(config.epoch):
        for ep in range(300, 1000 + 1, 1):
            #print("ep:", ep)
            #sys.exit()
            loss_summary_per_batch = []

            # Run over the sub-images batch by batch
            batch_idxs = len(input_) // config.batch_size
            for idx in range(0, batch_idxs):
                batch_images = input_[idx * config.batch_size:(idx + 1) * config.batch_size]
                batch_labels = label_[idx * config.batch_size:(idx + 1) * config.batch_size]
                counter += 1
                _, err, summary = self.sess.run(
                    [self.train_op, self.loss, merged_summary_op],
                    feed_dict={self.images: batch_images, self.labels: batch_labels})

                summary_pb = tf.summary.Summary()
                summary_pb.ParseFromString(summary)
                summaries = {}
                for val in summary_pb.value:
                    summaries[val.tag] = val.simple_value
                #print("summaries:", summaries)
                loss_summary_per_batch.append(summaries['loss'])
                summary_writer.add_summary(summary, (ep) * counter)
                #self.summary_writer.add_summary(summary, (ep+1) * counter)

                if counter % 1000 == 0:
                    print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                          % ((ep), counter, time.time() - time_, err))
                    #print(label_[1] - self.pred.eval({self.images: input_})[1], 'loss:]', err)
                    #print("Epoch: [%2d], loss: [%.8f]", (ep+1), tf.reduce_mean(tf.square(label_ - self.pred.eval({self.images: input_}))))
                #if counter % 500 == 0:
                #if counter % 20 == 0:
                #    self.save(config.checkpoint_dir, counter)

            if ep == 0 or ep % 10 == 0:
                self.save(config.checkpoint_dir, ep)

            ###
            '''
            try:
                config.is_train = False
                nx_, ny_, original_shape_ = input_setup(config)
                data_dir_ = checkpoint_dir(config)
                input__, label__ = read_data(data_dir_)

                print("Now Start Testing...")
                result_ = self.pred.eval({self.images: input__})
                image_ = merge(result_, [nx_, ny_], self.c_dim)
                print("image after merge:", image_.shape)
                print("[nx_, ny_]:", [nx_, ny_])
                print("original_shape:", original_shape_)
                print(type(image_), type(original_shape_[0]), type(original_shape_[1]))

                cropped_img_ = crop_center(image_, original_shape_[0], original_shape_[1])
                print("cropped_img_:", cropped_img_.shape)

                imsave(image_, config.result_dir + '/result-' + str(ep) + '.png', config)
                imsave(cropped_img_, config.result_dir + '/result_crop-' + str(ep) + '.png', config)
            except:
                print("Unexpected error while evaluating image:", sys.exc_info()[0])

            config.is_train = True
            '''
            ###

            print("loss per epoch[%d] loss: [%.8f]" % ((ep), np.mean(loss_summary_per_batch)))
            summary_writer.add_summary(
                tf.Summary(value=[
                    tf.Summary.Value(tag="loss per epoch",
                                     simple_value=np.mean(loss_summary_per_batch)),
                ]), ((ep)))
            summary_writer.add_summary(
                tf.Summary(value=[
                    tf.Summary.Value(tag="learning rate",
                                     simple_value=self.optimizer._lr),
                ]), ((ep)))
            #print("learning rate:", self.optimizer._lr)
    # Test
    else:
        print("Now Start Testing...")
        #print("nx", "ny", nx, ny)
        result = self.pred.eval({self.images: input_})
        print("result:", result.shape)
        #print(label_[1] - result[1])
        image = merge(result, [nx, ny], self.c_dim)
        print("image after merge:", image.shape)
        print("[nx, ny]:", [nx, ny])
        print("original_shape:", original_shape)
        print(type(image), type(original_shape[0]), type(original_shape[1]))

        cropped_img = crop_center(image, original_shape[0], original_shape[1])
        print("cropped_img:", cropped_img.shape)

        #image_LR = merge(input_, [nx, ny], self.c_dim)
        #checkimage(image_LR)
        imsave(image, config.result_dir + '/result.png', config)
        imsave(cropped_img, config.result_dir + '/result_crop.png', config)
def train(self, config):
    # NOTE: if train, the nx, ny are ignored
    input_setup(config)
    data_dir = checkpoint_dir(config)
    input_, label_ = read_data(data_dir)
    residul = make_bicubic(input_, config.scale)

    '''
    opt = tf.train.AdamOptimizer(learning_rate=config.learning_rate)
    grad_and_value = opt.compute_gradients(self.loss)

    clip = tf.Variable(0.1, name='clip')
    capped_gvs = [(tf.clip_by_value(grad, -(clip), clip), var) for grad, var in grad_and_value]

    self.train_op = opt.apply_gradients(capped_gvs)
    '''
    # Stochastic gradient descent with the standard backpropagation
    self.train_op = tf.train.AdamOptimizer(
        learning_rate=config.learning_rate).minimize(self.loss)
    tf.initialize_all_variables().run()

    counter = 0
    time_ = time.time()
    self.load(config.checkpoint_dir)

    # Train
    if config.is_train:
        print("Now Start Training...")
        for ep in range(config.epoch):
            # Run over the sub-images batch by batch
            batch_idxs = len(input_) // config.batch_size
            for idx in range(0, batch_idxs):
                batch_images = input_[idx * config.batch_size:(idx + 1) * config.batch_size]
                batch_residul = residul[idx * config.batch_size:(idx + 1) * config.batch_size]
                batch_labels = label_[idx * config.batch_size:(idx + 1) * config.batch_size]
                counter += 1
                _, err = self.sess.run([self.train_op, self.loss],
                                       feed_dict={
                                           self.images: batch_images,
                                           self.labels: batch_labels,
                                           self.residul: batch_residul
                                       })
                if counter % 10 == 0:
                    print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                          % ((ep + 1), counter, time.time() - time_, err))
                    #print(label_[1] - self.pred.eval({self.images: input_})[1], 'loss:]', err)
                if counter % 500 == 0:
                    self.save(config.checkpoint_dir, counter)
    # Test
    else:
        print("Now Start Testing...")
        print(input_[0].shape)
        checkimage(residul[0])
        result = self.pred.eval({
            self.images: input_[0].reshape(1, input_[0].shape[0], input_[0].shape[1], self.c_dim)
        })
        x = np.squeeze(result)
        checkimage(x)
        x = residul[0] + x
        # Map back to the interval [0, 1]
        x = (x + 1) / 2
        checkimage(x)
        print(x.shape)
        imsave(x, config.result_dir + '/result.png', config)