def __init__(self, flags):
    """ Initialize: placeholder, train_op, summary, session, saver and file_writer """
    self.flags = flags
    image_size = int(self.flags.image_size)
    num_class = int(self.flags.num_class)

    # Placeholders
    self.keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    self.image = tf.placeholder(tf.float32,
                                shape=[None, image_size, image_size, 3],
                                name="input_image")
    self.phase_train = tf.placeholder(tf.bool, name='phase_train')

    # Prediction and loss
    self.pred_annotation, self.image_segment_logits, self.reconstruct_image = \
        self.inference(self.image, self.keep_probability, self.phase_train, self.flags)
    self.colorized_pred_annotation = utils.batch_colorize(
        self.pred_annotation, 0, num_class, self.flags.cmap)
    self.loss = tf.reduce_mean(
        tf.reshape((self.image - self.reconstruct_image)**2, shape=[-1]))

    # Train variables and op
    trainable_var = tf.trainable_variables()
    if self.flags.debug:
        for var in trainable_var:
            utils.add_to_regularization_and_summary(var)
    self.learning_rate, self.train_op = self.train(self.loss, trainable_var, self.flags)
    self.learning_rate_summary = tf.summary.scalar("learning_rate", self.learning_rate)

    # Summary
    print("Setting up summary op...")
    tf.summary.image("input_image", self.image, max_outputs=2)
    tf.summary.image("reconstruct_image", self.reconstruct_image, max_outputs=2)
    tf.summary.image("pred_annotation", self.colorized_pred_annotation, max_outputs=2)
    self.loss_summary = tf.summary.scalar("total_loss", self.loss)
    self.summary_op = tf.summary.merge_all()

    # Session, saver, and writers
    print("Setting up Session and Saver...")
    self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    self.saver = tf.train.Saver(max_to_keep=2)
    self.train_writer = tf.summary.FileWriter(
        os.path.join(self.flags.logs_dir, 'train'), self.sess.graph)
    self.validation_writer = tf.summary.FileWriter(
        os.path.join(self.flags.logs_dir, 'validation'))

    print("Initialize tf variables")
    self.sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(self.flags.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    return
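
# A minimal usage sketch, not part of the original source: one training
# iteration for the reconstruction-only variant above. `train_step`, `model`,
# `images`, and `step` are hypothetical stand-ins for an instance of this
# class, a float32 batch of shape [batch, image_size, image_size, 3], and the
# global step counter; the keep probability is an assumed value.
def train_step(model, images, step):
    feed = {model.image: images,
            model.keep_probability: 0.85,  # dropout keep-probability during training
            model.phase_train: True}       # run batch norm in training mode
    _, loss, summary = model.sess.run(
        [model.train_op, model.loss, model.summary_op], feed_dict=feed)
    model.train_writer.add_summary(summary, step)
    return loss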
def __init__(self, flags): """ Initialize: placeholder, train_op, summary, session, saver and file_writer """ self.flags = flags image_size = int(self.flags.image_size) num_class = int(self.flags.num_class) # Place holder self.keep_probability = tf.placeholder(tf.float32, name="keep_probabilty") self.image = tf.placeholder(tf.float32, shape=[None, image_size, image_size, 3], name="input_image") self.phase_train = tf.placeholder(tf.bool, name='phase_train') self.neighbor_indeces = tf.placeholder(tf.int64, name="neighbor_indeces") self.neighbor_vals = tf.placeholder(tf.float32, name="neighbor_vals") self.neighbor_shape = tf.placeholder(tf.int64, name="neighbor_shape") neighbor_filter = (self.neighbor_indeces, self.neighbor_vals, self.neighbor_shape) _image_weights = brightness_weight(self.image, neighbor_filter, sigma_I=0.05) image_weights = convert_to_batchTensor(*_image_weights) # Prediction and loss self.pred_annotation, self.image_segment_logits, self.reconstruct_image = \ self.inference(self.image, self.keep_probability, self.phase_train, self.flags) image_segment = tf.nn.softmax(self.image_segment_logits) self.colorized_pred_annotation = utils.batch_colorize( self.pred_annotation, 0, num_class, self.flags.cmap) self.reconstruct_loss = tf.reduce_mean( tf.reshape((self.image - self.reconstruct_image)**2, shape=[-1])) batch_soft_ncut = soft_ncut(self.image, image_segment, image_weights) self.soft_ncut = tf.reduce_mean(batch_soft_ncut) self.loss = self.reconstruct_loss + self.soft_ncut # Train var and op trainable_var = tf.trainable_variables() encode_trainable_var = tf.trainable_variables("infer_encode") if self.flags.debug: for var in trainable_var: utils.add_to_regularization_and_summary(var) self.reconst_learning_rate, self.train_reconst_op = \ self.train(self.reconstruct_loss, trainable_var, self.flags) self.softNcut_learning_rate, self.train_softNcut_op = \ self.train(self.soft_ncut, encode_trainable_var, self.flags) self.reconst_learning_rate_summary = tf.summary.scalar( "reconst_learning_rate", self.reconst_learning_rate) self.softNcut_learning_rate_summary = tf.summary.scalar( "softNcut_learning_rate", self.softNcut_learning_rate) # Summary tf.summary.image("input_image", self.image, max_outputs=2) tf.summary.image("reconstruct_image", self.reconstruct_image, max_outputs=2) tf.summary.image("pred_annotation", self.colorized_pred_annotation, max_outputs=2) reconstLoss_summary = tf.summary.scalar("reconstruct_loss", self.reconstruct_loss) softNcutLoss_summary = tf.summary.scalar("soft_ncut_loss", self.soft_ncut) totLoss_summary = tf.summary.scalar("total_loss", self.loss) self.loss_summary = tf.summary.merge( [reconstLoss_summary, softNcutLoss_summary, totLoss_summary]) self.summary_op = tf.summary.merge_all() # Session ,saver, and writer print("Setting up Session and Saver...") self.sess = tf.Session() self.saver = tf.train.Saver(max_to_keep=2) # create two summary writers to show training loss and validation loss in the same graph # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir self.train_writer = tf.summary.FileWriter( os.path.join(self.flags.logs_dir, 'train'), self.sess.graph) self.validation_writer = tf.summary.FileWriter( os.path.join(self.flags.logs_dir, 'validation')) print("Initialize tf variables") self.sess.run(tf.global_variables_initializer()) ckpt = tf.train.get_checkpoint_state(self.flags.logs_dir) if ckpt and ckpt.model_checkpoint_path: self.saver.restore(self.sess, ckpt.model_checkpoint_path) print("Model restored...") return
def __init__(self, flags): """ Initialize: placeholder, train_op, summary, session, saver and file_writer """ self.flags = flags image_size = int(self.flags.image_size) num_class = int(self.flags.num_class) # Place holder self.image = tf.placeholder(tf.float32, shape=[None, image_size, image_size, 3], name="input_image") self.annotation = tf.placeholder( tf.int32, shape=[None, image_size, image_size, num_class], name="annotation") self.phase_train = tf.placeholder(tf.bool, name='phase_train') self.keep_probability = tf.placeholder(tf.float32, name="keep_probabilty") # Prediction and loss self.pred_annotation, self.image_segment_logits = \ self.inference(self.image, self.keep_probability, self.phase_train, self.flags) image_segment = tf.nn.softmax(self.image_segment_logits) colorized_annotation = tf.argmax(self.annotation, axis=3) colorized_annotation = tf.expand_dims(colorized_annotation, dim=3) self.colorized_annotation = utils.batch_colorize( colorized_annotation, 0, num_class, self.flags.cmap) self.colorized_pred_annotation = utils.batch_colorize( self.pred_annotation, 0, num_class, self.flags.cmap) self.loss = tf.reduce_mean((tf.nn.softmax_cross_entropy_with_logits( logits=self.image_segment_logits, labels=self.annotation, name="entropy"))) # Train var and op trainable_var = tf.trainable_variables() if self.flags.debug: for var in trainable_var: utils.add_to_regularization_and_summary(var) self.learning_rate, self.train_op = self.train(self.loss, trainable_var, self.flags) self.learning_rate_summary = tf.summary.scalar("learning_rate", self.learning_rate) # Summary print("Setting up summary op...") tf.summary.image("input_image", self.image, max_outputs=2) tf.summary.image("annotation", self.colorized_annotation, max_outputs=2) tf.summary.image("pred_annotation", self.colorized_pred_annotation, max_outputs=2) self.loss_summary = tf.summary.scalar("total_loss", self.loss) self.summary_op = tf.summary.merge_all() # Session ,saver, and writer print("Setting up Session and Saver...") self.sess = tf.Session() self.saver = tf.train.Saver(max_to_keep=2) # create two summary writers to show training loss and validation loss in the same graph # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir self.train_writer = tf.summary.FileWriter( os.path.join(self.flags.logs_dir, 'train'), self.sess.graph) self.validation_writer = tf.summary.FileWriter( os.path.join(self.flags.logs_dir, 'validation')) print("Initialize tf variables") self.sess.run(tf.global_variables_initializer()) ckpt = tf.train.get_checkpoint_state(self.flags.logs_dir) if ckpt and ckpt.model_checkpoint_path: self.saver.restore(self.sess, ckpt.model_checkpoint_path) print("Model restored...") return
def __init__(self, flags): """ Initialize: placeholder, train_op, summary, session, saver and file_writer """ self.flags = flags self.use_soft_ncut = self.flags.soft_ncut image_size = int(self.flags.image_size) num_class = int(self.flags.num_class) # Place holder self.keep_probability = tf.placeholder(tf.float32, name="keep_probabilty") self.image = tf.placeholder(tf.float32, shape=[None, image_size, image_size, 4], name="input_image") self.vis = self.image[..., 0:3] self.ice = tf.expand_dims(self.image[..., 3], axis=-1) self.annotation = tf.placeholder(tf.float32, shape=[None, image_size, image_size], name="annotation") self.phase_train = tf.placeholder(tf.bool, name='phase_train') # Prediction and loss self.pred_annotation, self.image_segment_logits, self.reconstruct_ice = \ self.inference(self.image, self.keep_probability, self.phase_train, self.flags) image_segment = tf.nn.softmax(self.image_segment_logits) self.colorized_pred_annotation = utils.batch_colorize( self.pred_annotation, 0, num_class, self.flags.cmap) self.reconstruct_loss = tf.reduce_mean( tf.reshape(((self.ice - self.reconstruct_ice) / 255)**2, shape=[-1])) if self.use_soft_ncut: # TODO: USE ICE ACTUALLY so that it's unsupervised # batch_soft_ncut = global_soft_ncut(self.annotation, image_segment) batch_soft_ncut = soft_n_cut_loss(self.annotation, image_segment, \ num_class, self.flags.image_size, self.flags.image_size) self.soft_ncut = tf.reduce_mean(batch_soft_ncut) self.loss = self.reconstruct_loss + self.soft_ncut else: self.loss = self.reconstruct_loss # Train var and op trainable_var = tf.trainable_variables() encode_trainable_var = tf.trainable_variables("infer_encode") if self.flags.debug: for var in trainable_var: utils.add_to_regularization_and_summary(var) self.reconst_learning_rate, self.train_reconst_op = \ self.train(self.reconstruct_loss, trainable_var, self.flags) if self.use_soft_ncut: self.softNcut_learning_rate, self.train_softNcut_op = \ self.train(self.soft_ncut, encode_trainable_var, self.flags) self.reconst_learning_rate_summary = tf.summary.scalar( "reconst_learning_rate", self.reconst_learning_rate) if self.use_soft_ncut: self.softNcut_learning_rate_summary = tf.summary.scalar( "softNcut_learning_rate", self.softNcut_learning_rate) # Summary tf.summary.image("input_vis", self.vis, max_outputs=2) tf.summary.image("input_ice", self.ice, max_outputs=2) tf.summary.image("reconstruct_ice", self.reconstruct_ice, max_outputs=2) tf.summary.image("pred_annotation", self.colorized_pred_annotation, max_outputs=2) reconstLoss_summary = tf.summary.scalar("reconstruct_loss", self.reconstruct_loss) if self.use_soft_ncut: softNcutLoss_summary = tf.summary.scalar("soft_ncut_loss", self.soft_ncut) totLoss_summary = tf.summary.scalar("total_loss", self.loss) self.loss_summary = tf.summary.merge( [reconstLoss_summary, softNcutLoss_summary, totLoss_summary]) else: self.loss_summary = reconstLoss_summary self.summary_op = tf.summary.merge_all() # Session ,saver, and writer print("Setting up Session and Saver...") self.sess = tf.Session() self.saver = tf.train.Saver(max_to_keep=2) # create two summary writers to show training loss and validation loss in the same graph # need to create two folders 'train' and 'validation' inside FLAGS.logs_dir self.train_writer = tf.summary.FileWriter( os.path.join(self.flags.logs_dir, 'train'), self.sess.graph) self.validation_writer = tf.summary.FileWriter( os.path.join(self.flags.logs_dir, 'validation')) print("Initialize tf variables") self.sess.run(tf.global_variables_initializer()) 
ckpt = tf.train.get_checkpoint_state(self.flags.logs_dir) if ckpt and ckpt.model_checkpoint_path: self.saver.restore(self.sess, ckpt.model_checkpoint_path) print("Model restored...") return
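
# A hedged sketch of how the two train ops above might be driven, in the
# W-Net style of alternating updates: one soft-N-cut step on the encoder
# variables, then one reconstruction step on the full network.
# `alternating_step`, `model`, `images`, and `annotations` are hypothetical
# stand-ins; the actual training schedule lives elsewhere in the repo.
def alternating_step(model, images, annotations):
    feed = {model.image: images,              # [N, image_size, image_size, 4]
            model.annotation: annotations,    # [N, image_size, image_size]
            model.keep_probability: 0.85,
            model.phase_train: True}
    if model.use_soft_ncut:
        model.sess.run(model.train_softNcut_op, feed_dict=feed)  # encoder-only update
    model.sess.run(model.train_reconst_op, feed_dict=feed)       # full-network update
    return model.sess.run(model.loss, feed_dict=feed)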