def cacuLoss(self, x):
    loss = tf.reduce_mean(tf.losses.absolute_difference(self.target, x.outputs))

    # Using the Adam optimizer, as mentioned in the paper
    optimizer = tf.train.AdamOptimizer()
    # This is the train operation for our objective
    self.train_op = optimizer.minimize(loss)

    PSNR = utils.psnr_tf(self.target, x.outputs)

    # Per-batch scalars to keep track of loss and PSNR
    summary_loss = tf.summary.scalar("loss", loss)
    summary_psnr = tf.summary.scalar("PSNR", PSNR)

    # Streaming (running) means for evaluation; TensorFlow de-duplicates the
    # repeated "loss"/"PSNR" tags by appending a suffix (e.g. "loss_1")
    streaming_loss, self.streaming_loss_update = tf.contrib.metrics.streaming_mean(loss)
    streaming_loss_scalar = tf.summary.scalar('loss', streaming_loss)
    streaming_psnr, self.streaming_psnr_update = tf.contrib.metrics.streaming_mean(PSNR)
    streaming_psnr_scalar = tf.summary.scalar('PSNR', streaming_psnr)

    # Image summaries for input, target, and output
    # (note: not included in either merge below, so they are never written)
    input_image = tf.summary.image("input_image", tf.cast(self.input, tf.uint8))
    target_image = tf.summary.image("target_image", tf.cast(self.target, tf.uint8))
    output_image = tf.summary.image("output_image", tf.cast(x.outputs, tf.uint8))

    self.train_merge = tf.summary.merge([summary_loss, summary_psnr])
    self.test_merge = tf.summary.merge([streaming_loss_scalar, streaming_psnr_scalar])
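# A minimal sketch of how the ops built above are typically driven in a TF1
# session. The names here (net, data_iter, log_dir, and the feed keys) are
# hypothetical stand-ins, not part of this repo.
def train_sketch(net, sess, data_iter, log_dir, steps=1000):
    writer = tf.summary.FileWriter(log_dir, sess.graph)
    sess.run(tf.global_variables_initializer())
    # streaming_mean keeps its running totals in local variables, so they must
    # be initialized here (and re-run to reset counters between eval passes)
    sess.run(tf.local_variables_initializer())
    for step in range(steps):
        batch_input, batch_target = next(data_iter)
        _, summary = sess.run(
            [net.train_op, net.train_merge],
            feed_dict={net.input: batch_input, net.target: batch_target})
        writer.add_summary(summary, step)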
def calculate_loss(self, target=None, output=None):
    self.loss = tf.losses.absolute_difference(target, output)
    psnr = utils.psnr_tf(target, output, is_norm=True)

    tf.summary.scalar("loss", self.loss)
    tf.summary.scalar("PSNR", psnr)

    # # Image summaries for input, target, and output
    # tf.summary.image("input_image", tf.cast(self.input, tf.uint8))
    # tf.summary.image("target_image", tf.cast(self.target, tf.uint8))
    # tf.summary.image("output_image", tf.cast(x.outputs, tf.uint8))

    self.merged = tf.summary.merge_all()
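# Unlike the explicit merge lists used in the other variants, tf.summary.merge_all()
# collects every summary registered anywhere in the default graph, so the
# commented-out image summaries above would be picked up automatically if
# re-enabled. A minimal usage sketch (sess, feed, writer, step are hypothetical):
#   summary = sess.run(self.merged, feed_dict=feed)
#   writer.add_summary(summary, step)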
def cacuLoss(self):
    self.loss = tf.reduce_mean(tf.losses.absolute_difference(self.norm_target, self.output))
    PSNR = utils.psnr_tf(self.norm_target, self.output, is_norm=True)

    # Per-batch scalars to keep track of loss and PSNR
    summary_loss = tf.summary.scalar("loss", self.loss)
    summary_psnr = tf.summary.scalar("PSNR", PSNR)

    # Streaming (running) means for evaluation
    streaming_loss, self.streaming_loss_update = tf.contrib.metrics.streaming_mean(self.loss)
    streaming_loss_scalar = tf.summary.scalar('loss', streaming_loss)
    streaming_psnr, self.streaming_psnr_update = tf.contrib.metrics.streaming_mean(PSNR)
    streaming_psnr_scalar = tf.summary.scalar('PSNR', streaming_psnr)

    self.train_merge = tf.summary.merge([summary_loss, summary_psnr])
    self.test_merge = tf.summary.merge([streaming_loss_scalar, streaming_psnr_scalar])
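# utils.psnr_tf is not shown in this section. A plausible implementation follows
# the standard definition PSNR = 10 * log10(MAX^2 / MSE), with is_norm selecting
# a peak value of 1.0 (inputs in [0, 1]) instead of 255.0. This is a sketch of
# that assumption, not the repo's actual helper.
def psnr_tf_sketch(target, output, is_norm=False):
    max_val = 1.0 if is_norm else 255.0
    mse = tf.reduce_mean(tf.squared_difference(
        tf.cast(target, tf.float32), tf.cast(output, tf.float32)))
    return 10.0 * tf.log(max_val ** 2 / mse) / tf.log(10.0)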
def cacuLoss(self, x):
    '''
    Calculate the loss and write summaries to TensorBoard.
    :param x: output tensor
    :return: None
    '''
    loss = tf.losses.absolute_difference(self.target, x.outputs)

    # Using the Adam optimizer, as mentioned in the paper
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
    # This is the train operation for our objective
    self.train_op = optimizer.minimize(loss)

    PSNR = utils.psnr_tf(self.target, x.outputs)

    # Streaming (running) means for evaluation
    streaming_loss, self.streaming_loss_update = tf.contrib.metrics.streaming_mean(loss)
    streaming_loss_scalar = tf.summary.scalar('loss', streaming_loss)
    streaming_psnr, self.streaming_psnr_update = tf.contrib.metrics.streaming_mean(PSNR)
    streaming_psnr_scalar = tf.summary.scalar('PSNR', streaming_psnr)
    self.test_merge = tf.summary.merge([streaming_loss_scalar, streaming_psnr_scalar])

    # Scalar to keep track of the overall loss
    summary_loss = tf.summary.scalar("loss_in_all", loss)
    summaries = [summary_loss]

    # Per-channel L1 losses; note this only covers the first channel // 3 channels
    _, _, _, channel = self.target.get_shape()
    for i in range(channel // 3):
        loss = tf.losses.absolute_difference(self.target[:, :, :, i], x.outputs[:, :, :, i])
        summaries.append(tf.summary.scalar("loss%d" % i, loss))

    summary_psnr = tf.summary.scalar("PSNR", PSNR)
    summaries.append(summary_psnr)  # was created but never merged
    summaries.append(utils.variable_summeries(x.outputs, 'output'))
    summaries.append(utils.variable_summeries(self.target, 'target'))

    # Image summaries for input, target, and output
    #input_image = tf.summary.image("input_image", tf.cast(self.input[:,:,0:1], tf.uint8))
    #target_image = tf.summary.image("target_image", tf.cast(self.target[:,:,0:1], tf.uint8))
    #output_image = tf.summary.image("output_image", tf.cast(x.outputs[:,:,0:1], tf.uint8))

    self.train_merge = tf.summary.merge(summaries)
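# utils.variable_summeries (sic) is assumed to follow the standard TensorBoard
# pattern of attaching mean/stddev/min/max/histogram summaries to a tensor and
# returning them merged. A sketch of that assumption, not the repo's helper:
def variable_summeries_sketch(var, name):
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        return tf.summary.merge([
            tf.summary.scalar('mean', mean),
            tf.summary.scalar('stddev', stddev),
            tf.summary.scalar('max', tf.reduce_max(var)),
            tf.summary.scalar('min', tf.reduce_min(var)),
            tf.summary.histogram('histogram', var),
        ])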
def cacuDenseNetLoss(self, output):
    # Pixel-wise reconstruction losses
    l1_loss = tf.reduce_mean(tf.losses.absolute_difference(self.target, output))
    l2_loss = tf.reduce_mean(tf.squared_difference(self.target, output))

    # Perceptual loss: match VGG-16 feature maps of target and output
    vgg_target = custom_Vgg16(self.target, data_dict=self.data_dict)
    feature_target = [
        vgg_target.conv1_2, vgg_target.conv2_2, vgg_target.conv3_3,
        vgg_target.conv4_3, vgg_target.conv5_3
    ]
    vgg_output = custom_Vgg16(output, data_dict=self.data_dict)
    feature_output = [
        vgg_output.conv1_2, vgg_output.conv2_2, vgg_output.conv3_3,
        vgg_output.conv4_3, vgg_output.conv5_3
    ]
    per_loss_list = []
    for f, f_ in zip(feature_target, feature_output):
        per_loss_list.append(tf.reduce_mean(tf.squared_difference(f, f_)))
    per_loss = self.lamba * tf.reduce_sum(per_loss_list)

    loss = l1_loss + l2_loss + per_loss

    # Decayed learning rate (only used by the commented-out optimizer below)
    learning_rate = utils.learning_rate_decay(self.learning_rate, self.global_step)

    # Using the Adam optimizer, as mentioned in the paper
    optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    # optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    # This is the train operation for our objective
    self.train_op = optimizer.minimize(loss, global_step=self.global_step)

    PSNR = utils.psnr_tf(self.target, output)

    # Per-batch scalars to keep track of the individual loss terms
    summary_l1_loss = tf.summary.scalar("l1-loss", l1_loss)
    summary_l2_loss = tf.summary.scalar("l2-loss", l2_loss)  # was logging l1_loss by mistake
    summary_per_loss = tf.summary.scalar("per-loss", per_loss)
    summary_loss = tf.summary.scalar("loss", loss)
    summary_psnr = tf.summary.scalar("PSNR", PSNR)

    # Streaming (running) means for evaluation
    streaming_l1_loss, self.streaming_l1_loss_update = tf.contrib.metrics.streaming_mean(l1_loss)
    streaming_l1_loss_scalar = tf.summary.scalar('l1-loss', streaming_l1_loss)
    streaming_l2_loss, self.streaming_l2_loss_update = tf.contrib.metrics.streaming_mean(l2_loss)
    streaming_l2_loss_scalar = tf.summary.scalar('l2-loss', streaming_l2_loss)
    streaming_per_loss, self.streaming_per_loss_update = tf.contrib.metrics.streaming_mean(per_loss)
    streaming_per_loss_scalar = tf.summary.scalar('per-loss', streaming_per_loss)
    streaming_loss, self.streaming_loss_update = tf.contrib.metrics.streaming_mean(loss)
    streaming_loss_scalar = tf.summary.scalar('loss', streaming_loss)
    streaming_psnr, self.streaming_psnr_update = tf.contrib.metrics.streaming_mean(PSNR)
    streaming_psnr_scalar = tf.summary.scalar('PSNR', streaming_psnr)

    # Image summaries for input, target, and output
    '''
    input_image = tf.summary.image("input_image", tf.cast(self.input, tf.uint8))
    target_image = tf.summary.image("target_image", tf.cast(self.target, tf.uint8))
    output_image = tf.summary.image("output_image", tf.cast(output.outputs, tf.uint8))
    '''

    self.train_merge = tf.summary.merge([
        summary_l1_loss, summary_l2_loss, summary_psnr, summary_per_loss,
        summary_loss
    ])
    self.test_merge = tf.summary.merge([
        streaming_l1_loss_scalar, streaming_psnr_scalar,
        streaming_per_loss_scalar, streaming_loss_scalar,
        streaming_l2_loss_scalar
    ])
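# utils.learning_rate_decay is not shown here; a common choice it may wrap is
# exponential decay on the global step. The decay_steps/decay_rate values below
# are illustrative assumptions, not the repo's settings.
def learning_rate_decay_sketch(initial_lr, global_step):
    return tf.train.exponential_decay(
        initial_lr, global_step,
        decay_steps=10000,   # assumed decay interval
        decay_rate=0.96,     # assumed decay rate
        staircase=True)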