Example #1
  def build_loss(self):
    response = self.response
    response_size = response.get_shape().as_list()[1:3]  # [height, width]

    gt = _construct_gt_response(response_size,
                                self.config['batch_size'],
                                self.stride,
                                self.config['gt_config'])
    self.gt = gt

    with tf.name_scope('Loss'):
      loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=response,
                                                     labels=gt)

      with tf.name_scope('Balance_weights'):
        n_pos = tf.reduce_sum(tf.to_float(tf.equal(gt[0], 1)))
        n_neg = tf.reduce_sum(tf.to_float(tf.equal(gt[0], 0)))
        w_pos = 0.5 / n_pos
        w_neg = 0.5 / n_neg
        class_weights = tf.where(tf.equal(gt, 1),
                                 w_pos * tf.ones_like(gt),
                                 tf.ones_like(gt))
        class_weights = tf.where(tf.equal(gt, 0),
                                 w_neg * tf.ones_like(gt),
                                 class_weights)
        loss = loss * class_weights

      # Note that we use reduce_sum instead of reduce_mean since the loss has
      # already been normalized by class_weights in spatial dimension.
      loss = tf.reduce_sum(loss, [1, 2])

      batch_loss = tf.reduce_mean(loss, name='batch_loss')
      tf.losses.add_loss(batch_loss)

      total_loss = tf.losses.get_total_loss()
      self.batch_loss = batch_loss
      self.total_loss = total_loss


      tf.summary.image(self.mode + 'exemplar', self.examplars)
      tf.summary.image(self.mode + 'instance', self.instances)

      mean_batch_loss, update_op1 = tf.metrics.mean(batch_loss)
      mean_total_loss, update_op2 = tf.metrics.mean(total_loss)
      with tf.control_dependencies([update_op1, update_op2]):
        tf.summary.scalar(self.mode + 'batch_loss', mean_batch_loss)
        tf.summary.scalar(self.mode + 'total_loss', mean_total_loss)

      if self.mode == 'train':
        with tf.name_scope("GT"):
          tf.summary.image('GT', tf.reshape(gt[0], [1] + response_size + [1]))
     
      tf.summary.image(self.mode + 'Response', tf.expand_dims(tf.sigmoid(response), -1))
      tf.summary.histogram(self.mode + 'Response', self.response)

      # Two more metrics to monitor the performance of training
      tf.summary.scalar(self.mode + 'center_score_error', center_score_error(response))
      tf.summary.scalar(self.mode + 'center_dist_error', center_dist_error(response))
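
A short aside on the "reduce_sum instead of reduce_mean" comment above: the positive pixels share a total weight of 0.5 and the negative pixels share the other 0.5, so the class weights over one score map sum to roughly 1 and summing the weighted per-pixel losses already behaves like a balanced mean. A minimal NumPy sketch (illustrative shapes, not taken from any of the repos above):

import numpy as np

gt = np.zeros((17, 17), dtype=np.float32)   # hypothetical 17x17 score map
gt[7:10, 7:10] = 1.0                        # small positive region near the center

n_pos = np.sum(gt == 1)
n_neg = np.sum(gt == 0)
class_weights = np.where(gt == 1, 0.5 / n_pos, 0.5 / n_neg)

print(class_weights.sum())  # ~1.0, so a spatial sum of the weighted loss is already normalized
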
Example #2
  def build_loss(self):
    response = self.response
    response_size = response.get_shape().as_list()[1:3]  # [height, width]

    gt = construct_gt_score_maps(response_size,
                                 self.data_config['batch_size'],
                                 self.model_config['embed_config']['stride'],
                                 self.train_config['gt_config'])

    with tf.name_scope('Loss'):
      loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=response,
                                                     labels=gt)

      with tf.name_scope('Balance_weights'):
        n_pos = tf.reduce_sum(tf.to_float(tf.equal(gt[0], 1)))
        n_neg = tf.reduce_sum(tf.to_float(tf.equal(gt[0], 0)))
        w_pos = 0.5 / n_pos
        w_neg = 0.5 / n_neg
        class_weights = tf.where(tf.equal(gt, 1),
                                 w_pos * tf.ones_like(gt),
                                 tf.ones_like(gt))
        class_weights = tf.where(tf.equal(gt, 0),
                                 w_neg * tf.ones_like(gt),
                                 class_weights)
        loss = loss * class_weights

      # Note that we use reduce_sum instead of reduce_mean since the loss has
      # already been normalized by class_weights in spatial dimension.
      loss = tf.reduce_sum(loss, [1, 2])

      batch_loss = tf.reduce_mean(loss, name='batch_loss')
      tf.losses.add_loss(batch_loss)

      total_loss = tf.losses.get_total_loss()
      self.batch_loss = batch_loss
      self.total_loss = total_loss

      tf.summary.image('exemplar', self.exemplars, family=self.mode)
      tf.summary.image('instance', self.instances, family=self.mode)

      mean_batch_loss, update_op1 = tf.metrics.mean(batch_loss)
      mean_total_loss, update_op2 = tf.metrics.mean(total_loss)
      with tf.control_dependencies([update_op1, update_op2]):
        tf.summary.scalar('batch_loss', mean_batch_loss, family=self.mode)
        tf.summary.scalar('total_loss', mean_total_loss, family=self.mode)

      if self.mode == 'train':
        tf.summary.image('GT', tf.reshape(gt[0], [1] + response_size + [1]), family='GT')
      tf.summary.image('Response', tf.expand_dims(tf.sigmoid(response), -1), family=self.mode)
      tf.summary.histogram('Response', self.response, family=self.mode)

      # Two more metrics to monitor the performance of training
      tf.summary.scalar('center_score_error', center_score_error(response), family=self.mode)
      tf.summary.scalar('center_dist_error', center_dist_error(response), family=self.mode)
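
The `tf.metrics.mean` / `control_dependencies` pattern used above logs a running average rather than the raw per-step loss: the metric keeps total/count state in local variables, and creating the scalar summary inside the control-dependency block forces the update op to run whenever the summary is evaluated. The `family` argument simply prefixes the TensorBoard tag so train and validation curves are grouped separately. A self-contained TF 1.x sketch (names and values are illustrative, not from the repos above):

import tensorflow as tf

batch_loss = tf.placeholder(tf.float32, shape=[])
mean_loss, update_op = tf.metrics.mean(batch_loss)
with tf.control_dependencies([update_op]):
    # Evaluating this summary also triggers update_op, refreshing the running mean.
    summary = tf.summary.scalar('batch_loss', mean_loss, family='train')

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # tf.metrics state lives in local variables
    for v in [1.0, 2.0, 3.0]:
        sess.run(summary, feed_dict={batch_loss: v})
    print(sess.run(mean_loss))  # 2.0: running mean of the values fed so far
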
Example #3
    def build_loss(self):
        response = self.response
        response_size = response.get_shape().as_list()[1:3]  # [height, width]

        gt = construct_gt_score_maps(
            response_size, self.data_config['batch_size'],
            self.model_config['embed_config']['stride'],
            self.train_config['gt_config'])

        # loss: https://www.renom.jp/ja/notebooks/tutorial/basic_algorithm/lossfunction/notebook.html
        with tf.name_scope('Loss'):
            loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=response,
                                                           labels=gt)

            with tf.name_scope('Balance_weights'):
                n_pos = tf.reduce_sum(tf.to_float(tf.equal(gt[0], 1)))
                n_neg = tf.reduce_sum(tf.to_float(tf.equal(gt[0], 0)))
                w_pos = 0.5 / n_pos
                w_neg = 0.5 / n_neg
                class_weights = tf.where(tf.equal(gt, 1),
                                         w_pos * tf.ones_like(gt),
                                         tf.ones_like(gt))
                class_weights = tf.where(tf.equal(gt, 0),
                                         w_neg * tf.ones_like(gt),
                                         class_weights)
                loss = loss * class_weights

            # Note that we use reduce_sum instead of reduce_mean since the loss has
            # already been normalized by class_weights in spatial dimension.
            loss = tf.reduce_sum(loss, [1, 2])

            batch_loss = tf.reduce_mean(loss, name='batch_loss')
            tf.losses.add_loss(batch_loss)

            total_loss = tf.losses.get_total_loss()
            self.batch_loss = batch_loss
            self.total_loss = total_loss

            # quantization
            # good note: https://www.tensorflowers.cn/t/7136
            if self.model_config['embed_config']['quantization']:
                if self.train_config["export"]:
                    contrib_quantize.create_eval_graph()
                else:
                    contrib_quantize.create_training_graph(quant_delay=200000)

            tf.summary.image('exemplar', self.exemplars, family=self.mode)
            tf.summary.image('instance', self.instances, family=self.mode)

            mean_batch_loss, update_op1 = tf.metrics.mean(batch_loss)
            mean_total_loss, update_op2 = tf.metrics.mean(total_loss)
            with tf.control_dependencies([update_op1, update_op2]):
                tf.summary.scalar('batch_loss',
                                  mean_batch_loss,
                                  family=self.mode)
                tf.summary.scalar('total_loss',
                                  mean_total_loss,
                                  family=self.mode)

            if self.mode == 'train':
                tf.summary.image('GT',
                                 tf.reshape(gt[0], [1] + response_size + [1]),
                                 family='GT')
            tf.summary.image('Response',
                             tf.expand_dims(tf.sigmoid(response), -1),
                             family=self.mode)
            tf.summary.histogram('Response', self.response, family=self.mode)

            # Two more metrics to monitor the performance of training
            tf.summary.scalar('center_score_error',
                              center_score_error(response),
                              family=self.mode)
            tf.summary.scalar('center_dist_error',
                              center_dist_error(response),
                              family=self.mode)
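
Example #3 additionally wires in optional quantization-aware training via `tf.contrib.quantize` (TF 1.x): `create_training_graph(quant_delay=200000)` rewrites the default graph with fake-quantization ops that only start simulating 8-bit arithmetic after 200k steps, while `create_eval_graph()` applies the inference-time rewrite used before export. A hedged, stripped-down sketch of that branch (config keys mirror the example; this is not a drop-in for the repo):

import tensorflow as tf
from tensorflow.contrib import quantize as contrib_quantize

def maybe_quantize(embed_config, train_config):
    """Optionally rewrite the default graph for quantization-aware training."""
    if not embed_config.get('quantization', False):
        return
    if train_config.get('export', False):
        # Inference-time rewrite used before freezing/exporting the model.
        contrib_quantize.create_eval_graph()
    else:
        # Training-time rewrite: start simulating quantization after 200k steps.
        contrib_quantize.create_training_graph(quant_delay=200000)
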
Example #4
    def build_loss(self):
        response = self.response
        response_size = response.get_shape().as_list()[1:3]
        gt = construct_gt_score_maps(
            response_size, self.data_config['batch_size'] * 2,
            self.model_config['embed_config']['stride'],
            self.train_config['gt_config'])

        with tf.name_scope('Loss'):
            loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=response,
                                                           labels=gt)

            with tf.name_scope('Balance_weights'):
                n_pos = tf.reduce_sum(tf.to_float(tf.equal(gt[0], 1)))
                n_neg = tf.reduce_sum(tf.to_float(tf.equal(gt[0], 0)))
                w_pos = 0.5 / n_pos
                w_neg = 0.5 / n_neg
                class_weights = tf.where(tf.equal(gt, 1),
                                         w_pos * tf.ones_like(gt),
                                         tf.ones_like(gt))
                class_weights = tf.where(tf.equal(gt, 0),
                                         w_neg * tf.ones_like(gt),
                                         class_weights)
                loss = loss * class_weights

            loss = tf.reduce_sum(loss, [1, 2])
            batch_loss = tf.reduce_mean(loss, name='batch_loss')
            tf.losses.add_loss(batch_loss)

            # Get total loss
            total_loss = tf.losses.get_total_loss()
            self.batch_loss = batch_loss
            self.total_loss = total_loss

            tf.summary.image('exemplar', self.exemplars_show, family=self.mode)
            tf.summary.image('instance', self.instances, family=self.mode)

            mean_total_loss, update_op2 = tf.metrics.mean(total_loss)
            with tf.control_dependencies([update_op2]):
                tf.summary.scalar('total_loss',
                                  mean_total_loss,
                                  family=self.mode)

            if self.mode == 'train':
                tf.summary.image('GT',
                                 tf.reshape(gt[0], [1] + response_size + [1]),
                                 family='GT')
            tf.summary.image('Response',
                             tf.expand_dims(tf.sigmoid(self.total_response),
                                            -1),
                             family=self.mode)
            tf.summary.histogram('Response',
                                 self.total_response,
                                 family=self.mode)

            # Two more metrics to monitor the performance of training
            tf.summary.scalar('center_score_error',
                              center_score_error(self.total_response),
                              family=self.mode)
            tf.summary.scalar('center_dist_error',
                              center_dist_error(self.total_response),
                              family=self.mode)
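
All four variants delegate label construction to a `construct_gt_score_maps`-style helper that is not shown on this page. For orientation only, here is a minimal NumPy sketch of the common SiamFC-style rule (pixels whose distance from the map center, measured in input pixels via the stride, falls inside a positive radius get label 1, the rest 0); the real helpers may also add an ignore band and tile the map over the batch, so this is an assumption-laden toy, not the repos' implementation:

import numpy as np

def toy_gt_score_map(response_size, stride=8, r_pos=16):
    """Toy single-image GT map: 1 inside r_pos (in input pixels), 0 outside."""
    h, w = response_size
    y, x = np.meshgrid(np.arange(h) - (h - 1) / 2.0,
                       np.arange(w) - (w - 1) / 2.0,
                       indexing='ij')
    dist = np.sqrt(x ** 2 + y ** 2) * stride   # distance in input-image pixels
    return (dist <= r_pos).astype(np.float32)

gt = toy_gt_score_map([17, 17])
print(int(gt.sum()), 'positive pixels out of', gt.size)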