Example #1
    def subsample_indicator(indicator, num_samples):
        """Subsample indicator vector.

        Given a boolean indicator vector with M elements set to `True`, the function
        assigns all but `num_samples` of these previously `True` elements to
        `False`. If `num_samples` is greater than M, the original indicator vector
        is returned.

        Args:
          indicator: a 1-dimensional boolean tensor indicating which elements
            are allowed to be sampled and which are not.
          num_samples: int32 scalar tensor

        Returns:
          a boolean tensor with the same shape as the input (indicator) tensor
        """
        indices = tf.where(indicator)
        indices = tf.random_shuffle(indices)
        indices = tf.reshape(indices, [-1])

        num_samples = tf.minimum(tf.size(indices), num_samples)
        selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))

        selected_indicator = ops.indices_to_dense_vector(
            selected_indices,
            tf.shape(indicator)[0])

        return tf.equal(selected_indicator, 1)
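A minimal usage sketch (toy values, TF1 graph mode, with `tensorflow` imported as `tf` as in the examples; assumes `subsample_indicator` is accessible as a plain function):

    indicator = tf.constant([True, False, True, True, False, True])
    sampled = subsample_indicator(indicator, tf.constant(2))
    with tf.Session() as sess:
        # Exactly two of the four True positions stay True, chosen at random.
        print(sess.run(sampled))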
Example #2
    def _compute_loss(self,
                      prediction_tensor,
                      target_tensor,
                      weights,
                      class_indices=None):
        """Compute loss function.

        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of either shape [batch_size, num_anchors,
            num_classes] or [batch_size, num_anchors, 1]. If the shape is
            [batch_size, num_anchors, 1], all the classes are equally weighted.
          class_indices: (Optional) A 1-D integer tensor of class indices.
            If provided, computes loss only for the specified class indices.

        Returns:
          loss: a float tensor of shape [batch_size, num_anchors, num_classes]
            representing the value of the loss function.
        """
        if class_indices is not None:
            weights *= tf.reshape(
                ops.indices_to_dense_vector(class_indices,
                                            tf.shape(prediction_tensor)[2]),
                [1, 1, -1])
        per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
            labels=target_tensor, logits=prediction_tensor))
        return per_entry_cross_ent * weights
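The `class_indices` masking that recurs throughout these examples relies on broadcasting: the dense vector built from the indices is reshaped to [1, 1, num_classes], so multiplying it into `weights` zeroes out every unselected class across all batch and anchor positions. A toy sketch of just that step (values hypothetical):

    # Keep only classes 0 and 2 out of num_classes == 4.
    class_indices = tf.constant([0, 2])
    mask = tf.reshape(ops.indices_to_dense_vector(class_indices, 4), [1, 1, -1])
    # mask == [[[1., 0., 1., 0.]]]; `weights *= mask` broadcasts the per-class
    # mask over the batch and anchor dimensions.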
Example #3
  def _compute_loss(self,
                    prediction_tensor,
                    target_tensor,
                    weights,
                    class_indices=None):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape [batch_size, num_anchors]
      class_indices: (Optional) A 1-D integer tensor of class indices.
        If provided, computes loss only for the specified class indices.

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors, num_classes]
        representing the value of the loss function.
    """
    weights = tf.expand_dims(weights, 2)
    if class_indices is not None:
      weights *= tf.reshape(
          ops.indices_to_dense_vector(class_indices,
                                      tf.shape(prediction_tensor)[2]),
          [1, 1, -1])
    per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
        labels=target_tensor, logits=prediction_tensor))
    return per_entry_cross_ent * weights
Example #4
    def _compute_loss(self,
                      prediction_tensor,
                      target_tensor,
                      weights,
                      class_indices=None):
        """Compute loss function.

        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of shape [batch_size, num_anchors]
          class_indices: (Optional) A 1-D integer tensor of class indices.
            If provided, computes loss only for the specified class indices.

        Returns:
          loss: a (scalar) tensor representing the value of the loss function
                or a float tensor of shape [batch_size, num_anchors]
        """
        weights = tf.expand_dims(weights, 2)
        if class_indices is not None:
            weights *= tf.reshape(
                ops.indices_to_dense_vector(class_indices,
                                            tf.shape(prediction_tensor)[2]),
                [1, 1, -1])
        preds = tf.nn.sigmoid(prediction_tensor)
        preds = tf.where(tf.equal(target_tensor, 1), preds, 1. - preds)
        losses = -(1. - preds)**self._gamma * tf.log(preds + self._epsilon)
        if self._anchorwise_output:
            return tf.reduce_sum(losses * weights, 2)
        return tf.reduce_sum(losses * weights)
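This variant folds the focal modulation directly into the log term: with p_t the probability assigned to the true class, the per-entry loss is -(1 - p_t)**gamma * log(p_t + epsilon). A quick numeric sanity check in plain Python (the gamma and epsilon values are hypothetical):

    import math

    gamma, epsilon = 2.0, 1e-8
    for p_t in (0.9, 0.5, 0.1):
        print(p_t, -(1.0 - p_t) ** gamma * math.log(p_t + epsilon))
    # Well-classified entries (p_t = 0.9) are scaled by (1 - p_t)**2 = 0.01,
    # while hard entries (p_t = 0.1) keep most of their cross entropy.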
Example #5
    def _compute_loss(self,
                      prediction_tensor,
                      target_tensor,
                      weights,
                      class_indices=None):
        """Compute loss function.

        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of shape [batch_size, num_anchors]
          class_indices: (Optional) A 1-D integer tensor of class indices.
            If provided, computes loss only for the specified class indices.

        Returns:
          loss: a (scalar) tensor representing the value of the loss function
                or a float tensor of shape [batch_size, num_anchors]
        """
        weights = tf.expand_dims(weights, 2)
        if class_indices is not None:
            weights *= tf.reshape(
                ops.indices_to_dense_vector(class_indices,
                                            tf.shape(prediction_tensor)[2]),
                [1, 1, -1])
        per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
            labels=target_tensor, logits=prediction_tensor))
        if self._anchorwise_output:
            return tf.reduce_sum(per_entry_cross_ent * weights, 2)
        return tf.reduce_sum(per_entry_cross_ent * weights)
Example #6
  def subsample_indicator(indicator, num_samples):
    """Subsample indicator vector.

    Given a boolean indicator vector with M elements set to `True`, the function
    assigns all but `num_samples` of these previously `True` elements to
    `False`. If `num_samples` is greater than M, the original indicator vector
    is returned.

    Args:
      indicator: a 1-dimensional boolean tensor indicating which elements
        are allowed to be sampled and which are not.
      num_samples: int32 scalar tensor

    Returns:
      a boolean tensor with the same shape as the input (indicator) tensor
    """
    indices = tf.where(indicator)
    indices = tf.random_shuffle(indices)
    indices = tf.reshape(indices, [-1])

    num_samples = tf.minimum(tf.size(indices), num_samples)
    selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))

    selected_indicator = ops.indices_to_dense_vector(selected_indices,
                                                     tf.shape(indicator)[0])

    return tf.equal(selected_indicator, 1)
Example #7
    def _compute_loss(self,
                      prediction_tensor,
                      target_tensor,
                      weights,
                      class_indices=None):
        """Computes the loss value.

        Dice loss uses the area of the ground truth and prediction tensors for
        normalization. We compute area by summing along the pixel (2nd) dimension.

        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_pixels,
            num_classes] representing the predicted logits for each class.
            num_pixels denotes the total number of pixels in the spatial dimensions
            of the mask after flattening.
          target_tensor: A float tensor of shape [batch_size, num_pixels,
            num_classes] representing one-hot encoded classification targets.
            num_pixels denotes the total number of pixels in the spatial dimensions
            of the mask after flattening.
          weights: a float tensor of either shape [batch_size, num_pixels,
            num_classes] or [batch_size, num_pixels, 1]. If the shape is
            [batch_size, num_pixels, 1], all the classes are equally weighted.
          class_indices: (Optional) A 1-D integer tensor of class indices.
            If provided, computes loss only for the specified class indices.

        Returns:
          loss: a float tensor of shape [batch_size, num_classes]
            representing the value of the loss function.
        """
        if class_indices is not None:
            weights *= tf.reshape(
                ops.indices_to_dense_vector(class_indices,
                                            tf.shape(prediction_tensor)[2]),
                [1, 1, -1])

        if self.is_prediction_probability:
            prob_tensor = prediction_tensor
        else:
            prob_tensor = tf.nn.sigmoid(prediction_tensor)

        if self._squared_normalization:
            prob_tensor = tf.pow(prob_tensor, 2)
            target_tensor = tf.pow(target_tensor, 2)

        prob_tensor *= weights
        target_tensor *= weights

        prediction_area = tf.reduce_sum(prob_tensor, axis=1)
        gt_area = tf.reduce_sum(target_tensor, axis=1)

        intersection = tf.reduce_sum(prob_tensor * target_tensor, axis=1)
        dice_coeff = 2 * intersection / tf.maximum(gt_area + prediction_area,
                                                   1.0)
        dice_loss = 1 - dice_coeff

        return dice_loss
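The dice coefficient above is 2 * intersection / (prediction_area + gt_area), with areas summed over the pixel axis and the denominator clamped to at least 1.0 to avoid division by zero. A toy check with hard 0/1 masks (values hypothetical):

    import numpy as np

    pred = np.array([1., 1., 0., 0.])  # predicted mask probabilities
    gt = np.array([1., 0., 1., 0.])    # ground-truth mask
    intersection = (pred * gt).sum()   # 1.0
    dice = 2 * intersection / max(pred.sum() + gt.sum(), 1.0)  # 2*1/(2+2) = 0.5
    print(1.0 - dice)                  # dice loss = 0.5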
Example #8
  def test_indices_to_dense_vector_empty_indices_as_input(self):
    size = 500
    rand_indices = []

    expected_output = np.zeros(size, dtype=np.float32)

    tf_rand_indices = tf.constant(rand_indices)
    indicator = ops.indices_to_dense_vector(tf_rand_indices, size)

    with self.test_session() as sess:
      output = sess.run(indicator)
      self.assertAllEqual(output, expected_output)
      self.assertEqual(output.dtype, expected_output.dtype)
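The tests above pin down the contract of `ops.indices_to_dense_vector`: empty indices produce an all-default vector, and the output dtype defaults to float32. A minimal sketch of a function satisfying that contract (an illustrative assumption, not the library's actual implementation):

    def indices_to_dense_vector_sketch(indices, size, indices_value=1.0,
                                       default_value=0.0, dtype=tf.float32):
        # Fill a vector of length `size` with the default value, then stitch
        # `indices_value` into each of the given positions.
        size = tf.cast(size, tf.int32)
        defaults = tf.ones([size], dtype=dtype) * default_value
        values = tf.ones_like(indices, dtype=dtype) * indices_value
        return tf.dynamic_stitch(
            [tf.range(size), tf.cast(indices, tf.int32)], [defaults, values])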
Example #10
  def test_indices_to_dense_vector_all_indices_as_input(self):
    size = 500
    num_indices = 500
    rand_indices = np.random.permutation(np.arange(size))[0:num_indices]

    expected_output = np.ones(size, dtype=np.float32)

    tf_rand_indices = tf.constant(rand_indices)
    indicator = ops.indices_to_dense_vector(tf_rand_indices, size)

    with self.test_session() as sess:
      output = sess.run(indicator)
      self.assertAllEqual(output, expected_output)
      self.assertEqual(output.dtype, expected_output.dtype)
Example #12
 def _compute_loss(self,
                   prediction_tensor,
                   target_tensor,
                   weights,
                   class_indices=None):
     # Weights are the upper bounds.
     weights = tf.expand_dims(weights, 2)
     if class_indices is not None:
         weights *= tf.reshape(
             ops.indices_to_dense_vector(class_indices,
                                         tf.shape(prediction_tensor)[2]),
             [1, 1, -1])
     per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
         labels=target_tensor, logits=prediction_tensor))
     return (per_entry_cross_ent *
             tf.cast(prediction_tensor < weights, tf.float32))
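Here `weights` carries per-entry upper bounds on the logits rather than 0/1 masks: the cross entropy is kept only where the predicted logit is still below its bound, so predictions that are already confident enough stop receiving gradient. The gating term in isolation (values hypothetical):

    logits = tf.constant([[-1.0, 0.5, 3.0]])
    bounds = tf.constant([[2.0, 2.0, 2.0]])
    gate = tf.cast(logits < bounds, tf.float32)  # [[1., 1., 0.]]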
Example #13
  def test_indices_to_dense_vector_int(self):
    size = 500
    num_indices = 25
    rand_indices = np.random.permutation(np.arange(size))[0:num_indices]

    expected_output = np.zeros(size, dtype=np.int64)
    expected_output[rand_indices] = 1

    tf_rand_indices = tf.constant(rand_indices)
    indicator = ops.indices_to_dense_vector(
        tf_rand_indices, size, 1, dtype=tf.int64)

    with self.test_session() as sess:
      output = sess.run(indicator)
      self.assertAllEqual(output, expected_output)
      self.assertEqual(output.dtype, expected_output.dtype)
Example #15
      def fn(x, positive_fraction=positive_fraction):
          '''Pick indices from valid_inds.

          If positive_fraction is not None, use labels to do balanced sampling.
          '''
         labels, valid_inds = x
         indicators = ops.indices_to_dense_vector(
             valid_inds,
             tf.shape(labels)[0])
         indicators = tf.cast(indicators, tf.bool)
         if positive_fraction is None:
             positive_fraction = 1.0
             labels = tf.ones_like(labels, tf.bool)
         return util.balanced_subsample(indicators, labels,
                                        self._ncobj_proposals,
                                        positive_fraction)
Example #16
 def _compute_loss(self,
                   prediction_tensor,
                   target_tensor,
                   weights,
                   class_indices=None):
     weights = tf.expand_dims(weights, 2)
     if class_indices is not None:
         weights *= tf.reshape(
             ops.indices_to_dense_vector(class_indices,
                                         tf.shape(prediction_tensor)[2]),
             [1, 1, -1])
     # Directly regress to soft targets.
     # target_tensor = tf.concat([1 - weights, weights], axis=-1)
     # per_entry_cross_ent = (tf.nn.softmax_cross_entropy_with_logits_v2(
     #     labels=target_tensor, logits=prediction_tensor))
     per_entry_cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
         labels=target_tensor * weights, logits=prediction_tensor)
     return per_entry_cross_ent
Example #17
  def _compute_loss(self,
                    prediction_tensor,
                    target_tensor,
                    weights,
                    class_indices=None):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape [batch_size, num_anchors]
      class_indices: (Optional) A 1-D integer tensor of class indices.
        If provided, computes loss only for the specified class indices.

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
            or a float tensor of shape [batch_size, num_anchors]
    """
    weights = tf.expand_dims(weights, 2)
    if class_indices is not None:
      weights *= tf.reshape(
          ops.indices_to_dense_vector(class_indices,
                                      tf.shape(prediction_tensor)[2]),
          [1, 1, -1])
    per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
        labels=target_tensor, logits=prediction_tensor))
    prediction_probabilities = tf.sigmoid(prediction_tensor)
    p_t = ((target_tensor * prediction_probabilities) +
           ((1 - target_tensor) * (1 - prediction_probabilities)))
    modulating_factor = 1.0
    if self._gamma:
      modulating_factor = tf.pow(1.0 - p_t, self._gamma)
    alpha_weight_factor = 1.0
    if self._alpha is not None:
      alpha_weight_factor = (target_tensor * self._alpha +
                             (1 - target_tensor) * (1 - self._alpha))
    focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
                                per_entry_cross_ent)
    if self._anchorwise_output:
      return tf.reduce_sum(focal_cross_entropy_loss * weights, 2)
    return tf.reduce_sum(focal_cross_entropy_loss * weights)
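Examples #17 and #20 implement the standard sigmoid focal loss, FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t), as modulating and alpha factors multiplied onto the plain sigmoid cross entropy. A quick plain-Python check that the factored form matches the closed form (the alpha and gamma values are hypothetical):

    import math

    alpha, gamma = 0.25, 2.0
    x, z = 1.2, 1.0                  # logit and hard positive label
    p = 1.0 / (1.0 + math.exp(-x))   # sigmoid probability
    ce = -(z * math.log(p) + (1 - z) * math.log(1 - p))
    p_t = z * p + (1 - z) * (1 - p)
    alpha_t = z * alpha + (1 - z) * (1 - alpha)
    assert abs((1 - p_t) ** gamma * alpha_t * ce
               - (-alpha_t * (1 - p_t) ** gamma * math.log(p_t))) < 1e-12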
Example #18
  def test_indices_to_dense_vector_size_at_inference(self):
    size = 5000
    num_indices = 250
    all_indices = np.arange(size)
    rand_indices = np.random.permutation(all_indices)[0:num_indices]

    expected_output = np.zeros(size, dtype=np.float32)
    expected_output[rand_indices] = 1.

    tf_all_indices = tf.placeholder(tf.int32)
    tf_rand_indices = tf.constant(rand_indices)
    indicator = ops.indices_to_dense_vector(tf_rand_indices,
                                            tf.shape(tf_all_indices)[0])
    feed_dict = {tf_all_indices: all_indices}

    with self.test_session() as sess:
      output = sess.run(indicator, feed_dict=feed_dict)
      self.assertAllEqual(output, expected_output)
      self.assertEqual(output.dtype, expected_output.dtype)
Example #20
    def _compute_loss(self,
                      prediction_tensor,
                      target_tensor,
                      weights,
                      class_indices=None):
        """Compute loss function.

        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of shape [batch_size, num_anchors]
          class_indices: (Optional) A 1-D integer tensor of class indices.
            If provided, computes loss only for the specified class indices.

        Returns:
          loss: a (scalar) tensor representing the value of the loss function
                or a float tensor of shape [batch_size, num_anchors]
        """
        weights = tf.expand_dims(weights, 2)
        if class_indices is not None:
            weights *= tf.reshape(
                ops.indices_to_dense_vector(class_indices,
                                            tf.shape(prediction_tensor)[2]),
                [1, 1, -1])
        per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
            labels=target_tensor, logits=prediction_tensor))
        prediction_probabilities = tf.sigmoid(prediction_tensor)
        p_t = ((target_tensor * prediction_probabilities) +
               ((1 - target_tensor) * (1 - prediction_probabilities)))
        modulating_factor = 1.0
        if self._gamma:
            modulating_factor = tf.pow(1.0 - p_t, self._gamma)
        alpha_weight_factor = 1.0
        if self._alpha is not None:
            alpha_weight_factor = (target_tensor * self._alpha +
                                   (1 - target_tensor) * (1 - self._alpha))
        focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
                                    per_entry_cross_ent)
        if self._anchorwise_output:
            return tf.reduce_sum(focal_cross_entropy_loss * weights, 2)
        return tf.reduce_sum(focal_cross_entropy_loss * weights)
Example #21
    def test_indices_to_dense_vector_custom_values(self):
        size = 100
        num_indices = 10
        rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
        indices_value = np.random.rand(1)
        default_value = np.random.rand(1)

        expected_output = np.float32(np.ones(size) * default_value)
        expected_output[rand_indices] = indices_value

        tf_rand_indices = tf.constant(rand_indices)
        indicator = ops.indices_to_dense_vector(tf_rand_indices,
                                                size,
                                                indices_value=indices_value,
                                                default_value=default_value)

        with self.test_session() as sess:
            output = sess.run(indicator)
            self.assertAllClose(output, expected_output)
            self.assertEqual(output.dtype, expected_output.dtype)
Example #22
 def _compute_loss(self,
                   prediction_tensor,
                   target_tensor,
                   weights,
                   class_indices=None):
     # Weights are the upper bounds.
     weights = tf.expand_dims(weights, 2)
     if class_indices is not None:
         weights *= tf.reshape(
             ops.indices_to_dense_vector(class_indices,
                                         tf.shape(prediction_tensor)[2]),
             [1, 1, -1])
     per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
         labels=target_tensor, logits=prediction_tensor))
     squared_dist = tf.square(
         tf.sigmoid(prediction_tensor) - target_tensor) * 20.
     above_conf = tf.cast(prediction_tensor > weights, tf.float32)
     # Apply quadratic loss if predictions go above soft target.
     loss = per_entry_cross_ent + above_conf * squared_dist
     return loss
Example #23
  def test_indices_to_dense_vector_custom_values(self):
    size = 100
    num_indices = 10
    rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
    indices_value = np.random.rand(1)
    default_value = np.random.rand(1)

    expected_output = np.float32(np.ones(size) * default_value)
    expected_output[rand_indices] = indices_value

    tf_rand_indices = tf.constant(rand_indices)
    indicator = ops.indices_to_dense_vector(
        tf_rand_indices,
        size,
        indices_value=indices_value,
        default_value=default_value)

    with self.test_session() as sess:
      output = sess.run(indicator)
      self.assertAllClose(output, expected_output)
      self.assertEqual(output.dtype, expected_output.dtype)
Example #24
    def _compute_loss(
        self,
        prediction_tensor,  # the predicted logits
        target_tensor,
        weights,  # 1 or 0 per anchor, based on whether the default box matches a ground-truth box
        class_indices=None):
        """Compute loss function.

        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of shape [batch_size, num_anchors]
          class_indices: (Optional) A 1-D integer tensor of class indices.
            If provided, computes loss only for the specified class indices.

        Returns:
          loss: a (scalar) tensor representing the value of the loss function
                or a float tensor of shape [batch_size, num_anchors]
        """

        # Standard sigmoid cross entropy:
        #   z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)),
        # where z is the class label and x is the predicted logit
        # (see the TensorFlow documentation if needed).
        # This also works with probabilistic (soft) labels, not only hard 0/1 labels.

        weights = tf.expand_dims(weights, 2)
        if class_indices is not None:
            weights *= tf.reshape(
                ops.indices_to_dense_vector(class_indices,
                                            tf.shape(prediction_tensor)[2]),
                [1, 1, -1])
        per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
            labels=target_tensor, logits=prediction_tensor))
        if self._anchorwise_output:
            return tf.reduce_sum(per_entry_cross_ent * weights, 2)
        return tf.reduce_sum(per_entry_cross_ent * weights)