def _generator_loss(self, sr_predictions,
                        ground_truth_predictions) -> float:
        """Compute the relativistic-average adversarial loss for the generator.

        Each batch of discriminator predictions is centered on the mean of
        the opposing batch, then scored with binary cross-entropy against
        flipped labels (zeros for ground truth, ones for super-resolution).
        """
        sr_mean = tf.math.reduce_mean(sr_predictions)
        gt_mean = tf.math.reduce_mean(ground_truth_predictions)

        gt_relative = ground_truth_predictions - sr_mean
        sr_relative = sr_predictions - gt_mean

        # Reshape [batch_size, 1] -> [1, batch_size]: with the original
        # shape `apply_softmax` was buggy and produced an all-ones output
        # like [1, 1, 1, 1, ...].
        gt_relative = self.reshape_tensor_to_softmax(gt_relative)
        sr_relative = self.reshape_tensor_to_softmax(sr_relative)

        gt_term = self._compute_binary_crossentropy(
            tf.zeros_like(gt_relative),
            apply_softmax(gt_relative),
        )
        sr_term = self._compute_binary_crossentropy(
            tf.ones_like(sr_relative),
            apply_softmax(sr_relative),
        )
        # Average the two relativistic terms.
        return (gt_term + sr_term) / 2
    def _compute_arcloss(self,
                         embeddings,
                         ground_truth,
                         num_classes: int,
                         net_type: str = 'syn') -> float:
        """Compute the ArcLoss (additive angular margin loss).

        Uses `self.scale` and `self.margin` as the scaling factor and the
        angular margin, respectively.

        ### Parameters
            embeddings: Batch of scaled logits (cosine similarities times\
            `self.scale`) where the loss will be calculated on.
            ground_truth: Batch of Ground Truth class indices.
            num_classes: Total number of classes in the dataset.
            net_type: Network identifier; currently unused by this method.

        ### Returns
            The loss value."""
        original_target_embeddings = embeddings
        # Undo the upstream scaling to recover cos(theta); assumes the
        # result lies in [-1, 1] so that acos is well defined.
        cos_theta = original_target_embeddings / self.scale
        theta = tf.acos(cos_theta)

        # Add the angular margin to the angle, then rescale.
        z = theta + self.margin
        marginal_target_embeddings = tf.cos(z) * self.scale

        one_hot_vector = tf.one_hot(ground_truth, depth=num_classes)

        # Replace the logit with its margin-penalized version only at the
        # target class; all other classes keep the original logit.
        difference = marginal_target_embeddings - original_target_embeddings
        new_one_hot = one_hot_vector * difference

        softmax_output = apply_softmax(original_target_embeddings +
                                       new_one_hot)
        return self._compute_categorical_crossentropy(softmax_output,
                                                      one_hot_vector)
Пример #3
0
    def compute_arcloss(self, embeddings, ground_truth) -> float:
        """Compute the ArcLoss (additive angular margin loss).

        ### Parameters
            embeddings: Batch of Embedding vectors where loss will be calculated on.
            ground_truth: Batch of Ground Truth classes.

        ### Returns
            The loss value."""
        # Recover cos(theta) by undoing the scaling applied upstream.
        cosine = embeddings / self.scale
        angle = tf.acos(cosine)

        # Apply the additive angular margin, then rescale.
        margined = tf.cos(angle + self.margin) * self.scale

        targets = tf.one_hot(ground_truth, depth=self.num_classes)

        # Swap in the margin-penalized logit only at the target class.
        adjusted = embeddings + targets * (margined - embeddings)

        return self._compute_categorical_crossentropy(
            apply_softmax(adjusted), targets)
Пример #4
0
def test_softmax_zeros_output(zeros_array, softmax_zeros_array_output):
    """Softmax of an all-zeros array must match the expected fixture."""
    output = apply_softmax(zeros_array)
    # `.all()` already yields a boolean; comparing it to True with `==`
    # was redundant (flagged by E712-style lint rules).
    assert (output == softmax_zeros_array_output).numpy().all()
Пример #5
0
def test_softmax_output_sum(softmax_input_minimal):
    """Softmax probabilities must sum to 1 (within float tolerance)."""
    output = apply_softmax(softmax_input_minimal)
    # Exact float equality (`== 1.0`) is brittle for a reduction over
    # floating-point values; compare within a small tolerance instead.
    assert abs(float(tf.reduce_sum(output)) - 1.0) <= 1e-6
Пример #6
0
def test_softmax_output(softmax_real_input, softmax_real_output):
    """Softmax of a real input matches the precomputed fixture output."""
    output = apply_softmax(softmax_real_input)
    # `.all()` already yields a boolean; the `== True` comparison was
    # redundant.
    # NOTE(review): element-wise exact equality assumes the fixture stores
    # the exact tensor `apply_softmax` produces — confirm; otherwise a
    # tolerance-based comparison would be safer.
    assert (output == softmax_real_output).numpy().all()