Example #1
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            Negative Sampling loss
        """
        positives, negatives, mask, weights = tensors
        true_losses = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(positives), logits=positives)
        sampled_losses = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(negatives), logits=negatives)
        event_scores = true_losses + WeightedAverage()(
            (sampled_losses, tf.cast(mask, dtype=tf.float32)))
        event_weights = weights * tf.cast(
            tf.reduce_any(input_tensor=mask, axis=-1), dtype=tf.float32)
        return tf.math.divide_no_nan(
            tf.reduce_sum(input_tensor=event_scores * event_weights),
            tf.reduce_sum(input_tensor=event_weights))
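
The two cross-entropy terms have simple closed forms: with label 1 the loss on a positive logit x is -log(sigmoid(x)), and with label 0 the loss on a negative logit is -log(sigmoid(-x)). A minimal sanity check of these identities on toy tensors (assumes TensorFlow 2.x; the tensor values are made up for illustration):

import tensorflow as tf

positives = tf.constant([[0.8]])            # shape = (batch=1, num_events=1)
negatives = tf.constant([[[0.1, -0.5]]])    # shape = (1, 1, num_negatives=2)

# With label 1, sigmoid cross entropy reduces to -log(sigmoid(logits))
true_losses = tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.ones_like(positives), logits=positives)
print(tf.reduce_max(tf.abs(true_losses + tf.math.log_sigmoid(positives))))   # ~0.0

# With label 0, it reduces to -log(1 - sigmoid(logits)) = -log(sigmoid(-logits))
sampled_losses = tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.zeros_like(negatives), logits=negatives)
print(tf.reduce_max(tf.abs(sampled_losses + tf.math.log_sigmoid(-negatives))))  # ~0.0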
Example #2
    def forward(self, tensors, mode: str = None):
        """Computes Triplet Precision

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            Triplet precision
        """
        # Retrieve positives and negatives logits
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)

        # One triplet precision per event
        event_triplet = WeightedAverage()(
            (tf.cast(positives > negatives, tf.float32),
             tf.cast(mask, tf.float32)),
            mode)

        # Each event contributes according to its weight
        event_weights = weights * tf.cast(
            tf.reduce_any(input_tensor=mask, axis=-1), dtype=tf.float32)
        return tf.math.divide_no_nan(
            tf.reduce_sum(input_tensor=event_triplet * event_weights),
            tf.reduce_sum(input_tensor=event_weights))
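
Assuming WeightedAverage computes a mask-weighted mean, the metric is simply the fraction of valid (positive, negative) comparisons that the positive wins. A toy check with plain TF ops (assumes TensorFlow 2.x; the values are illustrative):

import tensorflow as tf

positives = tf.constant([[[2.0, 2.0, 2.0]]])    # already expanded to negatives' shape
negatives = tf.constant([[[1.0, 3.0, 0.5]]])    # (batch=1, num_events=1, num_negatives=3)
mask = tf.constant([[[True, True, False]]])     # third negative is padding

hits = tf.cast(positives > negatives, tf.float32)   # [[[1., 0., 1.]]]
valid = tf.cast(mask, tf.float32)
precision = tf.math.divide_no_nan(
    tf.reduce_sum(hits * valid), tf.reduce_sum(valid))
print(precision)  # 0.5: one win out of the two valid comparisons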
Example #3
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer
        (details: https://arxiv.org/pdf/1706.03847.pdf)

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            TopOne Max loss
        """
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives], broadcast=False)
        softmax_scores = Softmax()((negatives, tf.cast(mask, tf.float32)))
        losses = softmax_scores * (
            tf.nn.sigmoid(negatives - positives) +
            tf.nn.sigmoid(tf.square(negatives)))
        # One loss per event, average of scores : (batch, num_events)
        event_scores = WeightedAverage()((losses, tf.cast(mask, tf.float32)))
        # Each event contributes according to its weight
        event_weights = weights * tf.cast(
            tf.reduce_any(input_tensor=mask, axis=-1), dtype=tf.float32)
        event_losses = event_scores * event_weights
        return tf.math.divide_no_nan(
            tf.reduce_sum(input_tensor=event_losses),
            tf.reduce_sum(input_tensor=event_weights))
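
Per the linked paper, TopOne Max weights each negative's Top1 term by the softmax of the negative scores, so the hardest (highest-scoring) negatives dominate the loss. A sketch of the core computation with plain TF ops (assumes TensorFlow 2.x; the library's Softmax layer presumably also applies the mask, which this sketch omits for brevity):

import tensorflow as tf

positives = tf.constant([[[1.0, 1.0]]])     # already expanded to negatives' shape
negatives = tf.constant([[[2.0, -1.0]]])    # (batch=1, num_events=1, num_negatives=2)

top1_terms = (tf.nn.sigmoid(negatives - positives)
              + tf.nn.sigmoid(tf.square(negatives)))
softmax_weights = tf.nn.softmax(negatives, axis=-1)     # ~[0.95, 0.05]
loss = tf.reduce_sum(softmax_weights * top1_terms, axis=-1)
print(loss)  # ~1.67, dominated by the hard negative with score 2.0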
Example #4
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            BPR loss
        """
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)
        # One score per negative : (batch, num_events, num_negatives)
        scores = -tf.math.log_sigmoid(positives - negatives)
        # One loss per event, average of scores : (batch, num_events)
        event_scores = WeightedAverage()((scores, tf.cast(mask, tf.float32)))
        # Each event contributes according to its weight
        event_weights = weights * tf.cast(
            tf.reduce_any(input_tensor=mask, axis=-1), dtype=tf.float32)
        event_losses = event_scores * event_weights
        return tf.math.divide_no_nan(
            tf.reduce_sum(input_tensor=event_losses),
            tf.reduce_sum(input_tensor=event_weights))
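
The BPR score per (positive, negative) pair is -log(sigmoid(positive - negative)): close to zero when the positive outranks the negative, and growing roughly linearly in the margin when it does not. A toy check (assumes TensorFlow 2.x; the values are illustrative):

import tensorflow as tf

positives = tf.constant([[[1.5, 1.5]]])     # already expanded to negatives' shape
negatives = tf.constant([[[0.5, 2.5]]])     # (batch=1, num_events=1, num_negatives=2)

scores = -tf.math.log_sigmoid(positives - negatives)
print(scores)  # ~[[[0.31, 1.31]]]: small when the positive wins, large when it loses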
Example #5
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            BPR loss
        """
        # Retrieve positives and negatives logits
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)

        # One score per event
        event_scores = WeightedAverage()(
            (-tf.math.log_sigmoid(positives - negatives),
             tf.cast(mask, dtype=tf.float32)))

        # Each event contributes according to its weight
        event_weights = weights * tf.cast(
            tf.reduce_any(input_tensor=mask, axis=-1), dtype=tf.float32)
        return tf.math.divide_no_nan(
            tf.reduce_sum(input_tensor=event_scores * event_weights),
            tf.reduce_sum(input_tensor=event_weights))
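
One detail worth noting in this variant (and the others): tf.reduce_any over the negatives axis zeroes out the weight of any event whose mask is entirely False, so fully-padded events cannot skew the final mean. A small demonstration (assumes TensorFlow 2.x):

import tensorflow as tf

mask = tf.constant([[[True, False], [False, False]]])   # (batch=1, 2 events, 2 negatives)
weights = tf.constant([[1.0, 1.0]])

event_weights = weights * tf.cast(tf.reduce_any(mask, axis=-1), tf.float32)
print(event_weights)  # [[1., 0.]]: the fully-masked second event drops out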
Example #6
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer
        (details: https://arxiv.org/pdf/1706.03847.pdf)

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            Top1 loss
        """
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)
        losses = tf.nn.sigmoid(negatives - positives) + tf.nn.sigmoid(
            tf.square(negatives))
        event_scores = WeightedAverage()(
            (losses, tf.cast(mask, dtype=tf.float32)))
        # Each event contributes according to its weight
        event_weights = weights * tf.cast(
            tf.reduce_any(input_tensor=mask, axis=-1), dtype=tf.float32)
        event_losses = event_scores * event_weights
        return tf.math.divide_no_nan(tf.reduce_sum(input_tensor=event_losses),
                                     tf.reduce_sum(input_tensor=event_weights))
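
Unlike the TopOne Max variant in Example #3, plain Top1 averages the per-negative terms uniformly over the valid negatives instead of softmax-weighting them. A sketch of that masked mean with plain TF ops (assumes TensorFlow 2.x, and that WeightedAverage computes a mask-weighted mean):

import tensorflow as tf

positives = tf.constant([[[1.0, 1.0]]])     # already expanded to negatives' shape
negatives = tf.constant([[[2.0, -1.0]]])
mask = tf.constant([[[True, True]]])

top1_terms = (tf.nn.sigmoid(negatives - positives)
              + tf.nn.sigmoid(tf.square(negatives)))
valid = tf.cast(mask, tf.float32)
event_scores = tf.math.divide_no_nan(
    tf.reduce_sum(top1_terms * valid, axis=-1), tf.reduce_sum(valid, axis=-1))
print(event_scores)  # ~[[1.28]]: uniform mean, vs. ~1.67 with softmax weighting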