Example #1
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer
        (details: https://arxiv.org/pdf/1706.03847.pdf)

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            TopOne Max loss
        """
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives], broadcast=False)
        softmax_scores = Softmax()((negatives, tf.to_float(mask)))
        losses = tf.multiply(softmax_scores, tf.nn.sigmoid(negatives - positives) + tf.nn.sigmoid(tf.square(negatives)))
        # One loss per event, average of scores : (batch, num_events)
        event_scores = WeightedAverage()((losses, tf.to_float(mask)))
        # Each event contributes according to its weight
        event_weights = weights * tf.to_float(tf.reduce_any(mask, axis=-1))
        event_losses = event_scores * event_weights
        return tf.div_no_nan(tf.reduce_sum(event_losses), tf.reduce_sum(event_weights))
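For reference, the loss above follows the TOP1-max objective from the paper in the docstring: each negative's TOP1 term is weighted by its softmax score s_j over the masked negatives. With r_i the positive logit and r_j the negative logits:

    L_{TOP1-max} = \sum_j s_j \left( \sigma(r_j - r_i) + \sigma(r_j^2) \right)

The WeightedAverage over the mask additionally divides by the number of valid negatives, so the computed value is this quantity averaged rather than summed.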
Example #2
File: reduce.py Project: mindis/deepr
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer"""
        values, weights = tensors

        # Values and weights need to have the same shape up to axis
        # and compatible after axis
        axis = len(weights.shape) - 1
        weights = tf.broadcast_to(weights,
                                  tf.shape(values)[:len(weights.shape)])
        values, weights = make_same_shape([values, weights], broadcast=False)

        # Reduce weighted values and weights
        weighted_values = tf.reduce_sum(values * weights, axis=axis)
        sum_weights = tf.reduce_sum(weights, axis=axis)

        # Average values and weights, take care of all weights zeros
        if self.default is None:
            return weighted_values / sum_weights
        elif self.default == 0:
            weighted_average = tf.div_no_nan(weighted_values, sum_weights)
        else:
            weighted_average = tf.where(
                tf.equal(sum_weights, 0),
                self.default * tf.ones_like(weighted_values),
                weighted_values / sum_weights)
        return weighted_average
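The three branches on self.default can be checked with a minimal NumPy sketch (illustrative only, not the deepr API):

import numpy as np

def weighted_average(values, weights, default=None, axis=-1):
    # Mirrors the reduction above: sum(values * weights) / sum(weights),
    # with configurable behaviour when all weights along `axis` are zero.
    weighted_values = np.sum(values * weights, axis=axis)
    sum_weights = np.sum(weights, axis=axis)
    if default is None:
        return weighted_values / sum_weights  # nan/inf on all-zero weights
    safe = np.where(sum_weights == 0, 1.0, sum_weights)
    return np.where(sum_weights == 0, default, weighted_values / safe)

values = np.array([[1.0, 2.0], [3.0, 4.0]])
weights = np.array([[1.0, 1.0], [0.0, 0.0]])
print(weighted_average(values, weights, default=0.0))  # [1.5 0. ]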
Example #3
    def forward(self, tensors, mode: str = None):
        """Computes Triplet Precision

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            Triplet precision
        """
        # Retrieve positives and negatives logits
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)

        # One triplet precision per event
        event_triplet = WeightedAverage()(
            (tf.cast(positives > negatives, tf.float32),
             tf.cast(mask, tf.float32)),
            mode)

        # Each event contributes according to its weight
        event_weights = weights * tf.cast(
            tf.reduce_any(input_tensor=mask, axis=-1), dtype=tf.float32)
        return tf.math.divide_no_nan(
            tf.reduce_sum(input_tensor=event_triplet * event_weights),
            tf.reduce_sum(input_tensor=event_weights))
Example #4
File: top_one.py Project: Jasputtar/deepr
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer
        (details: https://arxiv.org/pdf/1706.03847.pdf)

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            Top1 loss
        """
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)
        losses = tf.nn.sigmoid(negatives - positives) + tf.nn.sigmoid(
            tf.square(negatives))
        event_scores = WeightedAverage()(
            (losses, tf.cast(mask, dtype=tf.float32)))
        # Each event contributes according to its weight
        event_weights = weights * tf.cast(
            tf.reduce_any(input_tensor=mask, axis=-1), dtype=tf.float32)
        event_losses = event_scores * event_weights
        return tf.math.divide_no_nan(tf.reduce_sum(input_tensor=event_losses),
                                     tf.reduce_sum(input_tensor=event_weights))
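For comparison, the plain TOP1 loss from the same paper averages the per-negative terms uniformly over the N_S sampled negatives:

    L_{TOP1} = \frac{1}{N_S} \sum_j \left( \sigma(r_j - r_i) + \sigma(r_j^2) \right)

Here the mask-weighted average plays the role of 1/N_S, counting only valid negatives.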
Example #5
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)

        Returns
        -------
        tf.Tensor
            BPR Max loss
        """
        positives, negatives = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)
        softmax_scores = Softmax()((negatives, tf.ones_like(negatives)))
        losses = -tf.log(
            tf.reduce_sum(
                tf.multiply(softmax_scores,
                            tf.nn.sigmoid(positives - negatives)), -1))
        # add bpr_max regularisation
        bpr_regularization = tf.multiply(
            tf.constant(self.bpr_max_regularizer, dtype=tf.float32),
            tf.reduce_sum(tf.multiply(softmax_scores, tf.square(negatives)),
                          -1),
        )
        scores = losses + bpr_regularization
        return Average()(scores, mode)
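This matches the BPR-max objective from the paper cited in Example #1, with lambda = bpr_max_regularizer:

    L_{BPR-max} = -\log \sum_j s_j \, \sigma(r_i - r_j) + \lambda \sum_j s_j r_j^2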
Example #6
File: core.py Project: maofeng1709/deepr
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer"""
        tensors = make_same_shape(tensors, broadcast=False)
        acc = 1
        for inp in tensors:
            acc *= inp
        return acc
Example #7
File: bpr.py Project: denkuzin/deepr
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            BPR loss
        """
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)
        # One score per negative : (batch, num_events, num_negatives)
        scores = -tf.log_sigmoid(positives - negatives)
        # One loss per event, average of scores : (batch, num_events)
        event_scores = WeightedAverage()((scores, tf.to_float(mask)))
        # Each event contributes according to its weight
        event_weights = weights * tf.to_float(tf.reduce_any(mask, axis=-1))
        event_losses = event_scores * event_weights
        return tf.div_no_nan(tf.reduce_sum(event_losses),
                             tf.reduce_sum(event_weights))
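The per-negative score is the classic BPR loss of Rendle et al. (https://arxiv.org/pdf/1205.2618.pdf, also referenced in Example #15):

    L_{BPR} = -\log \sigma(r_i - r_j)

averaged over valid negatives via the mask, then over events via the weights.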
Example #8
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            BPR loss
        """
        # Retrieve positives and negatives logits
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)

        # One score per event
        event_scores = WeightedAverage()(
            (-tf.math.log_sigmoid(positives - negatives),
             tf.cast(mask, dtype=tf.float32)))

        # Each event contributes according to its weight
        event_weights = weights * tf.cast(
            tf.reduce_any(input_tensor=mask, axis=-1), dtype=tf.float32)
        return tf.math.divide_no_nan(
            tf.reduce_sum(input_tensor=event_scores * event_weights),
            tf.reduce_sum(input_tensor=event_weights))
Example #9
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives: shape = (batch, num_events)
            - negatives: shape = (batch, num_events, num_negatives)
            - mask: shape = (batch, num_events, num_negatives)

        Returns
        -------
        tf.Tensor
            ClickRank
        """
        positives, negatives, mask = tensors
        # One score per negative : (batch, num_events, num_negatives)
        positives, negatives = make_same_shape([positives, negatives], broadcast=False)
        positives_greater_negatives = tf.greater(positives, negatives)
        # One score per event, average of ranks : (batch, num_events)
        eps = 1e-8
        mask_float = tf.cast(mask, dtype=tf.float32)
        negatives_sum = tf.reduce_sum(input_tensor=tf.cast(positives_greater_negatives, dtype=tf.float32) * mask_float, axis=-1)
        # In case no negatives, click rank would be 0.5 (random).
        # Events with no negatives are then removed via masking, so it
        # should not impact the final loss in any way.
        event_ranks = 1.0 - (negatives_sum + eps) / (tf.reduce_sum(input_tensor=mask_float, axis=-1) + eps * 2)
        # Each event contributes according to its weight
        event_mask = tf.cast(tf.reduce_any(input_tensor=mask, axis=-1), dtype=tf.float32)
        event_ranks = event_ranks * event_mask
        return tf.reduce_sum(input_tensor=event_ranks) / tf.reduce_sum(input_tensor=event_mask)
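A minimal NumPy check of the rank computation (illustrative only; broadcasting with [..., None] stands in for make_same_shape):

import numpy as np

positives = np.array([[2.0]])              # (batch=1, num_events=1)
negatives = np.array([[[1.0, 3.0, 0.5]]])  # (1, 1, num_negatives=3)
mask = np.array([[[True, True, True]]])

wins = (positives[..., None] > negatives) & mask
eps = 1e-8
event_ranks = 1.0 - (wins.sum(-1) + eps) / (mask.sum(-1) + 2 * eps)
print(event_ranks)  # ~[[0.333]]: the positive outranks 2 of 3 negatives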
Example #10
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer"""
        tensors = [
            tensor if tensor.dtype == tf.string else tf.as_string(tensor)
            for tensor in tensors
        ]
        tensors = make_same_shape(tensors)
        return tf.string_join(tensors, separator=self.separator)
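A possible usage sketch (the tensor values are made up; note tf.string_join was renamed tf.strings.join in TF2):

import tensorflow as tf

ids = tf.constant([1, 2])
names = tf.constant(["a", "b"])
# Non-string tensors are converted first, as in the layer above
print(tf.strings.join([tf.as_string(ids), names], separator="-"))
# [b'1-a' b'2-b']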
Example #11
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer
        (details: https://arxiv.org/pdf/1706.03847.pdf)

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            BPR Max loss
        """
        positives, negatives, mask, weights = tensors
        mask = tf.cast(mask, tf.float32)
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)
        no_sampled_logits = tf.cast(
            tf.greater_equal(tf.reduce_sum(mask, -1), 0), tf.float32)
        softmax_scores = Softmax()((negatives, mask))
        # compute bpr_max losses
        losses = -tf.multiply(
            no_sampled_logits,
            tf.log(
                tf.reduce_sum(
                    tf.multiply(tf.multiply(softmax_scores, tf.nn.sigmoid(positives - negatives)), mask),
                    axis=2,
                ) + 1e-8
            ),
        )
        # compute regularization part
        bpr_regularization = tf.multiply(
            tf.constant(self.bpr_max_regularizer, dtype=tf.float32),
            tf.reduce_sum(tf.multiply(tf.multiply(softmax_scores, tf.square(negatives)), mask), axis=2),
        )
        losses_with_regularization = losses + bpr_regularization
        # One loss per event, average of scores : (batch, num_events)
        # TODO: fix this line, it seems it's doing averaging twice
        # event_scores = WeightedAverage()((losses_with_regularization, mask))
        event_scores = losses_with_regularization
        # Each event contributes according to its weight
        event_weights = weights * tf.to_float(
            tf.reduce_any(tf.cast(mask, tf.bool), axis=-1))
        event_losses = event_scores * event_weights
        return tf.div_no_nan(tf.reduce_sum(event_losses),
                             tf.reduce_sum(event_weights))
Example #12
File: reduce.py Project: denkuzin/deepr
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer"""
        values, weights = tensors
        axis = len(weights.shape) - 1
        values, weights = make_same_shape([values, weights], broadcast=False)
        weighted_values = tf.reduce_sum(values * weights, axis=axis)
        sum_weights = tf.reduce_sum(weights, axis=axis)
        if self.default is None:
            return weighted_values / sum_weights
        elif self.default == 0:
            weighted_average = tf.div_no_nan(weighted_values, sum_weights)
        else:
            weighted_average = tf.where(
                tf.equal(sum_weights, 0),
                self.default * tf.ones_like(weighted_values),
                weighted_values / sum_weights)
        return weighted_average
Example #13
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)

        Returns
        -------
        tf.Tensor
            Top1 loss
        """
        positives, negatives = tensors
        positives, negatives = make_same_shape([positives, negatives], broadcast=False)
        losses = tf.nn.sigmoid(negatives - positives) + tf.nn.sigmoid(tf.square(negatives))
        return Average()(losses, mode)
Example #14
File: bpr.py Project: denkuzin/deepr
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives: shape = [batch]
            - negatives: shape = [batch]

        Returns
        -------
        tf.Tensor
            BPR loss
        """
        positives, negatives = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)
        losses = -tf.log_sigmoid(positives - negatives)
        return Average()(losses, mode)
Example #15
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer
        (details: https://arxiv.org/pdf/1205.2618.pdf)

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)

        Returns
        -------
        tf.Tensor
            BPR loss
        """
        positives, negatives = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)
        losses = -tf.math.log_sigmoid(positives - negatives)
        return Average()(losses, mode)
Example #16
File: core.py Project: maofeng1709/deepr
def LogicalOr(tensors):
    """Perform logical_or on two tensors of compatible shapes."""
    t1, t2 = make_same_shape(tensors, broadcast=False)
    return tf.logical_or(t1, t2)
Example #17
File: core.py Project: maofeng1709/deepr
def Add(tensors):
    """Add two tensors of any compatible shapes."""
    t1, t2 = make_same_shape(tensors, broadcast=False)
    return t1 + t2
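make_same_shape is a deepr helper; a plain-TF sketch of the effect for Add, using explicit broadcasting instead (values are made up):

import tensorflow as tf

t1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # (2, 2)
t2 = tf.constant([10.0, 20.0])              # (2,)
t2 = tf.broadcast_to(t2, tf.shape(t1))      # align shapes before adding
print(t1 + t2)  # [[11. 22.] [13. 24.]]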