Example #1
def range_encode(value, min, max, steps):
    # One-hot encode a scalar into one of `steps` bins spanning [min, max].
    value = torch.Tensor([value])
    range_space = torch.linspace(min, max, steps)
    # Number of bin edges that `value` meets or exceeds.
    greater = torch.greater_equal(value, range_space).sum()
    encoded = torch.zeros_like(range_space)
    encoded[greater - 1] += 1  # note: a value below `min` wraps to the last bin via index -1
    return encoded
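A minimal usage sketch, assuming torch is imported and the range_encode definition above is in scope: the scalar is mapped to a one-hot vector over the linspace bin edges.

import torch

encoded = range_encode(0.3, 0.0, 1.0, 5)   # edges: 0.00, 0.25, 0.50, 0.75, 1.00
print(encoded)                              # tensor([0., 1., 0., 0., 0.])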
Example #2
 def comparison_ops(self):
     a = torch.randn(4)
     b = torch.randn(4)
     return (
         torch.allclose(a, b),
         torch.argsort(a),
         torch.eq(a, b),
         torch.equal(a, b),
         torch.ge(a, b),
         torch.greater_equal(a, b),
         torch.gt(a, b),
         torch.greater(a, b),
         torch.isclose(a, b),
         torch.isfinite(a),
         torch.isin(a, b),
         torch.isinf(a),
         torch.isposinf(a),
         torch.isneginf(a),
         torch.isnan(a),
         torch.isreal(a),
         torch.kthvalue(a, 1),
         torch.le(a, b),
         torch.less_equal(a, b),
         torch.lt(a, b),
         torch.less(a, b),
         torch.maximum(a, b),
         torch.minimum(a, b),
         torch.fmax(a, b),
         torch.fmin(a, b),
         torch.ne(a, b),
         torch.not_equal(a, b),
         torch.sort(a),
         torch.topk(a, 1),
         torch.msort(a),
     )
Example #3
def general_weibull_pdf(x, shape, loc, scale):
    y = x - loc
    key = torch.greater_equal(y, 0)
    pdf = torch.zeros_like(y)
    pdf[key] = (shape[key] / scale[key]) * (y[key] / scale[key])**(
        shape[key] - 1) * torch.exp(-(y[key] / scale[key])**shape[key])
    return pdf
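A usage sketch for the function above; note that shape, loc and scale are assumed to be tensors with the same shape as x, since they are indexed with the same boolean mask.

import torch

x = torch.linspace(-1.0, 5.0, 7)
shape = torch.full_like(x, 2.0)   # Weibull shape parameter k
loc = torch.zeros_like(x)         # location (shift) parameter
scale = torch.ones_like(x)        # scale parameter lambda
pdf = general_weibull_pdf(x, shape, loc, scale)   # zero wherever x < loc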
Example #4
    def forward(self, inputs: Dict[str, torch.Tensor], feature_name: str) -> Dict[str, torch.Tensor]:
        logits = output_feature_utils.get_output_feature_tensor(inputs, feature_name, self.logits_key)
        probabilities = torch.sigmoid(logits)

        predictions = torch.greater_equal(probabilities, self.threshold)
        predictions = predictions.type(torch.int64)

        return {self.predictions_key: predictions, self.probabilities_key: probabilities, self.logits_key: logits}
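Outside of the surrounding output-feature class, the prediction step reduces to a sigmoid followed by a threshold; a small illustrative sketch with a hypothetical threshold of 0.5:

import torch

logits = torch.tensor([-2.0, 0.1, 3.0])
probabilities = torch.sigmoid(logits)                  # tensor([0.1192, 0.5250, 0.9526])
predictions = torch.greater_equal(probabilities, 0.5)  # tensor([False,  True,  True])
predictions = predictions.type(torch.int64)            # tensor([0, 1, 1])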
Example #5
 def forward(self):
     a = torch.tensor(0)
     b = torch.tensor(1)
     return len((
         torch.allclose(a, b),
         torch.argsort(a),
         torch.eq(a, b),
         torch.eq(a, 1),
         torch.equal(a, b),
         torch.ge(a, b),
         torch.ge(a, 1),
         torch.greater_equal(a, b),
         torch.greater_equal(a, 1),
         torch.gt(a, b),
         torch.gt(a, 1),
         torch.greater(a, b),
         torch.isclose(a, b),
         torch.isfinite(a),
         torch.isin(a, b),
         torch.isinf(a),
         torch.isposinf(a),
         torch.isneginf(a),
         torch.isnan(a),
         torch.isreal(a),
         torch.kthvalue(a, 1),
         torch.le(a, b),
         torch.le(a, 1),
         torch.less_equal(a, b),
         torch.lt(a, b),
         torch.lt(a, 1),
         torch.less(a, b),
         torch.maximum(a, b),
         torch.minimum(a, b),
         torch.fmax(a, b),
         torch.fmin(a, b),
         torch.ne(a, b),
         torch.ne(a, 1),
         torch.not_equal(a, b),
         torch.sort(a),
         torch.topk(a, 1),
         torch.msort(a),
     ))
Example #6
    def get_current_value(self, preds: Tensor, target: Tensor) -> Tensor:
        # notation: b is batch size and nc is number of unique elements in the set
        # preds: shape [b, nc] probabilities for each class
        # target: shape [b, nc] bit-mapped set representation
        preds = torch.greater_equal(preds, self.threshold)  # now bit-mapped set
        target = target.type(torch.bool)

        intersection = torch.sum(torch.logical_and(target, preds).type(torch.float32), dim=-1)
        union = torch.sum(torch.logical_or(target, preds).type(torch.float32), dim=-1)

        return intersection / union  # shape [b]
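An illustrative, self-contained version of the same Jaccard (intersection-over-union) computation on a toy batch; the jaccard_index name and the explicit threshold argument are assumptions made for the sketch, not part of the original class.

import torch

def jaccard_index(preds, target, threshold=0.5):
    # preds: [b, nc] probabilities; target: [b, nc] 0/1 set membership
    preds = torch.greater_equal(preds, threshold)
    target = target.type(torch.bool)
    intersection = torch.sum(torch.logical_and(target, preds).float(), dim=-1)
    union = torch.sum(torch.logical_or(target, preds).float(), dim=-1)
    return intersection / union  # shape [b]

preds = torch.tensor([[0.9, 0.2, 0.7], [0.1, 0.8, 0.4]])
target = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
print(jaccard_index(preds, target))  # tensor([1.0000, 0.5000])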
Example #7
def reduced_sigmoid_focal_loss(pred,
                               target,
                               weight=None,
                               gamma=2.0,
                               alpha=0.25,
                               reduction='mean',
                               avg_factor=None,
                               threshold=0.5):
    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        threshold (float, optional): Value of pt below which the loss is
            down-weighted by (pt / threshold)**gamma. Defaults to 0.5.
    """
    # t = torch.nn.functional.one_hot(target.long()).float()
    bce = nn.BCEWithLogitsLoss(reduction='none')
    ce = bce(pred, target.float())
    pred_sigmoid = pred.sigmoid()

    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    modulating_factor = torch.greater_equal(
        pt, threshold).float() + torch.less(pt, threshold).float() * (
            pt).pow(gamma) / torch.tensor(threshold).pow(gamma)
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * modulating_factor
    loss = ce * focal_weight

    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                #  which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed. e.g.
                #  in FSAF. But it may be flattened of shape
                #  (num_priors x num_class, ), while loss is still of shape
                #  (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
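A usage sketch, assuming nn is torch.nn and that weight_reduce_loss (mmdetection's reduction helper, or an equivalent) is importable next to the function above:

import torch

pred = torch.randn(8, 4)                       # logits for 8 samples, 4 classes
target = torch.randint(0, 2, (8, 4)).float()   # multi-label 0/1 targets
loss = reduced_sigmoid_focal_loss(pred, target, gamma=2.0, alpha=0.25,
                                  reduction='mean', threshold=0.5)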
Example #8
def focal_loss_for_heat_map(labels,
                            logits,
                            pos_threshold=0.99,
                            alpha=2,
                            beta=4,
                            sum=True):
    '''
    Focal loss for heat maps, e.g. CenterNet2's heat-map loss.
    '''
    logits = logits.to(torch.float32)
    zeros = torch.zeros_like(labels)
    ones = torch.ones_like(labels)
    num_pos = torch.sum(
        torch.where(torch.greater_equal(labels, pos_threshold), ones, zeros))

    probs = F.sigmoid(logits)
    pos_weight = torch.where(torch.greater_equal(labels, pos_threshold),
                             ones - probs, zeros)
    neg_weight = torch.where(torch.less(labels, pos_threshold), probs, zeros)
    '''
    For numerical stability: log(sigmoid(x)) = log(1/(1+e^-x)) = -log(1+e^-x) = x - x - log(1+e^-x) = x - log(e^x + 1)
    pos_loss = tf.where(tf.less(logits,0),logits-tf.log(tf.exp(logits)+1),tf.log(probs))
    '''
    pure_pos_loss = -torch.minimum(
        logits, logits.new_tensor(0, dtype=logits.dtype)) + torch.log(
            1 + torch.exp(-torch.abs(logits)))
    pos_loss = pure_pos_loss * torch.pow(pos_weight, alpha)
    if sum:
        pos_loss = torch.sum(pos_loss)
    '''
    For numerical stability (stable form of -log(1 - sigmoid(logits)))
    '''
    pure_neg_loss = F.relu(logits) + torch.log(1 +
                                               torch.exp(-torch.abs(logits)))
    neg_loss = torch.pow(
        (1 - labels), beta) * torch.pow(neg_weight, alpha) * pure_neg_loss
    if sum:
        neg_loss = torch.sum(neg_loss)
    loss = (pos_loss + neg_loss) / (num_pos + 1e-4)
    return loss
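A quick usage sketch, assuming F is torch.nn.functional and the function above is defined; labels are soft heat-map targets in [0, 1], and locations with label >= pos_threshold count as positives.

import torch

labels = torch.rand(2, 1, 64, 64)   # Gaussian-splatted heat-map targets
logits = torch.randn(2, 1, 64, 64)  # raw network outputs
loss = focal_loss_for_heat_map(labels, logits, pos_threshold=0.99)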
Example #9
    def predictions(self, inputs, feature_name, **kwargs):
        logits = output_feature_utils.get_output_feature_tensor(
            inputs, feature_name, LOGITS)
        probabilities = torch.sigmoid(logits)

        predictions = torch.greater_equal(probabilities, self.threshold)
        predictions = predictions.type(torch.int64)

        return {
            PREDICTIONS: predictions,
            PROBABILITIES: probabilities,
            LOGITS: logits
        }
Example #10
def control_point_l1_loss_better_than_threshold(pred_control_points,
                                                gt_control_points,
                                                confidence,
                                                confidence_threshold,
                                                device="cpu"):
    npoints = pred_control_points.shape[1]
    mask = torch.greater_equal(confidence, confidence_threshold)
    mask_ratio = torch.mean(mask.float())  # fraction of grasps above the confidence threshold
    mask = torch.repeat_interleave(mask, npoints, dim=1)
    p1 = pred_control_points[mask]
    p2 = gt_control_points[mask]

    return control_point_l1_loss(p1, p2), mask_ratio
Example #11
def min_distance_better_than_threshold(pred_control_points,
                                       gt_control_points,
                                       confidence,
                                       confidence_threshold,
                                       device="cpu"):
    error = torch.unsqueeze(pred_control_points, 1) - torch.unsqueeze(
        gt_control_points, 0)
    error = torch.sum(torch.abs(error),
                      -1)  # L1 distance of error (N_pred, N_gt, M)
    error = torch.mean(
        error, -1)  # average L1 for all the control points. (N_pred, N_gt)
    error = torch.min(error, -1)[0]  # distance to the closest gt grasp. (N_pred,)
    mask = torch.greater_equal(confidence, confidence_threshold)
    mask = torch.squeeze(mask, dim=-1)

    return torch.mean(error[mask]), torch.mean(mask.float())
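A usage sketch for the function above, assuming predicted and ground-truth control points of shape (N, M, 3) and confidence of shape (N_pred, 1):

import torch

pred_cp = torch.rand(10, 6, 3)     # 10 predicted grasps, 6 control points each
gt_cp = torch.rand(4, 6, 3)        # 4 ground-truth grasps
confidence = torch.rand(10, 1)
mean_err, coverage = min_distance_better_than_threshold(pred_cp, gt_cp,
                                                        confidence, 0.5)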
Example #12
def accuracy_better_than_threshold(pred_success_logits,
                                   gt,
                                   confidence,
                                   confidence_threshold,
                                   device="cpu"):
    """
      Computes average precision for the grasps with confidence > threshold.
    """
    pred_classes = torch.argmax(pred_success_logits, -1)
    correct = torch.equal(pred_classes, gt)
    mask = torch.squeeze(torch.greater_equal(confidence, confidence_threshold),
                         -1)

    positive_acc = torch.sum(correct * mask * gt) / torch.max(
        torch.sum(mask * gt), torch.tensor(1))
    negative_acc = torch.sum(correct * mask * (1. - gt)) / torch.max(
        torch.sum(mask * (1. - gt)), torch.tensor(1))

    return 0.5 * (positive_acc + negative_acc), torch.sum(mask) / gt.shape[0]
Example #13
    def predictions(self, inputs, feature_name, **kwargs):
        logits = output_feature_utils.get_output_feature_tensor(inputs, feature_name, LOGITS)
        probabilities = torch.sigmoid(logits)

        predictions = torch.greater_equal(probabilities, self.threshold)
        predictions = predictions.type(torch.int64)

        return {PREDICTIONS: predictions, PROBABILITIES: probabilities, LOGITS: logits}

    def loss_kwargs(self):
        return self.loss

    def get_prediction_set(self):
        return {PREDICTIONS, PROBABILITIES, LOGITS}

    @classmethod
    def get_output_dtype(cls):
        return torch.bool
Example #14
 def __ge__(self, other):
     x0, x1 = self._to_binary_tensor_args(other)
     y = torch.greater_equal(x0._t, x1._t)
     s = _ox.greater_equal(*_EagerTensor.ox_args([x0, x1]))
     return self.from_torch(y, s)
Example #15
def greater_equal(a, b):
    return torch.greater_equal(a, b)
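The wrapper simply forwards to torch.greater_equal, which broadcasts and also accepts a Python scalar as the second argument:

import torch

print(greater_equal(torch.tensor([1, 2, 3]), 2))                   # tensor([False,  True,  True])
print(greater_equal(torch.tensor([[1], [3]]), torch.tensor([2])))  # broadcast to shape (2, 1)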
Example #16
def weibull_pdf(x, shape, scale):
    key = torch.greater_equal(x, 0)
    pdf = torch.zeros_like(x)
    pdf[key] = (shape[key] / scale[key]) * (x[key] / scale[key])**(
        shape[key] - 1) * torch.exp(-(x[key] / scale[key])**shape[key])
    return pdf