Example #1
 def forward(self, input):
     """
     Forward pass of the function.
     """
     return (1 / self.alpha) * F.log(
         (1 + F.exp(self.alpha * input)) / (1 + F.exp(self.alpha * (input - 1)))
     )
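For large alpha this expression approaches clip(input, 0, 1), since (1/alpha) * softplus(alpha * x) tends to max(x, 0), so the layer acts as a smooth, differentiable clamp to [0, 1]. A minimal numpy sanity check (a sketch, not from the original project; soft_clip is an illustrative name):

import numpy as np

def soft_clip(x, alpha):
    # same formula as the forward above: (1/a) * log((1 + e^(a*x)) / (1 + e^(a*(x-1))))
    return (1.0 / alpha) * np.log((1 + np.exp(alpha * x)) / (1 + np.exp(alpha * (x - 1))))

x = np.linspace(-2.0, 3.0, 11)
print(np.max(np.abs(soft_clip(x, 50.0) - np.clip(x, 0.0, 1.0))))  # ~1.4e-2, shrinking as alpha grows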
Example #2
    def decode(self, anchors: Tensor, deltas: Tensor) -> Tensor:
        if self.reg_std is not None:
            deltas *= self.reg_std
        if self.reg_mean is not None:
            deltas += self.reg_mean

        (
            anchor_width,
            anchor_height,
            anchor_ctr_x,
            anchor_ctr_y,
        ) = self._box_ltrb_to_cs_opr(anchors, 1)
        pred_ctr_x = anchor_ctr_x + deltas[:, 0::4] * anchor_width
        pred_ctr_y = anchor_ctr_y + deltas[:, 1::4] * anchor_height
        pred_width = anchor_width * F.exp(deltas[:, 2::4])
        pred_height = anchor_height * F.exp(deltas[:, 3::4])

        pred_x1 = pred_ctr_x - 0.5 * pred_width
        pred_y1 = pred_ctr_y - 0.5 * pred_height
        pred_x2 = pred_ctr_x + 0.5 * pred_width
        pred_y2 = pred_ctr_y + 0.5 * pred_height

        pred_box = self._concat_new_axis(pred_x1, pred_y1, pred_x2, pred_y2, 2)
        pred_box = pred_box.reshape(pred_box.shape[0], -1)

        return pred_box
Example #3
def bbox_transform_inv_opr(bbox, deltas):
    """Transform the learned deltas into final bbox coordinates along axis 1."""
    max_delta = math.log(1000.0 / 16)
    bbox_width = bbox[:, 2] - bbox[:, 0] + 1
    bbox_height = bbox[:, 3] - bbox[:, 1] + 1
    bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
    bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
    pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
    pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height

    dw = deltas[:, 2]
    dh = deltas[:, 3]
    dw = F.minimum(dw, max_delta)
    dh = F.minimum(dh, max_delta)
    pred_width = bbox_width * F.exp(dw)
    pred_height = bbox_height * F.exp(dh)

    pred_x1 = pred_ctr_x - 0.5 * pred_width
    pred_y1 = pred_ctr_y - 0.5 * pred_height
    pred_x2 = pred_ctr_x + 0.5 * pred_width
    pred_y2 = pred_ctr_y + 0.5 * pred_height
    # pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
    #                         pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
    pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis=1)
    return pred_boxes
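A quick way to see the legacy "+ 1" box convention in this decoder (a hedged sketch, not part of the original project): widths are computed as x2 - x1 + 1, but the decoded corners omit the matching - 1, so all-zero deltas return the anchor with its right and bottom edges shifted by one pixel.

import numpy as np
import megengine as mge

boxes = mge.tensor(np.array([[10.0, 20.0, 50.0, 80.0]], dtype=np.float32))
deltas = mge.tensor(np.zeros((1, 4), dtype=np.float32))
# identity deltas do not round-trip exactly under the +1 convention:
print(bbox_transform_inv_opr(boxes, deltas).numpy())  # [[10. 20. 51. 81.]]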
Example #4
def smooth_grad_1st(flo, image, alpha):
    img_dx, img_dy = gradient(image)
    weights_x = F.exp(-F.mean(F.abs(img_dx), 1, keepdims=True) * alpha)
    weights_y = F.exp(-F.mean(F.abs(img_dy), 1, keepdims=True) * alpha)

    dx, dy = gradient(flo)

    loss_x = weights_x * F.abs(dx) / 2.0
    loss_y = weights_y * F.abs(dy) / 2.0
    return F.mean(loss_x) / 2.0 + F.mean(loss_y) / 2.0
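The exp weights make this an edge-aware smoothness loss for optical flow: where the image gradient is large, exp(-alpha * |dI|) decays toward zero, so the flow field may break at image edges and is penalized for varying elsewhere. A hedged numpy illustration of the weighting (not from the original project):

import numpy as np

alpha = 10.0
img_dx = np.array([0.0, 0.02, 0.5])        # flat region ... strong edge
weights = np.exp(-np.abs(img_dx) * alpha)
print(weights)  # [1.0 ~0.82 ~0.0067]: smoothness is barely enforced across the edge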
Example #5
 def forward(self, input):
     """
     Forward pass of the function.
     """
     if self.hard is False:
         return (input >= 0).astype("float32") * swish_function(
             input, False, False, None, None
         ) + (input < 0).astype("float32") * (F.exp(input) - 1) * F.sigmoid(input)
     else:
         # elementwise maximum/minimum clamp the hard-sigmoid gate to [a, b]
         return (input >= 0).astype("float32") * input * F.maximum(
             self.a, F.minimum(self.b, (input + 1.0) / 2.0)
         ) + (input < 0).astype("float32") * (
             F.exp(input - 1) * F.maximum(self.a, F.minimum(self.b, (input + 1.0) / 2.0))
         )
Example #6
File: losses.py Project: zzh7982/Models
def _bce_loss_with_logits(output, labels, **kwargs):
    r"""
    Sigmoid cross entropy with logits, see tensorflow
    https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
    """
    loss = F.maximum(output, 0) - output * labels + F.log(1 + F.exp(-F.abs(output)))
    return loss.mean()
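The three terms are the standard overflow-safe rearrangement of -[z * log(sigmoid(x)) + (1 - z) * log(1 - sigmoid(x))]: max(x, 0) - x * z + log(1 + e^(-|x|)) never exponentiates a positive number. A hedged numpy cross-check (not from the original project):

import numpy as np

def stable_bce(x, z):
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

def naive_bce(x, z):
    s = 1.0 / (1.0 + np.exp(-x))
    return -(z * np.log(s) + (1 - z) * np.log(1 - s))

x = np.array([-3.0, 0.5, 2.0]); z = np.array([0.0, 1.0, 1.0])
print(np.allclose(stable_bce(x, z), naive_bce(x, z)))  # True
print(stable_bce(np.array([80.0]), np.array([0.0])))   # [80.] -- the naive form hits log(0) here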
Example #7
def softmax_loss(pred, label, ignore_label=-1):
    max_pred = F.zero_grad(pred.max(axis=1, keepdims=True))
    pred -= max_pred
    log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
    mask = 1 - F.equal(label, ignore_label)
    vlabel = label * mask
    loss = -(F.indexing_one_hot(log_prob, vlabel, 1) * mask)
    return loss
Example #8
def softmax_loss(score, label, ignore_label=-1):
    max_score = F.zero_grad(score.max(axis=1, keepdims=True))
    score -= max_score
    log_prob = score - F.log(F.exp(score).sum(axis=1, keepdims=True))
    mask = (label != ignore_label)
    vlabel = label * mask
    loss = -(F.indexing_one_hot(log_prob, vlabel.astype("int32"), 1) *
             mask).sum()
    loss = loss / F.maximum(mask.sum(), 1)
    return loss
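Examples #7 through #11 all implement the same stabilized log-softmax: subtracting the gradient-detached row maximum m before F.exp prevents overflow without changing the result, because log(sum_i e^(s_i)) = m + log(sum_i e^(s_i - m)). A hedged numpy cross-check of the version above (assuming a MegEngine build where F.zero_grad and F.indexing_one_hot are available):

import numpy as np
import megengine as mge

scores = np.random.randn(4, 3).astype(np.float32)
labels = np.array([0, 2, -1, 1], dtype=np.int32)  # -1 marks an ignored row

# numpy reference: mean negative log-likelihood over the non-ignored rows
shifted = scores - scores.max(axis=1, keepdims=True)
log_prob = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
keep = labels != -1
ref = -log_prob[keep, labels[keep]].mean()

out = softmax_loss(mge.tensor(scores), mge.tensor(labels))
print(np.allclose(out.numpy(), ref, atol=1e-5))  # True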
Example #9
def softmax_loss_opr(pred, label, ignore_label=-1):

    max_pred = pred.max(axis=1, keepdims=True).detach()
    pred -= max_pred
    log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
    mask = 1 - F.equal(label, ignore_label)
    vlabel = label * mask.astype(np.float32)
    loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32),
                                   1).flatten() * mask)
    return loss
Example #10
def softmax_cross_entropy(pred, label, axis=1, ignore_index=255):
    offset = F.zero_grad(pred.max(axis=axis, keepdims=True))
    pred = pred - offset
    log_prob = pred - F.log(F.exp(pred).sum(axis=axis, keepdims=True))

    mask = 1 - F.equal(label, ignore_index)
    vlabel = label * mask
    loss = -(F.indexing_one_hot(log_prob, vlabel, axis) *
             mask).sum() / F.maximum(mask.sum(), 1)
    return loss
Example #11
File: loss.py Project: klsdjft/Models-1
def softmax_loss(scores: Tensor,
                 labels: Tensor,
                 ignore_label: int = -1) -> Tensor:
    max_scores = F.zero_grad(scores.max(axis=1, keepdims=True))
    scores -= max_scores
    log_prob = scores - F.log(F.exp(scores).sum(axis=1, keepdims=True))
    mask = labels != ignore_label
    vlabels = labels * mask
    loss = -(F.indexing_one_hot(log_prob, vlabels.astype("int32"), 1) *
             mask).sum()
    loss = loss / F.maximum(mask.sum(), 1)
    return loss
Example #12
    def forward(self, x):
        """
        Forward pass of the function
        """
        if self.alpha == 0.0:
            return x

        if self.alpha < 0.0:
            return -F.log(1 - self.alpha * (x + self.alpha)) / self.alpha

        if self.alpha > 0.0:
            return (F.exp(self.alpha * x) - 1) / self.alpha + self.alpha
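This is the "soft exponential" family of activations: a single parameter alpha interpolates between logarithmic (alpha < 0), identity (alpha = 0), and exponential (alpha > 0) regimes, and the three branches agree in the alpha -> 0 limit. A hedged numpy check of that continuity (not from the original project):

import numpy as np

def soft_exponential(x, alpha):
    if alpha == 0.0:
        return x
    if alpha < 0.0:
        return -np.log(1 - alpha * (x + alpha)) / alpha
    return (np.exp(alpha * x) - 1) / alpha + alpha

for a in (-1e-4, 0.0, 1e-4):
    print(soft_exponential(0.7, a))  # all ~0.7: the branches meet as alpha -> 0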
Example #13
    def encoding(self, x):
        for conv in self.convs:
            x = conv(x)
            print('encoding', x.shape)

        print('fmap', x.shape)
        #code = self.gap(x)
        #code = code.reshape(-1, code.shape[1])
        code = x.dimshuffle(0, 2, 3, 1)

        print(code.shape)
        mean = self.fc_mean(code).dimshuffle(0, 3, 1, 2)
        var = 1e-5 + F.exp(self.fc_var(code)).dimshuffle(0, 3, 1, 2)

        return mean, var
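Two details worth noting in this VAE-style encoder head (a hedged reading): F.exp guarantees the predicted variance is positive whatever fc_var outputs, and the 1e-5 floor keeps later logs and divisions (e.g. in a KL term) finite. In plain numpy:

import numpy as np

raw = np.array([-20.0, 0.0, 3.0])  # unconstrained fc_var outputs
var = 1e-5 + np.exp(raw)           # strictly positive, floored near 1e-5
print(var)                         # [~1.0e-05 ~1.00001 ~20.0855]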
Example #14
    def decode(self, anchors: Tensor, deltas: Tensor) -> Tensor:
        deltas *= self.reg_std
        deltas += self.reg_mean

        (
            anchor_width,
            anchor_height,
            anchor_ctr_x,
            anchor_ctr_y,
        ) = self._box_ltrb_to_cs_opr(anchors, 1)
        pred_ctr_x = anchor_ctr_x + deltas[:, 0::4] * anchor_width
        pred_ctr_y = anchor_ctr_y + deltas[:, 1::4] * anchor_height
        pred_width = anchor_width * F.exp(deltas[:, 2::4])
        pred_height = anchor_height * F.exp(deltas[:, 3::4])

        pred_x1 = pred_ctr_x - 0.5 * pred_width
        pred_y1 = pred_ctr_y - 0.5 * pred_height
        pred_x2 = pred_ctr_x + 0.5 * pred_width
        pred_y2 = pred_ctr_y + 0.5 * pred_height

        pred_box = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis=2)
        pred_box = pred_box.reshape(pred_box.shape[0], -1)

        return pred_box
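Since decode starts by de-normalizing with reg_std and reg_mean, the matching encode must have stored targets as (t - mean) / std, with the usual Faster R-CNN parameterization: center offsets scaled by anchor size and log size ratios, which is why F.exp appears here. A hedged numpy walk-through of one anchor (names illustrative, not from the original project):

import numpy as np

anchor_w, anchor_h, anchor_cx, anchor_cy = 40.0, 60.0, 30.0, 50.0
t = np.array([0.1, -0.2, np.log(1.5), np.log(0.5)])  # tx, ty, tw, th, already de-normalized

cx = anchor_cx + t[0] * anchor_w  # 34.0
cy = anchor_cy + t[1] * anchor_h  # 38.0
w = anchor_w * np.exp(t[2])       # 60.0
h = anchor_h * np.exp(t[3])       # 30.0
print([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h])  # [4.0, 23.0, 64.0, 53.0]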
Example #15
    def decode_outputs(self, outputs):
        grids = []
        strides = []
        for (hsize, wsize), stride in zip(self.hw, self.strides):
            xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
            grid = F.stack((xv, yv), 2).reshape(1, -1, 2)
            grids.append(grid)
            shape = grid.shape[:2]
            strides.append(F.full((*shape, 1), stride))

        grids = F.concat(grids, axis=1)
        strides = F.concat(strides, axis=1)

        outputs[..., :2] = (outputs[..., :2] + grids) * strides
        outputs[..., 2:4] = F.exp(outputs[..., 2:4]) * strides
        return outputs
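This is the YOLO-style decode: predicted centers are cell-relative offsets shifted by the grid and scaled by the stride, while widths and heights are regressed in log space, so F.exp maps the unconstrained outputs back to positive sizes. A tiny hedged numpy illustration (names illustrative only):

import numpy as np

stride = 8
grid_xy = np.array([5.0, 3.0])                           # cell index in a stride-8 feature map
pred = np.array([0.25, -0.5, np.log(4.0), np.log(2.0)])  # tx, ty, tw, th

center = (pred[:2] + grid_xy) * stride  # -> [42. 20.]
size = np.exp(pred[2:]) * stride        # -> [32. 16.]
print(center, size)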
Example #16
    def get_output_and_grid(self, output, k, stride, dtype):
        grid = self.grids[k]

        batch_size = output.shape[0]
        n_ch = 5 + self.num_classes
        hsize, wsize = output.shape[-2:]
        if grid.shape[2:4] != output.shape[2:4]:
            yv, xv = meshgrid([F.arange(hsize), F.arange(wsize)])
            grid = F.stack((xv, yv), 2).reshape(1, 1, hsize, wsize,
                                                2).astype(dtype)
            self.grids[k] = grid

        output = output.reshape(batch_size, self.n_anchors, n_ch, hsize, wsize)
        output = (output.transpose(0, 1, 3, 4,
                                   2).reshape(batch_size,
                                              self.n_anchors * hsize * wsize,
                                              -1))
        grid = grid.reshape(1, -1, 2)
        output[..., :2] = (output[..., :2] + grid) * stride
        output[..., 2:4] = F.exp(output[..., 2:4]) * stride
        return output, grid
Example #17
 def forward(self, a):
     # add
     if self.mode == "add":
         x = a + mge.tensor(np.float32(10))
         y = a + mge.tensor(self.data1)
         z = x + y
     # sub
     elif self.mode == "sub":
         x = a - mge.tensor(np.float32(10))
         y = a - mge.tensor(self.data1)
         z = x - y
     # mul
     elif self.mode == "mul":
         x = a * mge.tensor(np.float32(10))
         y = mge.tensor(self.data1) * a
         z = x * y
     # div
     elif self.mode == "div":
         y = mge.tensor(self.data1) / a
         x = a / mge.tensor(np.float32(2))
         z = y / x
     # cycle_div
     elif self.mode == "cycle_div":
         z = a / mge.tensor(self.data1)
     # abs
     elif self.mode == "abs":
         z = F.abs(a)
     # exp
     elif self.mode == "exp":
         z = F.exp(a)
     # log
     elif self.mode == "log":
         z = F.log(a)
     else:
         raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
     return z
Example #18
 def forward(self, x):
     x = x * self.a
     # will result in a copy of the output in grad
     x = F.exp(x)
     return x
Example #19
def test_elemementwise():
    a = Tensor(1.0)
    assert F.exp(a).ndim == 0
    assert (a + a).ndim == 0
    assert (a + 1).ndim == 0
Example #20
 def g(x):
     return log(exp(x))
Example #21
 def f(x):
     return exp(x)
Example #22
 def g(x, y):
     return log(exp(x) + exp(y))
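As written, g overflows as soon as x or y is large; the cure is the same max-shift used by the softmax losses above, which numpy ships as logaddexp. A hedged aside, not part of the original snippet:

import numpy as np

print(np.logaddexp(1000.0, 1000.0))        # ~1000.6931
# np.log(np.exp(1000.0) + np.exp(1000.0))  # overflows to inf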
Example #23
 def f(x):
     return log(exp(x))
Example #24
    def forward(self, features, label=None, mask=None):
        """
        if label and mask both None, the loss will degenerate to
        SimSLR unsupervised loss.
        Reference:
            "A Simple Framework for Contrastive Learning of Visual Representations"<https://arxiv.org/pdf/2002.05709.pdf>
            "Supervised Contrastive Learning"<https://arxiv.org/abs/2004.11362>
        Args:
            features(tensor): The embedding feature. shape=[bs, n_views, ...]
            label(tensor): The label of images, shape=[bs]
            mask(tensor): contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
                has the same class as sample i. Can be asymmetric.
        return:
            loss
        """
        if len(features.shape) < 3:
            raise ValueError("`features` needs at least 3 dimensions")
        bs, num_view = features.shape[:2]
        # if the features have more than 3 dimensions, flatten them to [bs, num_view, -1]
        if len(features.shape) > 3:
            features = features.reshape(bs, num_view, -1)

        # label and mask cannot both be provided
        if (label is not None) and (mask is not None):
            raise ValueError("label and mask cannot both be provided")
        elif (label is None) and (mask is None):
            mask = F.eye(bs, dtype="float32")
        elif label is not None:
            label = label.reshape(-1, 1)
            if label.shape[0] != bs:
                raise RuntimeError(
                    "Num of labels does not match num of features")
            mask = F.equal(label, label.T)
        else:
            mask = mask.astype("float32")

        contrast_count = features.shape[1]
        features = F.split(features, features.shape[1], axis=1)
        contrast_feature = F.squeeze(F.concat(features, axis=0), axis=1)
        if self.contrast_mode == "one":
            # after F.split above, `features` is a sequence of [bs, 1, ...] chunks
            anchor_feature = F.squeeze(features[0], axis=1)
            anchor_count = 1
        elif self.contrast_mode == "all":
            anchor_feature = contrast_feature
            anchor_count = contrast_count
        else:
            raise ValueError("Unknown mode:{}".format(self.contrast_mode))
        #compute logits
        anchor_dot_contrast = F.div(
            F.matmul(anchor_feature, contrast_feature.T), self.temperate)

        #for numerical stability
        logits_max = F.max(anchor_dot_contrast, axis=-1, keepdims=True)
        logits = anchor_dot_contrast - logits_max

        #tile mask
        an1, con = mask.shape[:2]
        nums = anchor_count * contrast_count
        # mask-out self-contrast cases
        mask = F.stack([mask] * nums).reshape(an1 * anchor_count,
                                              con * contrast_count)
        logits_mask = F.scatter(
            F.ones_like(mask), 1,
            F.arange(0, int(bs * anchor_count), dtype="int32").reshape(-1, 1),
            F.zeros(int(bs * anchor_count), dtype="int32").reshape(-1, 1))
        mask = mask * logits_mask
        #compute log_prob
        exp_logits = F.exp(logits) * logits_mask
        log_prob = logits - F.log(F.sum(exp_logits, axis=1,
                                        keepdims=True))  #equation 2

        #mean
        mean_log_prob_pos = F.sum(mask * log_prob, axis=1) / F.sum(mask,
                                                                   axis=1)

        #loss
        loss = -(self.temperate / self.base_temperate) * mean_log_prob_pos
        loss = F.mean(loss.reshape(anchor_count, bs))
        return loss
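A hedged usage sketch for the loss above (criterion stands for an instance of this module and is an assumption): features should arrive L2-normalized with shape [bs, n_views, dim], optionally with a [bs] label vector.

import numpy as np
import megengine as mge

bs, n_views, dim = 8, 2, 128
feats = np.random.randn(bs, n_views, dim).astype(np.float32)
feats /= np.linalg.norm(feats, axis=-1, keepdims=True)  # unit norm, as SimCLR/SupCon assume
labels = np.random.randint(0, 4, size=(bs,)).astype(np.int32)
# loss = criterion(mge.tensor(feats), mge.tensor(labels))  # criterion: an instance of the module above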
Example #25
 def forward(self, input):
     """
     Forward pass of the function.
     """
     return F.pow((1 + F.exp(-self.beta * input)), -self.alpha)
Example #26
def softplus(x: Tensor) -> Tensor:
    return F.log(1 + F.exp(-F.abs(x))) + F.relu(x)
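This is the usual overflow-safe softplus: log(1 + e^x) = relu(x) + log(1 + e^(-|x|)), so the argument of F.exp is never positive. A hedged numpy check of the identity (not from the original project):

import numpy as np

def softplus_stable(x):
    return np.log1p(np.exp(-np.abs(x))) + np.maximum(x, 0)

x = np.array([-5.0, 0.0, 5.0])
print(np.allclose(softplus_stable(x), np.log1p(np.exp(x))))  # True
print(softplus_stable(np.array([1000.0])))  # [1000.] -- np.log1p(np.exp(1000.0)) overflows to inf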
Example #27
    def forward(self, a):
        # add
        if self.mode == "add":
            x = a + mge.tensor(np.float32(10))
            y = a + mge.tensor(self.data1)
            z = x + y
        # sub
        elif self.mode == "sub":
            x = a - mge.tensor(np.float32(10))
            y = a - mge.tensor(self.data1)
            z = x - y
        # mul
        elif self.mode == "mul":
            x = a * mge.tensor(np.float32(10))
            y = mge.tensor(self.data1) * a
            z = x * y
        # max
        elif self.mode == "max":
            x = a + mge.tensor(self.data)
            y = a + mge.tensor(self.data2)
            z = F.maximum(x, y)
        elif self.mode == "min":
            x = a + mge.tensor(self.data)
            y = a + mge.tensor(self.data2)
            z = F.minimum(x, y)

        elif self.mode == "pow":
            z = a**2

        elif self.mode == "ceil":
            z = F.ceil(a)

        elif self.mode == "floor":
            z = F.floor(a)

        elif self.mode == "div":
            y = mge.tensor(self.data1) / a
            x = a / mge.tensor(np.float32(2))
            z = y / x
        # cycle_div
        elif self.mode == "cycle_div":
            z = a / mge.tensor(self.data1)
        # abs
        elif self.mode == "abs":
            z = F.abs(a)
        # exp
        elif self.mode == "exp":
            z = F.exp(a)
        # log
        elif self.mode == "log":
            z = F.log(a)
        elif self.mode == "fuse_add_relu":
            y = a + mge.tensor(self.data2)
            z = F.relu(y)
        elif self.mode == "fuse_mul_add3":
            y = a * mge.tensor(self.data1)
            z = y + mge.tensor(self.data2)
        elif self.mode == "fuse_add_sigmoid":
            y = a + mge.tensor(self.data2)
            z = F.sigmoid(y)
        else:
            raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
        return z
Example #28
 def f(x):
     return F.exp(x)
Example #29
    def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes,
                   im_info):
        # pylint: disable=too-many-statements
        def positive_bag_loss(logits, axis=1):
            weight = 1.0 / (1.0 - logits)
            weight /= weight.sum(axis=axis, keepdims=True)
            bag_prob = (weight * logits).sum(axis=1)
            return -layers.safelog(bag_prob)

        def negative_bag_loss(logits, gamma):
            return (logits**gamma) * (-layers.safelog(1.0 - logits))

        pred_scores = F.sigmoid(pred_logits)
        box_prob_list = []
        positive_losses = []
        clamp_eps = 1e-7
        bucket_size = self.cfg.bucket_size

        for bid in range(im_info.shape[0]):
            boxes_info = gt_boxes[bid, :im_info[bid, 4].astype("int32")]
            # id 0 is used for background classes, so -1 first
            labels = boxes_info[:, 4].astype("int32") - 1

            pred_box = self.box_coder.decode(anchors,
                                             pred_offsets[bid]).detach()
            overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
            thresh1 = self.cfg.box_iou_threshold
            thresh2 = F.clip(overlaps.max(axis=1, keepdims=True),
                             lower=thresh1 + clamp_eps,
                             upper=1.0)
            gt_pred_prob = F.clip((overlaps - thresh1) / (thresh2 - thresh1),
                                  lower=0,
                                  upper=1.0)

            image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
            # guarantee that nonzero_idx is not empty
            if gt_pred_prob.max() > clamp_eps:
                _, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
                # since nonzeros is only 1 dim, use num_anchor to get real indices
                num_anchors = gt_pred_prob.shape[1]
                anchors_idx = nonzero_idx % num_anchors
                gt_idx = nonzero_idx // num_anchors
                image_boxes_prob[anchors_idx,
                                 labels[gt_idx]] = gt_pred_prob[gt_idx,
                                                                anchors_idx]

            box_prob_list.append(image_boxes_prob)

            # construct bags for objects
            match_quality_matrix = layers.get_iou(boxes_info[:, :4],
                                                  anchors).detach()
            num_gt = match_quality_matrix.shape[0]
            _, matched_idx = F.topk(
                match_quality_matrix,
                k=bucket_size,
                descending=True,
                no_sort=True,
            )

            matched_idx = matched_idx.detach()
            matched_idx_flatten = matched_idx.reshape(-1)
            gather_idx = labels.reshape(-1, 1)
            gather_idx = F.broadcast_to(gather_idx, (num_gt, bucket_size))

            gather_src = pred_scores[bid, matched_idx_flatten]
            gather_src = gather_src.reshape(num_gt, bucket_size, -1)
            matched_score = F.indexing_one_hot(gather_src, gather_idx, axis=2)

            topk_anchors = anchors[matched_idx_flatten]
            boxes_broad_cast = F.broadcast_to(
                F.expand_dims(boxes_info[:, :4], axis=1),
                (num_gt, bucket_size, 4)).reshape(-1, 4)

            matched_offsets = self.box_coder.encode(topk_anchors,
                                                    boxes_broad_cast)

            reg_loss = layers.smooth_l1_loss(
                pred_offsets[bid, matched_idx_flatten],
                matched_offsets,
                beta=self.cfg.smooth_l1_beta).sum(
                    axis=-1) * self.cfg.reg_loss_weight
            matched_reg_scores = F.exp(-reg_loss)

            positive_losses.append(
                positive_bag_loss(matched_score *
                                  matched_reg_scores.reshape(-1, bucket_size),
                                  axis=1))

        num_foreground = im_info[:, 4].sum()
        pos_loss = F.concat(positive_losses).sum() / F.maximum(
            1.0, num_foreground)
        box_probs = F.stack(box_prob_list, axis=0)

        neg_loss = negative_bag_loss(
            pred_scores *
            (1 - box_probs), self.cfg.focal_loss_gamma).sum() / F.maximum(
                1.0, num_foreground * bucket_size)

        alpha = self.cfg.focal_loss_alpha
        pos_loss = pos_loss * alpha
        neg_loss = neg_loss * (1 - alpha)
        loss_dict = {
            "total_loss": pos_loss + neg_loss,
            "pos_loss": pos_loss,
            "neg_loss": neg_loss,
        }
        return loss_dict
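One F.exp trick specific to this FreeAnchor-style loss (a hedged note): mapping the smooth-L1 regression loss through exp(-loss) turns an unbounded penalty into a (0, 1] localization confidence that can be multiplied into the classification score before the positive bag loss.

import numpy as np

reg_loss = np.array([0.0, 0.7, 3.0])
print(np.exp(-reg_loss))  # [1.0 ~0.4966 ~0.0498]: well-regressed boxes keep their score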
Example #30
 def forward(self, x):
     y = 1 / (1 + F.exp(-x))
     self.save_for_backward(y)
     return y
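Saving y pays off in backward because the sigmoid's derivative can be written purely in terms of its output: sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)). A hedged sketch of the matching backward (the saved_tensors accessor is an assumption, mirroring the save_for_backward call above):

 def backward(self, grad_y):
     (y,) = self.saved_tensors  # assumed accessor paired with save_for_backward
     return grad_y * y * (1 - y)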