Example #1
def test_abs():
    np.testing.assert_allclose(
        F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
        np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
    )

    np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
Example #2
def test_abs():
    assertTensorClose(
        F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
        np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
    )

    assertTensorClose(F.abs(-3.0), np.abs(np.float32(-3.0)))
Example #3
def smooth_grad_1st(flo, image, alpha):
    img_dx, img_dy = gradient(image)
    weights_x = F.exp(-F.mean(F.abs(img_dx), 1, keepdims=True) * alpha)
    weights_y = F.exp(-F.mean(F.abs(img_dy), 1, keepdims=True) * alpha)

    dx, dy = gradient(flo)

    loss_x = weights_x * F.abs(dx) / 2.0
    loss_y = weights_y * F.abs(dy) / 2.0
    return F.mean(loss_x) / 2.0 + F.mean(loss_y) / 2.0
Example #4
    def get_ground_truth(self, anchors, batched_gt_boxes, batched_valid_gt_box_number):
        total_anchors = anchors.shape[0]
        labels_cat_list = []
        bbox_targets_list = []

        for b_id in range(self.batch_size):
            gt_boxes = batched_gt_boxes[b_id, : batched_valid_gt_box_number[b_id]]

            overlaps = layers.get_iou(anchors, gt_boxes[:, :4])
            argmax_overlaps = F.argmax(overlaps, axis=1)

            max_overlaps = overlaps.ai[
                F.linspace(0, total_anchors - 1, total_anchors).astype(np.int32),
                argmax_overlaps,
            ]

            labels = mge.tensor([-1]).broadcast(total_anchors)
            labels = labels * (max_overlaps >= self.cfg.negative_thresh)
            labels = labels * (max_overlaps < self.cfg.positive_thresh) + (
                max_overlaps >= self.cfg.positive_thresh
            )

            bbox_targets = self.box_coder.encode(
                anchors, gt_boxes.ai[argmax_overlaps, :4]
            )

            labels_cat = gt_boxes.ai[argmax_overlaps, 4]
            labels_cat = labels_cat * (1.0 - F.less_equal(F.abs(labels), 1e-5))
            ignore_mask = F.less_equal(F.abs(labels + 1), 1e-5)
            labels_cat = labels_cat * (1 - ignore_mask) - ignore_mask

            # assign low_quality boxes
            if self.cfg.allow_low_quality:
                gt_argmax_overlaps = F.argmax(overlaps, axis=0)
                labels_cat = labels_cat.set_ai(gt_boxes[:, 4])[gt_argmax_overlaps]
                matched_low_bbox_targets = self.box_coder.encode(
                    anchors.ai[gt_argmax_overlaps, :], gt_boxes[:, :4]
                )
                bbox_targets = bbox_targets.set_ai(matched_low_bbox_targets)[
                    gt_argmax_overlaps, :
                ]

            labels_cat_list.append(F.add_axis(labels_cat, 0))
            bbox_targets_list.append(F.add_axis(bbox_targets, 0))

        return (
            F.zero_grad(F.concat(labels_cat_list, axis=0)),
            F.zero_grad(F.concat(bbox_targets_list, axis=0)),
        )
Example #5
def get_smooth_l1_base(pred_bbox: Tensor, gt_bbox: Tensor,
                       beta: float) -> Tensor:
    r"""

    Args:
        pred_bbox (Tensor):
            the predicted bbox with the shape of :math:`(N, 4)`
        gt_bbox (Tensor):
            the ground-truth bbox with the shape of :math:`(N, 4)`
        beta (float):
            the parameter of smooth l1 loss.

    Returns:
        the calculated smooth l1 loss.
    """
    x = pred_bbox - gt_bbox
    abs_x = F.abs(x)
    if beta < 1e-5:
        loss = abs_x
    else:
        in_loss = 0.5 * x**2 / beta
        out_loss = abs_x - 0.5 * beta

        # FIXME: F.where cannot handle 0-shape tensor yet
        # loss = F.where(abs_x < beta, in_loss, out_loss)
        in_mask = abs_x < beta
        loss = in_loss * in_mask + out_loss * (1 - in_mask)
    return loss
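In Example #5 the F.where call is worked around with a boolean mask: each element takes in_loss where |x| < beta and out_loss otherwise. A minimal NumPy sketch (not part of the original code; names are illustrative) showing that the mask trick reproduces the piecewise definition:

import numpy as np

def smooth_l1_where(x, beta):
    # reference piecewise definition via np.where
    abs_x = np.abs(x)
    return np.where(abs_x < beta, 0.5 * x ** 2 / beta, abs_x - 0.5 * beta)

def smooth_l1_mask(x, beta):
    # the mask workaround used above: in_loss * mask + out_loss * (1 - mask)
    abs_x = np.abs(x)
    in_mask = (abs_x < beta).astype(np.float32)
    return (0.5 * x ** 2 / beta) * in_mask + (abs_x - 0.5 * beta) * (1 - in_mask)

x = np.array([0.1, 0.9, -1.5, 3.0], dtype=np.float32)
assert np.allclose(smooth_l1_where(x, 1.0), smooth_l1_mask(x, 1.0))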
Example #6
def get_cls_reg_ctr_targets(points, gt_bboxes, bbox_scale = 0.25):
    """
        Compute regression, classification targets for points in multiple images.
        Args:
            points (Tensor): (1, 2, 19, 19).
            gt_bboxes (Tensor): Ground truth bboxes of each image, (B,4), in [tl_x, tl_y, br_x, br_y] format.
        Returns:
            cls_labels (Tensor): Labels. (B, 1, 19, 19)   0 or 1, 0 means background, 1 means in the box.
            bbox_targets (Tensor): BBox targets. (B, 4, 19, 19)  only the foreground is considered; the loss for background positions should be set to 0!
            centerness_targets (Tensor): (B, 1, 19, 19)  only the foreground is considered; the loss for background positions should be set to 0!
    """
    gt_bboxes = F.add_axis(gt_bboxes, axis=-1)
    gt_bboxes = F.add_axis(gt_bboxes, axis=-1)  # (B,4,1,1)
    # cls_labels
    # compute four bounds to decide whether each point lies inside the box; since the template is relatively large, the bbox is shrunk by bbox_scale
    gap = (gt_bboxes[:, 2, ...] - gt_bboxes[:, 0, ...]) * (1-bbox_scale) / 2
    up_bound = points[:, 0, ...] > gt_bboxes[:, 0, ...] + gap
    left_bound = points[:, 1, ...] > gt_bboxes[:, 1, ...] + gap
    down_bound = points[:, 0, ...] < gt_bboxes[:, 2, ...] - gap
    right_bound = points[:, 1, ...] < gt_bboxes[:, 3, ...] - gap
    cls_labels = up_bound * left_bound * down_bound * right_bound
    cls_labels = F.add_axis(cls_labels, axis=1)  # (B,1,19,19)

    # bbox_targets
    # for every coordinate in points, compute the offsets to the box edges (computed for every point, so negative values appear for points outside the box)
    up_left = points - gt_bboxes[:, 0:2, ...]  # (B, 2, 19, 19)
    bottom_right = gt_bboxes[:, 2:4, ...] - points
    bbox_targets = F.concat([up_left, bottom_right], axis = 1)  # (B, 4, 19, 19)

    # centerness_targets
    up_bottom = F.minimum(up_left[:, 0, ...], bottom_right[:, 0, ...]) / F.maximum(up_left[:, 0, ...], bottom_right[:, 0, ...])
    left_right = F.minimum(up_left[:, 1, ...], bottom_right[:, 1, ...]) / F.maximum(up_left[:, 1, ...], bottom_right[:, 1, ...])
    centerness_targets = F.sqrt(F.abs(up_bottom * left_right))
    return cls_labels, bbox_targets, centerness_targets
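The centerness target in Example #6 follows the FCOS-style definition: for each point, multiply the min/max ratios of the vertical and horizontal distances to the box edges and take the square root; F.abs only guards against negative products for points outside the box (those positions are masked out by cls_labels anyway). A tiny NumPy illustration with hypothetical distances, not taken from the original code:

import numpy as np

# distances from one point to the four box edges: top, bottom, left, right
top, bottom, left, right = 2.0, 6.0, 3.0, 3.0
centerness = np.sqrt((min(top, bottom) / max(top, bottom)) *
                     (min(left, right) / max(left, right)))
print(centerness)  # ~0.577; the value is 1.0 at the box center and approaches 0 near an edge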
Example #7
def _bce_loss_with_logits(output, labels, **kwargs):
    r"""
    Sigmoid cross entropy with logits, see tensorflow
    https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
    """
    loss = F.maximum(output, 0) - output * labels + F.log(1 + F.exp(-F.abs(output)))
    return loss.mean()
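The expression in Example #7, max(x, 0) - x * z + log(1 + exp(-|x|)), is the numerically stable form of sigmoid cross entropy with logits used by TensorFlow; it never exponentiates a large positive number. A hedged NumPy check against the naive sigmoid-then-log formula (illustrative only, with inputs small enough that the naive form does not overflow):

import numpy as np

def bce_stable(x, z):
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

def bce_naive(x, z):
    p = 1.0 / (1.0 + np.exp(-x))
    return -(z * np.log(p) + (1 - z) * np.log(1 - p))

x = np.array([-3.0, -0.5, 0.5, 3.0])
z = np.array([0.0, 1.0, 0.0, 1.0])
assert np.allclose(bce_stable(x, z), bce_naive(x, z))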
Example #8
 def _L1(diff, occ_mask=None, if_mask_=False):
     loss_diff = F.abs(diff)
     if not if_mask_:
         photo_loss = F.mean(loss_diff)
     else:
         photo_loss = F.sum(loss_diff * occ_mask) / (F.sum(occ_mask) + 1e-6)
     return photo_loss
Example #9
 def _abs_robust(diff, occ_mask=None, if_mask_=False):
     loss_diff = F.pow((F.abs(diff) + 0.01), 0.4)
     if not if_mask_:
         photo_loss = F.mean(loss_diff)
     else:
         photo_loss = F.sum(loss_diff * occ_mask) / (F.sum(occ_mask) + 1e-6)
     return photo_loss
Example #10
def smooth_l1_loss(pred, target, beta: float):
    abs_x = F.abs(pred - target)
    in_mask = abs_x < beta
    out_mask = 1 - in_mask
    in_loss = 0.5 * abs_x ** 2 / beta
    out_loss = abs_x - 0.5 * beta
    loss = in_loss * in_mask + out_loss * out_mask
    return loss.sum(axis=1)
Example #11
 def forward(self, x):
     B, C, _, _ = x.shape
     # avg_dims = tuple(range(2, len(x.shape)))  # [2 ,3 ]
     nu2 = F.expand_dims(F.pow(x, 2).reshape(B, C, -1).mean(axis=-1,
                                                            keepdims=True),
                         axis=-1)  # [B, C, 1, 1]
     x = x / F.sqrt(nu2 + F.abs(self.eps))
     return F.maximum(self.gamma * x + self.beta, self.tau)
Example #12
 def func(x, y):
     a = x + y
     a1 = F.relu(a)
     a2 = F.abs(a)
     a3 = F.ceil(a) * 2
     a4 = F.floor(a)
     r = a1 - a2
     r1 = a3 / a4
     return r, r1
Example #13
def iou_l1_loss(pred, max_overlaps, gt, ignore_label=-1, background=0):

    pred = pred.reshape(pred.shape[0], -1, max_overlaps.shape[2])
    abs_x = F.abs(pred - max_overlaps)
    mask_bg = 1 - F.equal(gt, background).astype(np.float32)
    mask_ig = 1 - F.equal(gt, ignore_label).astype(np.float32)
    mask = mask_bg * mask_ig

    mask = mask.reshape(mask.shape[0], -1, pred.shape[2])
    loss = (abs_x * mask).sum() / F.maximum(mask.sum(), 1)
    return loss
Example #14
def _smooth_l1_base(pred, gt, sigma):

    sigma2 = sigma**2
    cond_point = 1 / sigma2
    x = pred - gt
    abs_x = F.abs(x)
    in_mask = abs_x < cond_point
    out_mask = 1 - in_mask.astype(np.float32)
    in_value = 0.5 * (sigma * x)**2
    out_value = abs_x - 0.5 / sigma2
    value = in_value * in_mask.astype(np.float32) + out_value * out_mask
    return value
Example #15
def get_smooth_l1_base(
    pred_bbox: Tensor,
    gt_bbox: Tensor,
    sigma: float,
    is_fix: bool = False,
):
    r"""

    Args:
        pred_bbox (Tensor):
            the predicted bbox with the shape of :math:`(N, 4)`
        gt_bbox (Tensor):
            the ground-truth bbox with the shape of :math:`(N, 4)`
        sigma (float):
            the parameter of smooth l1 loss.
        is_fix (bool):
            whether to use the Huber-style form; default is False, which uses the original smooth l1.

    Returns:
        the calculated smooth l1 loss.
    """
    if is_fix:
        sigma = 1 / sigma
        cond_point = sigma
        x = pred_bbox - gt_bbox
        abs_x = F.abs(x)
        in_loss = 0.5 * x**2
        out_loss = sigma * abs_x - 0.5 * sigma**2
    else:
        sigma2 = sigma**2
        cond_point = 1 / sigma2
        x = pred_bbox - gt_bbox
        abs_x = F.abs(x)
        in_loss = 0.5 * x**2 * sigma2
        out_loss = abs_x - 0.5 / sigma2

    in_mask = abs_x < cond_point
    out_mask = 1 - in_mask
    loss = in_loss * in_mask + out_loss * out_mask
    return loss
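In the default branch of Example #15 the loss switches at |x| = 1 / sigma**2, with 0.5 * sigma**2 * x**2 inside and |x| - 0.5 / sigma**2 outside; substituting beta = 1 / sigma**2 recovers the beta parameterization of Examples #5 and #18. A short NumPy check of that equivalence (illustrative only, not part of the original code):

import numpy as np

def smooth_l1_sigma(x, sigma):
    abs_x = np.abs(x)
    return np.where(abs_x < 1.0 / sigma ** 2,
                    0.5 * x ** 2 * sigma ** 2,
                    abs_x - 0.5 / sigma ** 2)

def smooth_l1_beta(x, beta):
    abs_x = np.abs(x)
    return np.where(abs_x < beta, 0.5 * x ** 2 / beta, abs_x - 0.5 * beta)

x = np.linspace(-3.0, 3.0, 13)
sigma = 2.0
assert np.allclose(smooth_l1_sigma(x, sigma), smooth_l1_beta(x, 1.0 / sigma ** 2))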
Example #16
    def get_cls_reg_ctr_targets(self, points, gt_bboxes, bbox_scale=0.15):
        """
            Compute regression, classification targets for points in multiple images.
            Args:
                points (Tensor): (1, 2, 37, 37). Position of each point mapped back to the original image.
                gt_bboxes (Tensor): Ground truth bboxes of each image, (B,4), in [tl_x, tl_y, br_x, br_y] format (top-left and bottom-right corners in the original image).
            Returns:
                cls_labels (Tensor): Labels. (B, 1, 37, 37)   0 or 1, 0 means background, 1 means in the box.
                bbox_targets (Tensor): BBox targets. (B, 4, 37, 37)  only the foreground is considered; the loss for background positions should be set to 0!
                centerness_targets (Tensor): (B, 1, 37, 37)  only the foreground is considered; the loss for background positions should be set to 0!
        """
        B, _ = gt_bboxes.shape
        gt_bboxes = F.add_axis(gt_bboxes, axis=-1)
        gt_bboxes = F.add_axis(gt_bboxes, axis=-1)  # (B,4,1,1)
        # cls_labels
        # compute four bounds to decide whether each point lies inside the box;
        # since the template is relatively large, the bbox is shrunk by bbox_scale
        gap = (gt_bboxes[:, 2, ...] -
               gt_bboxes[:, 0, ...]) * (1 - bbox_scale) / 2  # margin trimmed from each side of the bbox
        up_bound = points[:, 0, ...] > gt_bboxes[:, 0, ...] + gap
        left_bound = points[:, 1, ...] > gt_bboxes[:, 1, ...] + gap
        down_bound = points[:, 0, ...] < gt_bboxes[:, 2, ...] - gap
        right_bound = points[:, 1, ...] < gt_bboxes[:, 3, ...] - gap
        cls_labels = up_bound * left_bound * down_bound * right_bound
        cls_labels = F.add_axis(cls_labels, axis=1)  # (B, 1, 37, 37)
        cls_labels.requires_grad = False

        # bbox_targets
        # for every coordinate in points, compute the offsets to the box edges (computed for every point, so negative values appear for points outside the box)
        up_left = points - gt_bboxes[:, 0:2,
                                     ...]  # (B, 2, 37, 37) offset of each score-map point from the top-left corner
        bottom_right = gt_bboxes[:, 2:4, ...] - points
        bbox_targets = F.concat([up_left, bottom_right],
                                axis=1)  # (B, 4, 37, 37)
        bbox_targets.requires_grad = False

        # centerness_targets
        up_bottom = F.minimum(up_left[:, 0, ...],
                              bottom_right[:, 0, ...]) / F.maximum(
                                  up_left[:, 0, ...], bottom_right[:, 0, ...])
        left_right = F.minimum(up_left[:, 1, ...],
                               bottom_right[:, 1, ...]) / F.maximum(
                                   up_left[:, 1, ...], bottom_right[:, 1, ...])
        centerness_targets = F.sqrt(F.abs(up_bottom * left_right))
        centerness_targets = F.add_axis(centerness_targets,
                                        axis=1)  # (B,1,37,37)
        centerness_targets.requires_grad = False
        return cls_labels, bbox_targets, centerness_targets
Example #17
 def forward(self, x, bridge):
     up = self.up(x)
     bridge = self.skip_m(bridge)
     out = F.concat([up, bridge], 1)
     if self.subnet:
         b_, c_, h_, w_ = bridge.shape
         sub = self.subnet(out)
         V_t = sub.reshape(b_, self.num_subspace, h_ * w_)
         V_t = V_t / (1e-6 + F.abs(V_t).sum(axis=2, keepdims=True))
         V = V_t.transpose(0, 2, 1)
         mat = F.matmul(V_t, V)
         mat_inv = F.matinv(mat)
         project_mat = F.matmul(mat_inv, V_t)
         bridge_ = bridge.reshape(b_, c_, h_ * w_)
         project_feature = F.matmul(project_mat, bridge_.transpose(0, 2, 1))
         bridge = F.matmul(V, project_feature).transpose(0, 2, 1).reshape(
             b_, c_, h_, w_)
         out = F.concat([up, bridge], 1)
     out = self.conv_block(out)
     return out
Example #18
def smooth_l1_loss(pred: Tensor, target: Tensor, beta: float = 1.0) -> Tensor:
    r"""Smooth L1 Loss

    Args:
        pred (Tensor):
            the predictions
        target (Tensor):
            the assigned targets with the same shape as pred
        beta (float):
            the parameter of smooth l1 loss.

    Returns:
        the calculated smooth l1 loss.
    """
    x = pred - target
    abs_x = F.abs(x)
    if beta < 1e-5:
        loss = abs_x
    else:
        in_loss = 0.5 * x ** 2 / beta
        out_loss = abs_x - 0.5 * beta
        loss = F.where(abs_x < beta, in_loss, out_loss)
    return loss
Example #19
 def forward(self, a):
     # add
     if self.mode == "add":
         x = a + mge.tensor(np.float32(10))
         y = a + mge.tensor(self.data1)
         z = x + y
     # sub
     elif self.mode == "sub":
         x = a - mge.tensor(np.float32(10))
         y = a - mge.tensor(self.data1)
         z = x - y
     # mul
     elif self.mode == "mul":
         x = a * mge.tensor(np.float32(10))
         y = mge.tensor(self.data1) * a
         z = x * y
     # div
     elif self.mode == "div":
         y = mge.tensor(self.data1) / a
         x = a / mge.tensor(np.float32(2))
         z = y / x
     # cycle_div
     elif self.mode == "cycle_div":
         z = a / mge.tensor(self.data1)
     # abs
     elif self.mode == "abs":
         z = F.abs(a)
     # exp
     elif self.mode == "exp":
         z = F.exp(a)
     # log
     elif self.mode == "log":
         z = F.log(a)
     else:
         raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
     return z
Example #20
def softplus(x: Tensor) -> Tensor:
    return F.log(1 + F.exp(-F.abs(x))) + F.relu(x)
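Example #20 is the numerically stable rewrite of softplus: log(1 + exp(x)) = log(1 + exp(-|x|)) + max(x, 0), which avoids overflowing exp for large positive inputs. A small NumPy check of the identity (illustrative, using inputs moderate enough that the naive form stays finite):

import numpy as np

x = np.array([-20.0, -1.0, 0.0, 1.0, 20.0])
naive = np.log1p(np.exp(x))                                  # overflows for very large x
stable = np.log1p(np.exp(-np.abs(x))) + np.maximum(x, 0.0)
assert np.allclose(naive, stable)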
Example #21
def _anchor_double_target(gt_boxes, im_info, all_anchors):

    gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
    all_anchors = all_anchors.detach()

    gt_boxes = gt_boxes[:im_info[5].astype(np.int32), :]
    dummy = -F.ones([1, gt_boxes.shape[1]]).to(gt_boxes.device)
    gt_boxes = F.concat([gt_boxes, dummy], axis=0)
    valid_mask = 1 - (gt_boxes[:, 4] < 0).astype(np.float32)

    anchor_centers = _compute_center(all_anchors)
    gtboxes_centers = _compute_center(gt_boxes)
    # gtboxes_centers = gtboxes_centers * valid_mask.unsqueeze(1)
    gtboxes_centers = gtboxes_centers * F.expand_dims(valid_mask, axis=1)

    N, K = all_anchors.shape[0], gt_boxes.shape[0]
    an_centers = F.expand_dims(anchor_centers, axis=1)
    gt_centers = F.expand_dims(gtboxes_centers, axis=0)
    # an_centers = anchor_centers.unsqueeze(1).repeat(1, K, 1)
    # gt_centers = gtboxes_centers.unsqueeze(0).repeat(N, 1, 1)

    distance = F.abs(an_centers - gt_centers)
    distance = F.sqrt(F.pow(distance, 2).sum(axis=2))

    start = 0
    end = 5
    overlaps = box_overlap_opr(all_anchors[:, :4], gt_boxes[:, :4])
    overlaps *= F.expand_dims(valid_mask, axis=0)
    default_num = 16

    ious_list = []

    for l in range(start, end):

        _, index = F.cond_take(all_anchors[:, 4] == l, all_anchors[:, 4])

        level_dist = distance[index, :].transpose(1, 0)
        ious = overlaps[index, :].transpose(1, 0)
        sorted_index = F.argsort(level_dist, descending=False)
        n = min(sorted_index.shape[1], default_num)
        ious = F.gather(ious, 1, sorted_index[:, :n]).transpose(1, 0)

        ious_list.append(ious)

    ious = F.concat(ious_list, axis=0)
    mean_var = F.mean(ious, axis=0)
    std_var = F.std(ious, 0)
    iou_thresh_per_gt = mean_var + std_var

    iou_thresh_per_gt = F.maximum(iou_thresh_per_gt, 0.2)

    # limits the anchor centers in the gtboxes
    N, K = all_anchors.shape[0], gt_boxes.shape[0]
    anchor_points = an_centers
    pos_area = _compute_pos_area(gt_boxes, 0.3)
    # pos_area = pos_area.unsqueeze(0).repeat(N, 1, 1)
    pos_area = F.broadcast_to(F.expand_dims(pos_area, axis=0),
                              (N, K, pos_area.shape[-1]))

    l = anchor_points[:, :, 0] - pos_area[:, :, 0]
    r = pos_area[:, :, 2] - anchor_points[:, :, 0]
    t = anchor_points[:, :, 1] - pos_area[:, :, 1]
    b = pos_area[:, :, 3] - anchor_points[:, :, 1]

    is_in_gt = F.stack([l, r, t, b], axis=2)
    is_in_gt = is_in_gt.min(axis=2) > 0.1
    valid_mask = (overlaps >= F.expand_dims(
        iou_thresh_per_gt, axis=0)) * is_in_gt.astype(np.float32)
    ious = overlaps * valid_mask

    sorted_index = F.argsort(ious, 1)
    sorted_overlaps = F.gather(ious, 1, sorted_index)
    max_overlaps = sorted_overlaps[:, :2].flatten()
    argmax_overlaps = sorted_index[:, :2].flatten()

    n, c = all_anchors.shape
    device = all_anchors.device
    labels = -F.ones(2 * n).to(device)
    positive_mask = (max_overlaps >= 0.2).to(device).astype(np.float32)
    negative_mask = (max_overlaps < 0.2).to(device).astype(np.float32)
    labels = positive_mask + labels * (1 - positive_mask) * (1 - negative_mask)

    bbox_targets = gt_boxes[argmax_overlaps, :4]
    all_anchors = F.broadcast_to(F.expand_dims(all_anchors, axis=1),
                                 (n, 2, c)).reshape(-1, c)

    bbox_targets = bbox_transform_opr(all_anchors[:, :4], bbox_targets)

    labels_cat = gt_boxes[argmax_overlaps, 4]
    labels_cat = labels_cat * (1 - F.equal(labels, -1).astype(
        np.float32)) - F.equal(labels, -1).astype(np.float32)

    return labels, bbox_targets, labels_cat
Example #22
 def photo_loss(self, img1, img2_warp, occu_mask1):
     l1_loss = self.params.loss.l1 * F.abs(img1 - img2_warp) * occu_mask1
     ssim_loss = self.params.loss.ssim * SSIM(img1 * occu_mask1,
                                              img2_warp * occu_mask1)
     return sum([l1_loss.mean(), ssim_loss.mean()])
Example #23
    def forward(self, a):
        # add
        if self.mode == "add":
            x = a + mge.tensor(np.float32(10))
            y = a + mge.tensor(self.data1)
            z = x + y
        # sub
        elif self.mode == "sub":
            x = a - mge.tensor(np.float32(10))
            y = a - mge.tensor(self.data1)
            z = x - y
        # mul
        elif self.mode == "mul":
            x = a * mge.tensor(np.float32(10))
            y = mge.tensor(self.data1) * a
            z = x * y
        # div
        elif self.mode == "max":
            x = a + mge.tensor(self.data)
            y = a + mge.tensor(self.data2)
            z = F.maximum(x, y)
        elif self.mode == "min":
            x = a + mge.tensor(self.data)
            y = a + mge.tensor(self.data2)
            z = F.minimum(x, y)

        elif self.mode == "pow":
            z = a**2

        elif self.mode == "ceil":
            z = F.ceil(a)

        elif self.mode == "floor":
            z = F.floor(a)

        elif self.mode == "div":
            y = mge.tensor(self.data1) / a
            x = a / mge.tensor(np.float32(2))
            z = y / x
        # cycle_div
        elif self.mode == "cycle_div":
            z = a / mge.tensor(self.data1)
        # abs
        elif self.mode == "abs":
            z = F.abs(a)
        # exp
        elif self.mode == "exp":
            z = F.exp(a)
        # log
        elif self.mode == "log":
            z = F.log(a)
        elif self.mode == "fuse_add_relu":
            y = a + mge.tensor(self.data2)
            z = F.relu(y)
        elif self.mode == "fuse_mul_add3":
            y = a * mge.tensor(self.data1)
            z = y + mge.tensor(self.data2)
        elif self.mode == "fuse_add_sigmoid":
            y = a + mge.tensor(self.data2)
            z = F.sigmoid(y)
        else:
            raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
        return z
Example #24
 def forward(self, x, y):
     # mask
     ab = F.abs(x - y)
     mask = (ab > self.t).astype("float32")
     return F.sum(mask * ab) / (F.sum(mask) + self.eps)
Example #25
def get_loss_l1(pred: mge.Tensor, label: mge.Tensor, norm_k: mge.Tensor):
    B = pred.shape[0]
    L1 = F.abs(pred - label).reshape(B, -1).mean(axis=1)
    L1 = L1 / norm_k.flatten()
    return L1.mean()
Example #26
def _anchor_target(gt_boxes, im_info, all_anchors):

    gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
    all_anchors = all_anchors.detach()

    gt_boxes = gt_boxes[:im_info[5], :]
    valid_mask = 1 - (gt_boxes[:, 4] < 0).astype(np.float32)

    anchor_centers = _compute_center(all_anchors)
    gtboxes_centers = _compute_center(gt_boxes) * F.expand_dims(valid_mask,
                                                                 axis=1)

    N, K = all_anchors.shape[0], gt_boxes.shape[0]
    # an_centers = anchor_centers.unsqueeze(1).repeat(1, K, 1)
    an_centers = F.expand_dims(anchor_centers, axis=1)
    gt_centers = F.expand_dims(gtboxes_centers, axis=0)
    # gt_centers = gtboxes_centers.unsqueeze(0).repeat(N, 1, 1)

    distance = F.abs(an_centers - gt_centers)
    distance = F.sqrt(F.pow(distance, 2).sum(axis=2))

    start = 0
    end = 5
    overlaps = box_overlap_opr(all_anchors[:, :4], gt_boxes[:, :4])
    overlaps = overlaps * F.expand_dims(valid_mask, axis=0)
    default_num = 9

    ious_list = []
    for l in range(start, end):

        _, index = F.cond_take(all_anchors[:, 4] == l, all_anchors[:, 4])
        level_dist = distance[index, :].transpose(1, 0)
        ious = overlaps[index, :].transpose(1, 0)
        sorted_index = F.argsort(level_dist, descending=False)
        n = min(default_num, sorted_index.shape[1])
        ious = F.gather(ious, 1, sorted_index[:, :n]).transpose(1, 0)
        ious_list.append(ious)

    ious = F.concat(ious_list, axis=0)
    mean_var = F.mean(ious, axis=0)
    std_var = F.std(ious, 0)
    iou_thresh_per_gt = mean_var + std_var

    iou_thresh_per_gt = F.maximum(iou_thresh_per_gt, 0.35)
    n = iou_thresh_per_gt.shape[0]

    # limits the anchor centers in the gtboxes
    N, K = all_anchors.shape[0], gt_boxes.shape[0]
    anchor_points = an_centers
    proxies = F.broadcast_to(F.expand_dims(gt_boxes, axis=0),
                             (N, K, gt_boxes.shape[-1]))
    l = anchor_points[:, :, 0] - proxies[:, :, 0]
    r = proxies[:, :, 2] - anchor_points[:, :, 0]
    t = anchor_points[:, :, 1] - proxies[:, :, 1]
    b = proxies[:, :, 3] - anchor_points[:, :, 1]

    is_in_gt = F.stack([l, r, t, b], axis=2)
    is_in_gt = is_in_gt.min(axis=2) > 0.1
    valid_mask = (overlaps >= F.expand_dims(
        iou_thresh_per_gt, axis=0)) * is_in_gt.astype(np.float32)
    ious = overlaps * valid_mask

    argmax_overlaps = F.argmax(ious, axis=1)
    max_overlaps = F.gather(ious, 1, F.expand_dims(argmax_overlaps, axis=1)).flatten()

    n = all_anchors.shape[0]
    labels = -F.ones(n)
    positive_mask = max_overlaps > 0
    negative_mask = max_overlaps < config.rpn_negative_overlap
    labels = positive_mask + labels * (1 - positive_mask) * (1 - negative_mask)

    bbox_targets = gt_boxes[argmax_overlaps, :4]
    bbox_targets = bbox_transform_opr(all_anchors[:, :4], bbox_targets)

    labels_cat = gt_boxes[argmax_overlaps, 4]
    labels_cat = labels_cat * (1 - F.equal(labels, 0).astype(np.float32))
    labels_cat = labels_cat * (1 - F.equal(labels, -1).astype(
        np.float32)) - F.equal(labels, -1).astype(np.float32)

    return labels, bbox_targets, labels_cat