Example #1
# Assumed imports for the legacy MegEngine (v0.x) API these examples use:
import numpy as np
import megengine.functional as F
from megengine import Buffer, jit
from megengine.test import assertTensorClose

def test_add_update_params():
    b = np.random.random((2, 3)).astype(np.float32)
    y = Buffer(b)

    # Traced function that accumulates x into the buffer y in place.
    @jit.trace
    def f(x):
        return F.add_update(y, x)

    f(np.zeros((2, 3)).astype(np.float32))

    # Out-of-trace update with a zero delta leaves y unchanged (y + 0.1 * 0).
    z = Buffer(np.zeros((2, 3)).astype(np.float32))
    F.add_update(y, z, beta=0.1)

    # y is still b here, so adding ones yields b + 1.
    res = f(np.ones((2, 3)).astype(np.float32))
    assertTensorClose(res, b + 1)
Example #2
# Same assumed imports as Example #1, plus tensor (legacy MegEngine v0.x API):
from megengine import tensor

def test_add_update():
    shape = (2, 3)
    v = np.random.random(shape).astype(np.float32)
    b = Buffer(v)

    # add_update accumulates into the buffer, so repeated calls keep adding.
    u = F.add_update(b, 1)
    assertTensorClose(u.numpy(), v + 1)
    u = F.add_update(b, 1)
    assertTensorClose(u.numpy(), v + 2)

    # Full form: dest is updated in place to alpha * dest + beta * delta + bias.
    x = np.ones((2, 2), dtype=np.float32)
    y = x * 0.5
    dest = tensor(x)
    delta = tensor(y)
    r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
    assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
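The assertions above pin down the update rule: F.add_update(dest, delta, alpha=..., beta=..., bias=...) writes alpha * dest + beta * delta + bias back into dest and returns the result. The NumPy sketch below only mirrors that formula for illustration; it is an assumption about the semantics, not MegEngine's implementation.

# Minimal NumPy sketch of the update rule implied by the assertions above
# (illustrative only, not MegEngine's implementation):
import numpy as np

def add_update_reference(dest, delta, alpha=1.0, beta=1.0, bias=0.0):
    # In-place accumulation: dest <- alpha * dest + beta * delta + bias
    dest[...] = alpha * dest + beta * delta + bias
    return dest

x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
r = add_update_reference(x.copy(), y, alpha=0.9, beta=0.1, bias=0.1)
np.testing.assert_allclose(r, x * 0.9 + y * 0.1 + 0.1)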
Example #3
# Fragment: accumulate x into a buffer y from the enclosing scope and return the result.
def f(x):
    return F.add_update(y, x)
Example #4
    def forward(self, inputs):
        image = self.preprocess_image(inputs["image"])
        features = self.backbone(image)
        features = [features[f] for f in self.in_features]

        box_logits, box_offsets = self.head(features)

        box_logits_list = [
            _.dimshuffle(0, 2, 3, 1).reshape(self.batch_size, -1,
                                             self.cfg.num_classes)
            for _ in box_logits
        ]
        box_offsets_list = [
            _.dimshuffle(0, 2, 3, 1).reshape(self.batch_size, -1, 4)
            for _ in box_offsets
        ]

        anchors_list = [
            self.anchor_gen(features[i], self.stride_list[i])
            for i in range(len(features))
        ]

        all_level_box_logits = F.concat(box_logits_list, axis=1)
        all_level_box_offsets = F.concat(box_offsets_list, axis=1)
        all_level_anchors = F.concat(anchors_list, axis=0)

        if self.training:
            box_gt_scores, box_gt_offsets = self.get_ground_truth(
                all_level_anchors,
                inputs["gt_boxes"],
                inputs["im_info"][:, 4].astype(np.int32),
            )
            norm_type = "none" if self.cfg.loss_normalizer_momentum > 0.0 else "fg"
            rpn_cls_loss = layers.get_focal_loss(
                all_level_box_logits,
                box_gt_scores,
                alpha=self.cfg.focal_loss_alpha,
                gamma=self.cfg.focal_loss_gamma,
                norm_type=norm_type,
            )
            rpn_bbox_loss = (layers.get_smooth_l1_loss(
                all_level_box_offsets,
                box_gt_offsets,
                box_gt_scores,
                self.cfg.smooth_l1_beta,
                norm_type=norm_type,
            ) * self.cfg.reg_loss_weight)

            if norm_type == "none":
                # Keep an exponential moving average of the foreground-anchor
                # count in self.loss_normalizer (alpha = momentum, beta = 1 - momentum).
                F.add_update(
                    self.loss_normalizer,
                    (box_gt_scores > 0).sum(),
                    alpha=self.cfg.loss_normalizer_momentum,
                    beta=1 - self.cfg.loss_normalizer_momentum,
                )
                rpn_cls_loss = rpn_cls_loss / F.maximum(
                    self.loss_normalizer, 1)
                rpn_bbox_loss = rpn_bbox_loss / F.maximum(
                    self.loss_normalizer, 1)

            total = rpn_cls_loss + rpn_bbox_loss
            loss_dict = {
                "total_loss": total,
                "loss_cls": rpn_cls_loss,
                "loss_loc": rpn_bbox_loss,
            }
            self.cfg.losses_keys = list(loss_dict.keys())
            return loss_dict
        else:
            # currently does not support multi-batch testing
            assert self.batch_size == 1

            transformed_box = self.box_coder.decode(
                all_level_anchors,
                all_level_box_offsets[0],
            )
            transformed_box = transformed_box.reshape(-1, 4)

            scale_w = inputs["im_info"][0, 1] / inputs["im_info"][0, 3]
            scale_h = inputs["im_info"][0, 0] / inputs["im_info"][0, 2]
            transformed_box = transformed_box / F.concat(
                [scale_w, scale_h, scale_w, scale_h], axis=0)
            clipped_box = layers.get_clipped_box(
                transformed_box, inputs["im_info"][0, 2:4]).reshape(-1, 4)
            all_level_box_scores = F.sigmoid(all_level_box_logits)
            return all_level_box_scores[0], clipped_box
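When loss_normalizer_momentum is positive, the training branch above keeps a running estimate of the foreground-anchor count in self.loss_normalizer: the add_update call with alpha = momentum and beta = 1 - momentum is an exponential moving average, and both losses are then divided by max(normalizer, 1). A plain-Python sketch of that bookkeeping, with illustrative names only:

# Sketch of the loss-normalizer EMA performed by the add_update call above
# (plain Python, hypothetical names, for illustration only):
def update_loss_normalizer(normalizer, num_foreground, momentum=0.9):
    # normalizer <- momentum * normalizer + (1 - momentum) * num_foreground
    return momentum * normalizer + (1.0 - momentum) * num_foreground

normalizer = 0.0
for num_fg in (80, 120, 90):
    normalizer = update_loss_normalizer(normalizer, num_fg)
# Both losses are then divided by max(normalizer, 1).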
Example #5
def _apply_lipshitz_constraint(self):
    """Weight clipping described in [Wasserstein GAN](https://arxiv.org/abs/1701.07875)"""
    for p in self.parameters():
        # With alpha=0 the update is a pure assignment: p is replaced by its clamped value.
        F.add_update(p, F.clamp(p, lower=-3e-2, upper=3e-2), alpha=0)
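With alpha=0 and the defaults beta=1, bias=0, the call above reduces to an assignment: each parameter is overwritten by its clamped copy, which is the WGAN weight clipping the docstring cites. A NumPy sketch of that reduction (illustrative only, not the library's implementation):

# What the alpha=0 call reduces to: p <- 0 * p + 1 * clip(p, -3e-2, 3e-2)
import numpy as np

def clip_weights(param, limit=3e-2):
    # Overwrite the parameter with its element-wise clipped value.
    param[...] = np.clip(param, -limit, limit)
    return param

p = np.array([-0.1, 0.01, 0.5], dtype=np.float32)
clip_weights(p)  # p is now [-0.03, 0.01, 0.03]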
Example #6
# Fragment: x comes from the enclosing scope and is bumped in place before being read.
def f():
    F.add_update(x, 1)
    return x + 1