Example #1
def test_cross_entropy_with_softmax():
    data1_shape = (1, 2)
    label1_shape = (1, )
    data2_shape = (1, 3)
    label2_shape = (1, )

    data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
    label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
    expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()

    data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
    label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
    expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()

    cases = [
        {
            "input": [data1, label1],
            "output": expect1,
        },
        {
            "input": [data2, label2],
            "output": expect2,
        },
    ]
    opr_test(cases, F.cross_entropy_with_softmax)
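This test leans on the identity cross_entropy_with_softmax(logits, y) == cross_entropy(softmax(logits), y). A minimal NumPy sketch of that identity (assumed semantics: cross_entropy consumes probabilities, the fused op consumes logits; the np_* names below are illustrative, not MegEngine API):

import numpy as np

def np_softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))  # max-shift for stability
    return e / e.sum(axis=axis, keepdims=True)

def np_cross_entropy(probs, label):
    return -np.log(probs[np.arange(label.size), label]).mean()

def np_cross_entropy_with_softmax(logits, label):
    m = logits.max(axis=-1)
    lse = np.log(np.exp(logits - m[:, None]).sum(axis=-1)) + m  # log-sum-exp
    return (lse - logits[np.arange(label.size), label]).mean()

logits = np.array([[1.0, 0.5]], dtype=np.float32)
label = np.array([1])
assert np.allclose(np_cross_entropy(np_softmax(logits), label),
                   np_cross_entropy_with_softmax(logits, label))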
Example #2
 def forward(self, x):
     x = self.fc0(x)
     x = self.bn0(x)
     x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
     x = self.fc1(x)
     x = self.bn1(x)
     x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
     x = self.fc2(x)
     return x
Example #3
def test_compile_multi_times_static():
    return  # XXX: rewrite or remove this test
    with Graph() as cg:
        cg.set_option("eager_evaluation", False)
        data = Input("data", shape=(2, 28))
        label = Input("label", shape=(2, ), dtype=np.int32)

        mlp = MLP()
        opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)

        pred0 = mlp(data)
        pred = F.softmax(pred0)
        loss = F.square_loss(pred, label.reshape(2, 1))
        opt.zero_grad()
        grads = opt.backward(loss)
        opt.step()

        f0 = compile(pred, None)
        f1 = compile([pred, loss], grads, copy=True)

        data = np.random.random((2, 28)).astype(np.float32)
        label = np.random.randint(0, 10, (2, )).astype(np.float32)
        out0 = f0(data=data)
        out1 = f1(data=data, label=label)
        assertTensorClose(out0[0], out1[0])

        _ = compile([pred, loss], grads, copy=False)
        with pytest.raises(mgb.MegBrainError):
            f0(data=data)
Example #4
    def forward(self, x):
        # x: b x l x i
        x = self.init_map(x)  # b x l x e
        ori = x
        # `eye` is assumed to be defined in the enclosing scope
        # (e.g. an index tensor used for the positional encoding).
        p = self.position_encoding(eye)
        x = x + p

        values = self.value_mapping(x)  # b x l x e
        keys = self.key_mapping(x)  # b x l x e
        # note: queries reuse key_mapping here; a dedicated query mapping may be intended
        queries = self.key_mapping(x)

        attention = F.softmax(F.batched_matrix_mul(queries,
                                                   keys.dimshuffle(0, 2, 1)),
                              axis=1)  # b x l x l
        out = F.batched_matrix_mul(values.dimshuffle(0, 2, 1), attention)

        out = out.dimshuffle(0, 2, 1)
        out = out + ori
        out = F.relu(out)

        return out
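A quick shape check of the attention block above, with NumPy @ and transpose standing in for batched_matrix_mul and dimshuffle (the sizes are illustrative assumptions):

import numpy as np

b, l, e = 2, 5, 8  # batch, sequence length, embedding size (assumed)
queries = np.random.randn(b, l, e)
keys = np.random.randn(b, l, e)
values = np.random.randn(b, l, e)

scores = queries @ keys.transpose(0, 2, 1)                 # b x l x l
scores = np.exp(scores - scores.max(axis=1, keepdims=True))
attention = scores / scores.sum(axis=1, keepdims=True)     # softmax over axis=1, as above
out = values.transpose(0, 2, 1) @ attention                # b x e x l
out = out.transpose(0, 2, 1)                               # back to b x l x e
assert out.shape == (b, l, e)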
Example #5
    def forward(self, x):
        B, C, H, W = x.shape
        N = self.frames
        C = C // N
        A2 = F.dimshuffle(self.A2(x).reshape(B, N, C, H, W), (0, 2, 1, 3, 4)).reshape(B, C, N*H*W)
        B2 = F.dimshuffle(self.B2(x).reshape(B, N, C, H, W), (0, 1, 3, 4, 2)).reshape(B, N*H*W, C)
        A3 = self.A3(x).reshape(B, N, C, H, W).reshape(B, N, C*H*W)
        B3 = F.dimshuffle(self.B3(x).reshape(B, N, C, H, W).reshape(B, N, C*H*W), (0, 2, 1))

        D2 = F.dimshuffle(self.D2(x).reshape(B, N, C, H, W), (0, 2, 1, 3, 4)).reshape(B, C, N*H*W)
        D3 = self.D3(x).reshape(B, N, C, H, W).reshape(B, N, C*H*W)

        attention2 = F.softmax(F.batched_matrix_mul(A2, B2), axis=-1)  # [B, C, C]
        attention3 = F.softmax(F.batched_matrix_mul(A3, B3), axis=-1)  # [B, N, N]

        E2 = F.dimshuffle(F.batched_matrix_mul(attention2, D2).reshape(B, C, N, H, W), (0, 2, 1, 3, 4)).reshape(B, N*C, H, W)
        E3 = F.batched_matrix_mul(attention3, D3).reshape(B, N*C, H, W)
        return x + E2 + E3
Example #6
 def forward(self, x):
     bs = x.shape[0]
     if self.radix > 1:
         x = x.reshape((bs, self.cardinality, self.radix, -1))
         x = F.softmax(x, axis=1)
         x = x.reshape(bs, -1)
     else:
         x = F.sigmoid(x)
     return x
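The reshape-then-softmax pattern above makes groups of channels compete: after viewing the input as (bs, cardinality, radix, -1), the softmax normalizes across one group axis. A hedged NumPy sketch (sizes are assumptions, and which axis is normalized must match how the channels were packed):

import numpy as np

bs, cardinality, radix, rest = 2, 4, 2, 8        # illustrative sizes
x = np.random.randn(bs, cardinality * radix * rest).astype(np.float32)
g = x.reshape(bs, cardinality, radix, rest)
e = np.exp(g - g.max(axis=1, keepdims=True))
attn = e / e.sum(axis=1, keepdims=True)          # softmax over axis=1, as in forward()
assert np.allclose(attn.sum(axis=1), 1.0, atol=1e-6)
out = attn.reshape(bs, -1)                       # flattened back, like the module output
assert out.shape == x.shape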
Example #7
 def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
     # stride: 64,32,16,8,4 -> 4, 8, 16, 32
     fpn_fms = fpn_fms[1:][::-1]
     stride = [4, 8, 16, 32]
     pool_features, rcnn_rois, labels, bbox_targets = roi_pool(
             fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
             labels, bbox_targets)
     flatten_feature = F.flatten(pool_features, start_axis=1)
     roi_feature = F.relu(self.fc1(flatten_feature))
     roi_feature = F.relu(self.fc2(roi_feature))
     pred_emd_pred_cls_0 = self.emd_pred_cls_0(roi_feature)
     pred_emd_pred_delta_0 = self.emd_pred_delta_0(roi_feature)
     pred_emd_pred_cls_1 = self.emd_pred_cls_1(roi_feature)
     pred_emd_pred_delta_1 = self.emd_pred_delta_1(roi_feature)
     if self.training:
         loss0 = emd_loss(
                     pred_emd_pred_delta_0, pred_emd_pred_cls_0,
                     pred_emd_pred_delta_1, pred_emd_pred_cls_1,
                     bbox_targets, labels)
         loss1 = emd_loss(
                     pred_emd_pred_delta_1, pred_emd_pred_cls_1,
                     pred_emd_pred_delta_0, pred_emd_pred_cls_0,
                     bbox_targets, labels)
         loss = F.concat([loss0, loss1], axis=1)
         indices = F.argmin(loss, axis=1)
         loss_emd = F.indexing_one_hot(loss, indices, 1)
         loss_emd = loss_emd.sum() / loss_emd.shapeof()[0]
         loss_dict = {}
         loss_dict['loss_rcnn_emd'] = loss_emd
         return loss_dict
     else:
         pred_scores_0 = F.softmax(pred_emd_pred_cls_0)[:, 1:].reshape(-1, 1)
         pred_scores_1 = F.softmax(pred_emd_pred_cls_1)[:, 1:].reshape(-1, 1)
         pred_delta_0 = pred_emd_pred_delta_0[:, 4:].reshape(-1, 4)
         pred_delta_1 = pred_emd_pred_delta_1[:, 4:].reshape(-1, 4)
         target_shape = (rcnn_rois.shapeof()[0], config.num_classes - 1, 4)
         base_rois = F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
         pred_bbox_0 = restore_bbox(base_rois, pred_delta_0, True)
         pred_bbox_1 = restore_bbox(base_rois, pred_delta_1, True)
         pred_bbox_0 = F.concat([pred_bbox_0, pred_scores_0], axis=1)
         pred_bbox_1 = F.concat([pred_bbox_1, pred_scores_1], axis=1)
         #[{head0, pre1, tag1}, {head1, pre1, tag1}, {head0, pre1, tag2}, ...]
         pred_bbox = F.concat((pred_bbox_0, pred_bbox_1), axis=1).reshape(-1, 5)
         return pred_bbox
Example #8
    def forward(self, x):
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        x = F.relu(x)
        x = self.fc4(x)
        prob = F.softmax(x)

        return prob
Example #9
File: rcnn.py Project: FateScript/Models-1
    def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
        rcnn_rois, labels, bbox_targets = self.get_ground_truth(
            rcnn_rois, im_info, gt_boxes)

        fpn_fms = [fpn_fms[x] for x in self.in_features]
        pool_features = layers.roi_pool(
            fpn_fms,
            rcnn_rois,
            self.stride,
            self.pooling_size,
            self.pooling_method,
        )
        flatten_feature = F.flatten(pool_features, start_axis=1)
        roi_feature = F.relu(self.fc1(flatten_feature))
        roi_feature = F.relu(self.fc2(roi_feature))
        pred_cls = self.pred_cls(roi_feature)
        pred_delta = self.pred_delta(roi_feature)

        if self.training:
            # loss for classification
            loss_rcnn_cls = layers.softmax_loss(pred_cls, labels)
            # loss for regression
            pred_delta = pred_delta.reshape(-1, self.cfg.num_classes + 1, 4)

            vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
            pred_delta = F.indexing_one_hot(pred_delta, vlabels, axis=1)

            loss_rcnn_loc = layers.get_smooth_l1_loss(
                pred_delta,
                bbox_targets,
                labels,
                self.cfg.rcnn_smooth_l1_beta,
                norm_type="all",
            )
            loss_dict = {
                'loss_rcnn_cls': loss_rcnn_cls,
                'loss_rcnn_loc': loss_rcnn_loc
            }
            return loss_dict
        else:
            # slice 1 for removing background
            pred_scores = F.softmax(pred_cls, axis=1)[:, 1:]
            pred_delta = pred_delta[:, 4:].reshape(-1, 4)
            target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
            # rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
            base_rois = F.add_axis(rcnn_rois[:, 1:5],
                                   1).broadcast(target_shape).reshape(-1, 4)
            pred_bbox = self.box_coder.decode(base_rois, pred_delta)
            return pred_bbox, pred_scores
Example #10
    def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
        rcnn_rois, labels, bbox_targets = self.get_ground_truth(
            rcnn_rois, im_info, gt_boxes)

        fpn_fms = [fpn_fms[x] for x in self.in_features]
        pool_features = layers.roi_pool(
            fpn_fms,
            rcnn_rois,
            self.stride,
            self.pooling_size,
            self.pooling_method,
        )
        flatten_feature = F.flatten(pool_features, start_axis=1)
        roi_feature = F.relu(self.fc1(flatten_feature))
        roi_feature = F.relu(self.fc2(roi_feature))
        pred_logits = self.pred_cls(roi_feature)
        pred_offsets = self.pred_delta(roi_feature)

        if self.training:
            # loss for rcnn classification
            loss_rcnn_cls = F.loss.cross_entropy(pred_logits, labels, axis=1)
            # loss for rcnn regression
            pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes, 4)
            num_samples = labels.shape[0]
            fg_mask = labels > 0
            loss_rcnn_bbox = layers.smooth_l1_loss(
                pred_offsets[fg_mask, labels[fg_mask] - 1],
                bbox_targets[fg_mask],
                self.cfg.rcnn_smooth_l1_beta,
            ).sum() / F.maximum(num_samples, 1)

            loss_dict = {
                "loss_rcnn_cls": loss_rcnn_cls,
                "loss_rcnn_bbox": loss_rcnn_bbox,
            }
            return loss_dict
        else:
            # slice 1 for removing background
            pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
            pred_offsets = pred_offsets.reshape(-1, 4)
            target_shape = (rcnn_rois.shape[0], self.cfg.num_classes, 4)
            # rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
            base_rois = F.broadcast_to(
                F.expand_dims(rcnn_rois[:, 1:5], axis=1),
                target_shape).reshape(-1, 4)
            pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
            return pred_bbox, pred_scores
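The gather pred_offsets[fg_mask, labels[fg_mask] - 1] picks, for each foreground RoI, the four offsets predicted for its ground-truth class (background has no regression target). A NumPy sketch with assumed shapes:

import numpy as np

num_rois, num_classes = 4, 3                          # illustrative sizes
pred_offsets = np.random.randn(num_rois, num_classes, 4)
labels = np.array([0, 2, 1, 0])                       # 0 = background
fg_mask = labels > 0
picked = pred_offsets[fg_mask, labels[fg_mask] - 1]   # (num_fg, 4)
assert picked.shape == (int(fg_mask.sum()), 4)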
Example #11
    def forward(self, x, position):
        # x: bxlxi

        values = x  # bxlxe

        queries = self.query_mapping1(position)
        keys = self.key_mapping1(position)

        attention = F.softmax(F.batched_matrix_mul(queries,
                                                   keys.dimshuffle(0, 2, 1)),
                              axis=2)  # b x l x l
        out = F.batched_matrix_mul(values.dimshuffle(0, 2, 1),
                                   attention.dimshuffle(0, 2, 1))

        out = out.dimshuffle(0, 2, 1)

        return out
Example #12
 def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
     # stride: 64,32,16,8,4 -> 4, 8, 16, 32
     fpn_fms = fpn_fms[1:][::-1]
     stride = [4, 8, 16, 32]
     pool_features, rcnn_rois, labels, bbox_targets = roi_pool(
         fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align', labels,
         bbox_targets)
     flatten_feature = F.flatten(pool_features, start_axis=1)
     roi_feature = F.relu(self.fc1(flatten_feature))
     roi_feature = F.relu(self.fc2(roi_feature))
     pred_cls = self.pred_cls(roi_feature)
     pred_delta = self.pred_delta(roi_feature)
     if self.training:
         # loss for regression
         labels = labels.astype(np.int32).reshape(-1)
         # multiple classes to one
         pos_masks = labels > 0
         pred_delta = pred_delta.reshape(-1, config.num_classes, 4)
         indexing_label = (labels * pos_masks).reshape(-1, 1)
         indexing_label = indexing_label.broadcast((labels.shapeof()[0], 4))
         pred_delta = F.indexing_one_hot(pred_delta, indexing_label, 1)
         localization_loss = smooth_l1_loss(pred_delta, bbox_targets,
                                            config.rcnn_smooth_l1_beta)
         localization_loss = localization_loss * pos_masks
         # loss for classification
         valid_masks = labels >= 0
         objectness_loss = softmax_loss(pred_cls, labels)
         objectness_loss = objectness_loss * valid_masks
         normalizer = 1.0 / (valid_masks.sum())
         loss_rcnn_cls = objectness_loss.sum() * normalizer
         loss_rcnn_loc = localization_loss.sum() * normalizer
         loss_dict = {}
         loss_dict['loss_rcnn_cls'] = loss_rcnn_cls
         loss_dict['loss_rcnn_loc'] = loss_rcnn_loc
         return loss_dict
     else:
         pred_scores = F.softmax(pred_cls)[:, 1:].reshape(-1, 1)
         pred_delta = pred_delta[:, 4:].reshape(-1, 4)
         target_shape = (rcnn_rois.shapeof()[0], config.num_classes - 1, 4)
         base_rois = F.add_axis(rcnn_rois[:, 1:5],
                                1).broadcast(target_shape).reshape(-1, 4)
         pred_bbox = restore_bbox(base_rois, pred_delta, True)
         pred_bbox = F.concat([pred_bbox, pred_scores], axis=1)
         return pred_bbox
Example #13
    def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
        # stride: 64,32,16,8,4 -> 4, 8, 16, 32
        fpn_fms = fpn_fms[1:]
        fpn_fms.reverse()
        stride = [4, 8, 16, 32]
        poo5, rcnn_rois, labels, bbox_targets = roi_pool(
                fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
                labels, bbox_targets)
        poo5 = F.flatten(poo5, start_axis=1)
        fc1 = F.relu(self.fc1(poo5))
        fc2 = F.relu(self.fc2(fc1))

        cls_scores = self.cls(fc2)
        pred_boxes = self.bbox(fc2)
        prob = F.concat([pred_boxes, cls_scores], axis=1)
        if self.training:
            bbox_targets, labels = bbox_targets.reshape(-1, 4), labels.flatten()
            cls_loss = softmax_loss(cls_scores, labels)

            pred_boxes = pred_boxes.reshape(-1, self.n, 4)
            bbox_loss = smooth_l1_loss_rcnn(pred_boxes, bbox_targets, labels,
                                            config.rcnn_smooth_l1_beta)

            loss_dict = {}
            loss_dict['cls_loss'] = cls_loss
            loss_dict['bbox_loss'] = bbox_loss
            return loss_dict
        else:
            offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
            pred_bbox = offsets.reshape(-1, self.n, 4)
            cls_prob = F.softmax(cls_scores, axis=1)
            n = rcnn_rois.shape[0]
            rois = F.broadcast_to(F.expand_dims(rcnn_rois[:, 1:5], axis=1), (n, 1, 4)).reshape(-1, 4)
            normalized = config.rcnn_bbox_normalize_targets
            pred_boxes = restore_bbox(rois, pred_bbox, normalized, config)
            pred_bbox = F.concat([pred_boxes, F.expand_dims(cls_prob, axis=2)], axis=2)
            return pred_bbox
Example #14
 def forward(self, fpn_fms, proposals, labels=None, bbox_targets=None):
     # input p2-p5
     fpn_fms = fpn_fms[1:][::-1]
     stride = [4, 8, 16, 32]
     #pool_features = roi_pooler(fpn_fms, proposals, stride, (7, 7), "ROIAlignV2")
     pool_features, proposals, labels, bbox_targets = roi_pool(
             fpn_fms, proposals, stride, (7, 7), 'roi_align',
             labels, bbox_targets)
     flatten_feature = F.flatten(pool_features, start_axis=1)
     roi_feature = F.relu(self.fc1(flatten_feature))
     roi_feature = F.relu(self.fc2(roi_feature))
     pred_cls = self.pred_cls(roi_feature)
     pred_delta = self.pred_delta(roi_feature)
     if self.training:
         # loss for regression
         labels = labels.astype(np.int32).reshape(-1)
         # multiple classes to one
         pos_masks = labels > 0
         localization_loss = smooth_l1_loss(
             pred_delta,
             bbox_targets,
             config.rcnn_smooth_l1_beta)
         localization_loss = localization_loss * pos_masks
         # loss for classification
         valid_masks = labels >= 0
         objectness_loss = softmax_loss(
             pred_cls,
             labels)
         objectness_loss = objectness_loss * valid_masks
         normalizer = 1.0 / (valid_masks.sum())
         loss_rcnn_cls = objectness_loss.sum() * normalizer
         loss_rcnn_loc = localization_loss.sum() * normalizer
         loss_dict = {}
         loss_dict[self.stage_name + '_cls'] = loss_rcnn_cls
         loss_dict[self.stage_name + '_loc'] = loss_rcnn_loc
         pred_bbox = restore_bbox(proposals[:, 1:5], pred_delta, True)
         pred_proposals = F.zero_grad(F.concat([proposals[:, 0].reshape(-1, 1), pred_bbox], axis=1))
         return pred_proposals, loss_dict
     else:
         pred_scores = F.softmax(pred_cls)[:, 1].reshape(-1, 1)
         pred_bbox = restore_bbox(proposals[:, 1:5], pred_delta, True)
         pred_proposals = F.concat([proposals[:, 0].reshape(-1, 1), pred_bbox], axis=1)
         return pred_proposals, pred_scores
Example #15
    def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
        # stride: 64,32,16,8,4 -> 4, 8, 16, 32
        fpn_fms = fpn_fms[1:]
        fpn_fms.reverse()
        stride = [4, 8, 16, 32]
        poo5, rcnn_rois, labels, bbox_targets = roi_pool(
            fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align', labels,
            bbox_targets)
        poo5 = F.flatten(poo5, start_axis=1)
        fc1 = F.relu(self.fc1(poo5))
        fc2 = F.relu(self.fc2(fc1))

        a = self.a(fc2)
        b = self.b(fc2)
        prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])

        if self.refinement:
            final_prob = self.refinement_module(prob, fc2)

        if self.training:

            emd_loss = self.compute_gemini_loss(prob, bbox_targets, labels)
            loss_dict = {}
            loss_dict['loss_rcnn_emd'] = emd_loss
            if self.refinement:
                final_emd_loss = self.compute_gemini_loss(
                    final_prob, bbox_targets, labels)
                loss_dict['final_rcnn_emd'] = final_emd_loss
            return loss_dict
        else:

            offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
            pred_bbox = offsets.reshape(-1, self.n, 4)
            cls_prob = F.softmax(cls_scores, axis=1)
            n = rcnn_rois.shape[0]
            rois = F.broadcast_to(F.expand_dims(rcnn_rois[:, 1:5], axis=1),
                                  (n, 2, 4)).reshape(-1, 4)
            normalized = config.rcnn_bbox_normalize_targets
            pred_boxes = restore_bbox(rois, pred_bbox, normalized, config)
            pred_bbox = F.concat(
                [pred_boxes, F.expand_dims(cls_prob, axis=2)], axis=2)
            return pred_bbox
Example #16
    def recover_pred_boxes(self, rcnn_rois, prob, nhead):

        n = prob.shape[0]
        prob = prob.reshape(n, nhead, -1)
        prob = prob.reshape(-1, prob.shape[2])

        cls_score, bbox_pred = prob[:, -self.n:], prob[:, :-self.n]
        cls_prob = F.softmax(cls_score, axis=1)
        m, c = rcnn_rois.shape
        rois = F.broadcast_to(F.expand_dims(rcnn_rois, axis=1), (m, nhead, c)).reshape(-1, c)
        bbox_pred = bbox_pred.reshape(n * nhead, -1, 4)

        pred_boxes = restore_bbox(rois[:, 1:5], bbox_pred, config=config)
        cls_prob = F.expand_dims(cls_prob, axis=2)
        pred_boxes = F.concat([pred_boxes, cls_prob], axis=2)
        n, c = bbox_pred.shape[:2]
        bid = F.broadcast_to(F.expand_dims(rois[:, :1], axis=1), (n, c, 1))
        pred_boxes = F.concat([pred_boxes, bid], axis=2)

        return pred_boxes.detach()
Example #17
    def refinement_module(self, prob, fc2):
        
        m = prob.reshape(-1, 5*self.n)
        offsets, scores = m[:, :-self.n], m[:, -self.n:]
        n = offsets.shape[0]
        offsets = offsets.reshape(-1, self.n, 4)
        cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
        pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
        n, c = pred_boxes.shape
        pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n, -1)

        n, c = fc2.shape
        fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
        fc3 = F.concat([fc3, pred_boxes], axis=1)
        fc3 = self.relu(self.fc3(fc3))
        fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)

        a = self.q(fc3[0])
        b = self.r(fc3[1])
        prob = F.stack([a, b], axis=1).reshape(-1, 10*self.n)
        return prob
Example #18
    def test_func(T, C, N):
        input = np.random.randn(T, N, C)
        input = F.softmax(tensor(input), axis=-1).numpy()
        input_lengths = np.ones(N, dtype=np.int32) * T
        target_lengths = np.random.randint(low=1,
                                           high=T + 1,
                                           size=(N, ),
                                           dtype=np.int32)
        target = np.random.randint(low=1,
                                   high=C,
                                   size=(sum(target_lengths)),
                                   dtype=np.int32)

        input_mge = tensor(input)
        input_lengths_mge = tensor(input_lengths)

        target_mge = tensor(target)
        target_lengths_mge = tensor(target_lengths)

        blank = np.random.randint(C)
        for method in ["mean", "sum", "none"]:
            np_out = ctc_nll_naive_npy(
                input,
                input_lengths,
                target,
                target_lengths,
                blank=blank,
                reduction=method,
                time_major=True,
            )
            mge_out = F.nn.ctc_loss(
                input_mge,
                input_lengths_mge,
                target_mge,
                target_lengths_mge,
                blank=blank,
                reduction=method,
            )
            np.testing.assert_allclose(mge_out.numpy(), np_out, rtol=2e-6)
Example #19
 def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
     # stride: 64,32,16,8,4 -> 4, 8, 16, 32
     fpn_fms = fpn_fms[1:][::-1]
     stride = [4, 8, 16, 32]
     pool_features, rcnn_rois, labels, bbox_targets = roi_pool(
             fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
             labels, bbox_targets)
     flatten_feature = F.flatten(pool_features, start_axis=1)
     roi_feature = F.relu(self.fc1(flatten_feature))
     roi_feature = F.relu(self.fc2(roi_feature))
     pred_cls = self.pred_cls(roi_feature)
     pred_delta = self.pred_delta(roi_feature)
     if self.training:
         # loss for regression
         labels = labels.astype(np.int32).reshape(-1)
         pos_masks = labels > 0
         localization_loss = smooth_l1_loss(
             pred_delta,
             bbox_targets,
             config.rcnn_smooth_l1_beta)
         localization_loss = localization_loss * pos_masks
         # loss for classification
         valid_masks = labels >= 0
         objectness_loss = softmax_loss(
             pred_cls,
             labels)
         objectness_loss = objectness_loss * valid_masks
         normalizer = 1.0 / (valid_masks.sum())
         loss_rcnn_cls = objectness_loss.sum() * normalizer
         loss_rcnn_loc = localization_loss.sum() * normalizer
         loss_dict = {}
         loss_dict['loss_rcnn_cls'] = loss_rcnn_cls
         loss_dict['loss_rcnn_loc'] = loss_rcnn_loc
         return loss_dict
     else:
         pred_scores = F.softmax(pred_cls)
         pred_bbox = restore_bbox(rcnn_rois[:, 1:5], pred_delta, True)
         pred_bbox = F.concat([pred_bbox, pred_scores[:, 1].reshape(-1, 1)], axis=1)
         return pred_bbox
Example #20
def test_compile_multi_times_eager():
    return  # XXX: rewrite or remove this test
    data = Input("data", shape=(2, 28))
    label = Input("label", shape=(2, ), dtype=np.int32)

    mlp = MLP()
    opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)

    pred0 = mlp(data)
    pred = F.softmax(pred0)
    loss = F.square_loss(pred, label.reshape(2, 1))
    opt.zero_grad()
    grads = opt.backward(loss)
    opt.step()

    f0 = compile(pred, None)
    f1 = compile([pred, loss], grads, copy=False)
    for _ in range(3):
        data = np.random.random((2, 28)).astype(np.float32)
        label = np.random.randint(0, 10, (2, )).astype(np.float32)
        out0 = f0(data=data)
        out1 = f1(data=data, label=label)
        assertTensorClose(out0[0], out1[0])
Example #21
def test_release_memory():
    mnist_datasets = load_mnist_datasets()
    data_train, label_train = mnist_datasets["train"]

    batch_size = 15000
    data_shape = (batch_size, 1, 28, 28)
    label_shape = (batch_size, )

    data = nn.Input("data", shape=data_shape, dtype=np.float32)
    label = nn.Input("label",
                     shape=label_shape,
                     dtype=np.int32,
                     value=np.zeros(label_shape))

    net = MnistNet()
    opt = SGD(net.parameters(), lr=0.01)

    pred = F.softmax(net(data))
    loss = F.cross_entropy(pred, label)

    opt.zero_grad()
    opt.backward(loss)
    add_updates = opt.step()

    mge.graph._default_graph.get_default().clear_device_memory()

    f = mge.graph.compile(loss, add_updates)

    for _ in range(3):
        train_loss = 0.0
        for i in range(0, data_train.shape[0], batch_size):
            opt.zero_grad()
            data = data_train[i:i + batch_size, :, :, :]
            label = label_train[i:i + batch_size]
            loss = f(data=data, label=label)[0]
            train_loss += loss[0]
Example #22
 def fun(data, *, net):
     pred = net(data)
     pred_normalized = F.softmax(pred)
     return pred_normalized
Example #23
 def infer_func(processed_img):
     model.eval()
     logits = model(processed_img)
     probs = F.softmax(logits)
     return probs
Example #24
 def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
     # stride: 64,32,16,8,4 -> 4, 8, 16, 32
     fpn_fms = fpn_fms[1:][::-1]
     stride = [4, 8, 16, 32]
     pool_features, rcnn_rois, labels, bbox_targets = roi_pool(
             fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
             labels, bbox_targets)
     flatten_feature = F.flatten(pool_features, start_axis=1)
     roi_feature = F.relu(self.fc1(flatten_feature))
     roi_feature = F.relu(self.fc2(roi_feature))
     pred_emd_pred_cls_0 = self.emd_pred_cls_0(roi_feature)
     pred_emd_pred_delta_0 = self.emd_pred_delta_0(roi_feature)
     pred_emd_pred_cls_1 = self.emd_pred_cls_1(roi_feature)
     pred_emd_pred_delta_1 = self.emd_pred_delta_1(roi_feature)
     pred_emd_scores_0 = F.softmax(pred_emd_pred_cls_0)
     pred_emd_scores_1 = F.softmax(pred_emd_pred_cls_1)
     # make refine feature
     box_0 = F.concat((pred_emd_pred_delta_0,
         pred_emd_scores_0[:, 1][:, None]), axis=1)[:, None, :]
     box_1 = F.concat((pred_emd_pred_delta_1,
         pred_emd_scores_1[:, 1][:, None]), axis=1)[:, None, :]
     boxes_feature_0 = box_0.broadcast(
             box_0.shapeof()[0], 4, box_0.shapeof()[-1]).reshape(box_0.shapeof()[0], -1)
     boxes_feature_1 = box_1.broadcast(
             box_1.shapeof()[0], 4, box_1.shapeof()[-1]).reshape(box_1.shapeof()[0], -1)
     boxes_feature_0 = F.concat((roi_feature, boxes_feature_0), axis=1)
     boxes_feature_1 = F.concat((roi_feature, boxes_feature_1), axis=1)
     refine_feature_0 = F.relu(self.fc3(boxes_feature_0))
     refine_feature_1 = F.relu(self.fc3(boxes_feature_1))
     # refine
     pred_ref_pred_cls_0 = self.ref_pred_cls_0(refine_feature_0)
     pred_ref_pred_delta_0 = self.ref_pred_delta_0(refine_feature_0)
     pred_ref_pred_cls_1 = self.ref_pred_cls_1(refine_feature_1)
     pred_ref_pred_delta_1 = self.ref_pred_delta_1(refine_feature_1)
     if self.training:
         loss0 = emd_loss(
                     pred_emd_pred_delta_0, pred_emd_pred_cls_0,
                     pred_emd_pred_delta_1, pred_emd_pred_cls_1,
                     bbox_targets, labels)
         loss1 = emd_loss(
                     pred_emd_pred_delta_1, pred_emd_pred_cls_1,
                     pred_emd_pred_delta_0, pred_emd_pred_cls_0,
                     bbox_targets, labels)
         loss2 = emd_loss(
                     pred_ref_pred_delta_0, pred_ref_pred_cls_0,
                     pred_ref_pred_delta_1, pred_ref_pred_cls_1,
                     bbox_targets, labels)
         loss3 = emd_loss(
                     pred_ref_pred_delta_1, pred_ref_pred_cls_1,
                     pred_ref_pred_delta_0, pred_ref_pred_cls_0,
                     bbox_targets, labels)
         loss_rcnn = F.concat([loss0, loss1], axis=1)
         loss_ref = F.concat([loss2, loss3], axis=1)
         indices_rcnn = F.argmin(loss_rcnn, axis=1)
         indices_ref = F.argmin(loss_ref, axis=1)
         loss_rcnn = F.indexing_one_hot(loss_rcnn, indices_rcnn, 1)
         loss_ref = F.indexing_one_hot(loss_ref, indices_ref, 1)
         loss_rcnn = loss_rcnn.sum() / loss_rcnn.shapeof()[0]
         loss_ref = loss_ref.sum() / loss_ref.shapeof()[0]
         loss_dict = {}
         loss_dict['loss_rcnn_emd'] = loss_rcnn
         loss_dict['loss_ref_emd'] = loss_ref
         return loss_dict
     else:
         pred_ref_scores_0 = F.softmax(pred_ref_pred_cls_0)
         pred_ref_scores_1 = F.softmax(pred_ref_pred_cls_1)
         pred_bbox_0 = restore_bbox(rcnn_rois[:, 1:5], pred_ref_pred_delta_0, True)
         pred_bbox_1 = restore_bbox(rcnn_rois[:, 1:5], pred_ref_pred_delta_1, True)
         pred_bbox_0 = F.concat([pred_bbox_0, pred_ref_scores_0[:, 1].reshape(-1, 1)], axis=1)
         pred_bbox_1 = F.concat([pred_bbox_1, pred_ref_scores_1[:, 1].reshape(-1, 1)], axis=1)
         pred_bbox = F.concat((pred_bbox_0, pred_bbox_1), axis=1).reshape(-1, 5)
         return pred_bbox
Example #25
    def forward(self, x):
        x = F.linear(x, self.weight, self.bias)
        logit = F.linear(x, self.weight2, self.bias2)
        prob = F.softmax(logit)

        return prob
Example #26
 def forward(self, a):
     return F.softmax(a)
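For reference, what this one-liner computes, in plain NumPy (a sketch; the reduction axis is assumed to be the last one here):

import numpy as np

logits = np.array([[2.0, 1.0, 0.1]], dtype=np.float32)
e = np.exp(logits - logits.max(axis=-1, keepdims=True))  # max-shift for stability
probs = e / e.sum(axis=-1, keepdims=True)
print(probs.round(3))  # -> [[0.659 0.242 0.099]]
print(probs.sum())     # ~ 1.0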
Example #27
File: xornet.py Project: ztjryg4/MegEngine
def pred_fun(data, net=None):
    net.eval()
    pred = net(data)
    pred_normalized = F.softmax(pred)
    return pred_normalized
Example #28
File: xornet.py Project: mozre/MegEngine
 def pred_fun(data):
     pred = net(data)
     pred_normalized = F.softmax(pred)
     return pred_normalized
Example #29
 ),
 ("silu", MF.silu, TF.silu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
 (
     "split",
     lambda x: MF.split(x, 5),
     lambda x: torch.split(x, 5),
     [(100, 100)],
     [(64, 512, 16, 16)],
     True,
     1000,
 ),
 ("sigmoid", MF.sigmoid, TF.sigmoid, [(100, 100)], [(64, 512, 16, 16)],
  True, 1000),
 (
     "softmax",
     lambda x: MF.softmax(x, axis=1),
     lambda x: TF.softmax(x, dim=1),
     [(100, 100)],
     [(64, 512, 16, 16)],
     True,
     1000,
 ),
 (
     "softplus",
     MF.softplus,
     TF.softplus,
     [(100, 100)],
     [(64, 512, 16, 16)],
     True,
     1000,
 ),
Example #30
def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list,
                           all_anchors_list, im_info):
    prev_nms_top_n = config.train_prev_nms_top_n \
        if is_train else config.test_prev_nms_top_n
    post_nms_top_n = config.train_post_nms_top_n \
        if is_train else config.test_post_nms_top_n
    batch_per_gpu = config.batch_per_gpu if is_train else 1
    nms_threshold = config.rpn_nms_threshold
    box_min_size = config.rpn_min_box_size
    bbox_normalize_targets = config.rpn_bbox_normalize_targets
    bbox_normalize_means = config.bbox_normalize_means
    bbox_normalize_stds = config.bbox_normalize_stds

    list_size = len(rpn_bbox_offsets_list)

    return_rois, return_probs = [], []
    batch_per_gpu = rpn_cls_prob_list[0].shape[0]
    for bid in range(batch_per_gpu):
        batch_proposals_list = []
        batch_probs_list = []
        for l in range(list_size):
            # get proposals and probs
            offsets = rpn_bbox_offsets_list[l][bid] \
                .transpose(1, 2, 0).reshape(-1, 4)
            if bbox_normalize_targets:
                std_opr = tensor(bbox_normalize_stds[None, :])
                mean_opr = tensor(bbox_normalize_means[None, :])
                offsets = offsets * std_opr
                offsets = offsets + mean_opr
            all_anchors = all_anchors_list[l]

            proposals = bbox_transform_inv_opr(all_anchors, offsets)
            if config.anchor_within_border:
                proposals = clip_boxes_opr(proposals, im_info[bid, :])
            probs = rpn_cls_prob_list[l][bid] \
                    .transpose(1, 2, 0).reshape(-1, 2)
            probs = F.softmax(probs)[:, 1]
            # gather the proposals and probs
            batch_proposals_list.append(proposals)
            batch_probs_list.append(probs)

        batch_proposals = F.concat(batch_proposals_list, axis=0)
        batch_probs = F.concat(batch_probs_list, axis=0)
        # filter the boxes with small size.
        wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1
        thresh = box_min_size * im_info[bid, 2]
        keep_mask = F.prod((wh >= thresh), axis=1)
        keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
        keep_mask, inds = F.cond_take(keep_mask > 0, keep_mask)

        inds = inds.astype(np.int32)
        # batch_proposals = F.nn.indexing_one_hot(batch_proposals, inds, 0)
        # batch_probs = F.nn.indexing_one_hot(batch_probs, inds, 0)
        batch_proposals, batch_probs = batch_proposals[inds], batch_probs[inds]

        # prev_nms_top_n
        num_proposals = F.minimum(prev_nms_top_n, batch_proposals.shape[0])
        idx = F.argsort(batch_probs, descending=True)
        topk_idx = idx[:num_proposals].reshape(-1)
        batch_proposals = batch_proposals[topk_idx].detach()
        batch_probs = batch_probs[topk_idx].detach()

        # For each image, run NMS across all levels and keep the top-k results.
        keep_inds = nms(batch_proposals,
                        batch_probs,
                        nms_threshold,
                        max_output=2000)
        # num = F.minimum(post_nms_top_n, keep_inds.shape[0])
        # keep_inds = keep_inds[:num]

        batch_rois, batch_probs = batch_proposals[keep_inds], batch_probs[
            keep_inds]

        # construct the rois: prepend the batch index to each box
        batch_inds = F.ones((batch_rois.shape[0], 1)) * bid
        batch_rois = F.concat([batch_inds, batch_rois[:, :4]], axis=1)
        return_rois.append(batch_rois)
        return_probs.append(batch_probs)

    if batch_per_gpu == 1:
        return batch_rois, batch_probs
    else:
        concated_rois = F.concat(return_rois, axis=0)
        concated_probs = F.concat(return_probs, axis=0)
        return concated_rois, concated_probs
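The small-box filter in the middle of the loop is easy to misread; a NumPy sketch of just that step, including the fallback that keeps everything when no box passes (values are illustrative):

import numpy as np

proposals = np.array([[0., 0., 10., 10.],
                      [0., 0., 2., 2.]])
wh = proposals[:, 2:4] - proposals[:, :2] + 1    # widths and heights
thresh = 4.0                                     # box_min_size * image scale
keep = (wh >= thresh).prod(axis=1)               # 1 only if both sides pass
keep = keep + (keep.sum() == 0)                  # keep everything if nothing passed
inds = np.nonzero(keep > 0)[0]
print(inds)                                      # -> [0]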