Example #1
File: loss.py Project: yrpang/mindspore
    def __init__(self, num_classes, num_boxes, neg_pre_positive, batch_size):
        super(MultiBoxLoss, self).__init__()
        self.num_classes = num_classes
        self.num_boxes = num_boxes
        self.neg_pre_positive = neg_pre_positive
        self.notequal = P.NotEqual()
        self.less = P.Less()
        self.tile = P.Tile()
        self.reduce_sum = P.ReduceSum()
        self.reduce_mean = P.ReduceMean()
        self.expand_dims = P.ExpandDims()
        self.smooth_l1_loss = P.SmoothL1Loss()
        self.cross_entropy = SoftmaxCrossEntropyWithLogits()
        self.maximum = P.Maximum()
        self.minimum = P.Minimum()
        self.sort_descend = P.TopK(True)
        self.sort = P.TopK(True)
        self.gather = P.GatherNd()
        self.max = P.ReduceMax()
        self.log = P.Log()
        self.exp = P.Exp()
        self.concat = P.Concat(axis=1)
        self.reduce_sum2 = P.ReduceSum(keep_dims=True)
        self.idx = Tensor(
            np.reshape(np.arange(batch_size * num_boxes), (-1, 1)), ms.int32)
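The operators collected above are the usual ingredients of an SSD-style MultiBox loss: smooth_l1_loss for box regression, cross_entropy for class confidences, and the TopK/GatherNd pair for hard negative mining controlled by neg_pre_positive. The construct method is not part of this example; the NumPy sketch below only illustrates the hard-negative-mining step (keep at most neg_pre_positive negatives per positive anchor), and its function name, shapes, and arguments are illustrative assumptions rather than code from yrpang/mindspore.

import numpy as np

def hard_negative_mask(conf_loss, positive_mask, neg_pre_positive=3):
    """Illustrative sketch of hard negative mining, not the project's code.

    conf_loss:     (batch, num_boxes) per-anchor classification loss
    positive_mask: (batch, num_boxes) 1.0 for matched anchors, 0.0 otherwise
    Returns a 0/1 mask selecting at most neg_pre_positive negatives per positive.
    """
    neg_loss = conf_loss * (1.0 - positive_mask)               # ignore positive anchors
    num_pos = positive_mask.sum(axis=1, keepdims=True)         # positives per image
    num_neg = np.minimum(neg_pre_positive * num_pos,
                         positive_mask.shape[1] - num_pos)     # cap the negatives
    # rank anchors by loss, descending (mirrors the TopK(True) sort above)
    rank = (-neg_loss).argsort(axis=1).argsort(axis=1)
    return ((rank < num_neg) & (positive_mask == 0)).astype(np.float32)

The localization term would then apply the smooth L1 loss only on positive anchors, while the classification term sums over the positives plus the mined negatives.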
Example #2
    def __init__(self, config, batch_size, in_channels, feat_channels,
                 num_anchors, cls_out_channels):
        super(RPN, self).__init__()
        cfg_rpn = config
        self.dtype = np.float32
        self.ms_type = mstype.float32
        self.num_bboxes = cfg_rpn.num_bboxes
        self.slice_index = ()
        self.feature_anchor_shape = ()
        self.slice_index += (0, )
        index = 0
        for shape in cfg_rpn.feature_shapes:
            self.slice_index += (self.slice_index[index] +
                                 shape[0] * shape[1] * num_anchors, )
            self.feature_anchor_shape += (shape[0] * shape[1] * num_anchors *
                                          batch_size, )
            index += 1

        self.num_anchors = num_anchors
        self.batch_size = batch_size
        self.test_batch_size = cfg_rpn.test_batch_size
        self.num_layers = 5
        self.real_ratio = Tensor(np.ones((1, 1)).astype(self.dtype))

        self.rpn_convs_list = nn.layer.CellList(
            self._make_rpn_layer(self.num_layers, in_channels, feat_channels,
                                 num_anchors, cls_out_channels))

        self.transpose = P.Transpose()
        self.reshape = P.Reshape()
        self.concat = P.Concat(axis=0)
        self.fill = P.Fill()
        self.placeh1 = Tensor(np.ones((1, )).astype(self.dtype))

        self.trans_shape = (0, 2, 3, 1)

        self.reshape_shape_reg = (-1, 4)
        self.reshape_shape_cls = (-1, )
        self.rpn_loss_reg_weight = Tensor(
            np.array(cfg_rpn.rpn_loss_reg_weight).astype(self.dtype))
        self.rpn_loss_cls_weight = Tensor(
            np.array(cfg_rpn.rpn_loss_cls_weight).astype(self.dtype))
        self.num_expected_total = Tensor(
            np.array(cfg_rpn.num_expected_neg * self.batch_size).astype(
                self.dtype))
        self.num_bboxes = cfg_rpn.num_bboxes
        self.get_targets = BboxAssignSample(cfg_rpn, self.batch_size,
                                            self.num_bboxes, False)
        self.CheckValid = P.CheckValid()
        self.sum_loss = P.ReduceSum()
        self.loss_cls = P.SigmoidCrossEntropyWithLogits()
        self.loss_bbox = P.SmoothL1Loss(beta=1.0 / 9.0)
        self.squeeze = P.Squeeze()
        self.cast = P.Cast()
        self.tile = P.Tile()
        self.zeros_like = P.ZerosLike()
        self.loss = Tensor(np.zeros((1, )).astype(self.dtype))
        self.clsloss = Tensor(np.zeros((1, )).astype(self.dtype))
        self.regloss = Tensor(np.zeros((1, )).astype(self.dtype))
Example #3
    def __init__(self, config, batch_size, in_channels, feat_channels,
                 num_anchors, cls_out_channels):
        super(RPN, self).__init__()
        cfg_rpn = config
        self.cfg = config
        self.num_bboxes = cfg_rpn.num_bboxes
        self.feature_anchor_shape = cfg_rpn.feature_shapes
        self.feature_anchor_shape = self.feature_anchor_shape[0] * \
            self.feature_anchor_shape[1] * num_anchors * batch_size
        self.num_anchors = num_anchors
        self.batch_size = batch_size
        self.test_batch_size = cfg_rpn.test_batch_size
        self.num_layers = 1
        self.real_ratio = Tensor(np.ones((1, 1)).astype(np.float16))
        self.use_sigmoid_cls = config.use_sigmoid_cls
        if config.use_sigmoid_cls:
            self.reshape_shape_cls = (-1, )
            self.loss_cls = P.SigmoidCrossEntropyWithLogits()
            cls_out_channels = 1
        else:
            self.reshape_shape_cls = (-1, cls_out_channels)
            self.loss_cls = nn.SoftmaxCrossEntropyWithLogits(sparse=True,
                                                             reduction="none")
        self.rpn_convs_list = self._make_rpn_layer(self.num_layers, in_channels, feat_channels,\
            num_anchors, cls_out_channels)

        self.transpose = P.Transpose()
        self.reshape = P.Reshape()
        self.concat = P.Concat(axis=0)
        self.fill = P.Fill()
        self.placeh1 = Tensor(np.ones((1, )).astype(np.float16))

        self.trans_shape = (0, 2, 3, 1)

        self.reshape_shape_reg = (-1, 4)
        self.softmax = nn.Softmax()
        self.rpn_loss_reg_weight = Tensor(
            np.array(cfg_rpn.rpn_loss_reg_weight).astype(np.float16))
        self.rpn_loss_cls_weight = Tensor(
            np.array(cfg_rpn.rpn_loss_cls_weight).astype(np.float16))
        self.num_expected_total = Tensor(
            np.array(cfg_rpn.num_expected_neg * self.batch_size).astype(
                np.float16))
        self.num_bboxes = cfg_rpn.num_bboxes
        self.get_targets = BboxAssignSample(cfg_rpn, self.batch_size,
                                            self.num_bboxes, False)
        self.CheckValid = P.CheckValid()
        self.sum_loss = P.ReduceSum()
        self.loss_bbox = P.SmoothL1Loss(beta=1.0 / 9.0)
        self.squeeze = P.Squeeze()
        self.cast = P.Cast()
        self.tile = P.Tile()
        self.zeros_like = P.ZerosLike()
        self.loss = Tensor(np.zeros((1, )).astype(np.float16))
        self.clsloss = Tensor(np.zeros((1, )).astype(np.float16))
        self.regloss = Tensor(np.zeros((1, )).astype(np.float16))
        self.print = P.Print()
Example #4
    def __init__(self,
                 config,
                 batch_size,
                 num_classes,
                 target_means=(0., 0., 0., 0.),
                 target_stds=(0.1, 0.1, 0.2, 0.2)):
        super(RcnnCls, self).__init__()
        cfg = config
        self.rcnn_loss_cls_weight = Tensor(
            np.array(cfg.rcnn_loss_cls_weight).astype(np.float16))
        self.rcnn_loss_reg_weight = Tensor(
            np.array(cfg.rcnn_loss_reg_weight).astype(np.float16))
        self.rcnn_fc_out_channels = cfg.rcnn_fc_out_channels
        self.target_means = target_means
        self.target_stds = target_stds
        self.num_classes = num_classes
        self.in_channels = cfg.rcnn_in_channels
        self.train_batch_size = batch_size
        self.test_batch_size = cfg.test_batch_size

        self.fpn_cls = FpnCls(self.in_channels, self.rcnn_fc_out_channels,
                              self.num_classes, cfg.roi_layer["out_size"])
        self.relu = P.ReLU()
        self.logicaland = P.LogicalAnd()
        self.loss_cls = P.SoftmaxCrossEntropyWithLogits()
        self.loss_bbox = P.SmoothL1Loss(beta=1.0)
        self.loss_mask = P.SigmoidCrossEntropyWithLogits()
        self.reshape = P.Reshape()
        self.onehot = P.OneHot()
        self.greater = P.Greater()
        self.cast = P.Cast()
        self.sum_loss = P.ReduceSum()
        self.tile = P.Tile()
        self.expandims = P.ExpandDims()

        self.gather = P.GatherNd()
        self.argmax = P.ArgMaxWithValue(axis=1)

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.value = Tensor(1.0, mstype.float16)

        self.num_bboxes = (cfg.num_expected_pos_stage2 +
                           cfg.num_expected_neg_stage2) * batch_size

        rmv_first = np.ones((self.num_bboxes, self.num_classes))
        rmv_first[:, 0] = np.zeros((self.num_bboxes, ))
        self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))

        self.num_bboxes_test = cfg.rpn_max_num * cfg.test_batch_size
Example #5
    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = P.SmoothL1Loss()
        self.network = network
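Only the constructor of this loss wrapper appears in the example; in MindSpore test nets such a wrapper is typically completed with a construct method that runs the wrapped network and feeds its prediction into the stored loss. A minimal sketch, assuming a single-input network (the signature below is not from the original file):

    def construct(self, x, label):
        # run the wrapped network, then score it with the element-wise smooth L1 loss
        predict = self.network(x)
        return self.loss(predict, label)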
Example #6
    def __init__(self,
                 config,
                 representation_size,
                 batch_size,
                 num_classes,
                 target_means=(0., 0., 0., 0.),
                 target_stds=(0.1, 0.1, 0.2, 0.2)):
        super(Rcnn, self).__init__()
        cfg = config
        self.rcnn_loss_cls_weight = Tensor(
            np.array(cfg.rcnn_loss_cls_weight).astype(np.float16))
        self.rcnn_loss_reg_weight = Tensor(
            np.array(cfg.rcnn_loss_reg_weight).astype(np.float16))
        self.rcnn_fc_out_channels = cfg.rcnn_fc_out_channels
        self.target_means = target_means
        self.target_stds = target_stds
        self.num_classes = num_classes
        self.in_channels = cfg.rcnn_in_channels
        self.train_batch_size = batch_size
        self.test_batch_size = cfg.test_batch_size
        self.use_ambigous_sample = cfg.use_ambigous_sample

        shape_0 = (self.rcnn_fc_out_channels, representation_size)
        weights_0 = initializer("XavierUniform",
                                shape=shape_0[::-1],
                                dtype=mstype.float16).to_tensor()
        shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
        weights_1 = initializer("XavierUniform",
                                shape=shape_1[::-1],
                                dtype=mstype.float16).to_tensor()
        self.shared_fc_0 = DenseNoTranpose(representation_size,
                                           self.rcnn_fc_out_channels,
                                           weights_0)
        self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels,
                                           self.rcnn_fc_out_channels,
                                           weights_1)

        cls_weight = initializer(
            'Normal',
            shape=[num_classes, self.rcnn_fc_out_channels][::-1],
            dtype=mstype.float16).to_tensor()
        reg_weight = initializer(
            'Normal',
            shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
            dtype=mstype.float16).to_tensor()
        self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels,
                                          num_classes, cls_weight)
        self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels,
                                          num_classes * 4, reg_weight)

        self.flatten = P.Flatten()
        self.relu = P.ReLU()
        self.logicaland = P.LogicalAnd()
        self.loss_cls = P.SoftmaxCrossEntropyWithLogits()
        self.loss_bbox = P.SmoothL1Loss(beta=1.0)
        self.reshape = P.Reshape()
        self.onehot = P.OneHot()
        self.greater = P.Greater()
        self.equal = P.Equal()
        self.cast = P.Cast()
        self.sum_loss = P.ReduceSum()
        self.tile = P.Tile()
        self.expandims = P.ExpandDims()

        self.gather = P.GatherNd()
        self.argmax = P.ArgMaxWithValue(axis=1)

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.value = Tensor(1.0, mstype.float16)

        self.num_bboxes = (cfg.num_expected_pos_stage2 +
                           cfg.num_expected_neg_stage2) * batch_size
        if self.use_ambigous_sample:
            self.num_bboxes = (cfg.num_expected_pos_stage2 +
                               cfg.num_expected_amb_stage2 +
                               cfg.num_expected_neg_stage2) * batch_size

        rmv_first = np.ones((self.num_bboxes, self.num_classes))
        rmv_first[:, 0] = np.zeros((self.num_bboxes, ))
        self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))

        self.num_bboxes_test = cfg.rpn_max_num * cfg.test_batch_size

        range_max = np.arange(self.num_bboxes_test).astype(np.int32)
        self.range_max = Tensor(range_max)
Example #7
    def __init__(self, beta=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.smooth_l1_loss = P.SmoothL1Loss(self.beta)
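This thin wrapper only stores beta and the raw operator. P.SmoothL1Loss applies the loss element-wise without reduction, so the output has the same shape as its inputs; per element it computes 0.5 * (x - y)**2 / beta when |x - y| < beta and |x - y| - 0.5 * beta otherwise. A usage sketch under the same API (the values are illustrative, not from the original file):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

prediction = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]], np.float32))
target = Tensor(np.array([[1.2, 1.0], [3.0, 0.0]], np.float32))
# element-wise smooth L1 loss; result shape is (2, 2), same as the inputs
loss = P.SmoothL1Loss(beta=1.0)(prediction, target)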
Example #8
                     Tensor(np.ones((2, 4), np.int32))),
     'desc_bprop': [[2]]}),
 ('ScatterNdUpdate', {
     'block': P.ScatterNdUpdate(),
     'desc_inputs': (Tensor(np.ones((2, 3), np.float32)),
                     Tensor(np.ones((2, 2), np.int32)),
                     Tensor(np.ones((2,), np.float32))),
     'desc_bprop': [[2, 3]]}),
 ('ScatterNd', {
     'block': P.ScatterNd(),
     'desc_const': [(3, 3)],
     'desc_inputs': (Tensor(np.ones((2, 2), np.int32)),
                     Tensor(np.ones((2,), np.int32))),
     'desc_bprop': [[3, 3]]}),
 ('SmoothL1Loss', {
     'block': P.SmoothL1Loss(),
     'desc_inputs': [[256, 4], [256, 4]],
     'desc_bprop': [[256, 4]]}),
 ('IOU', {
     'block': P.IOU(),
     'desc_inputs': [Tensor(np.ones((256, 4), np.float16)), Tensor(np.ones((128, 4), np.float16))],
     'desc_bprop': [[128, 256]]}),
 ('Summary', {
     'block': SummaryNet(),
     'desc_inputs': [Tensor(np.array([1.1]).astype(np.float32)),
                     Tensor(np.array([1.2]).astype(np.float32))],
     'skip': ['backward']}),
 ('ConfusionMulGrad_1', {
     'block': P.ConfusionMulGrad(axis = [0], keep_dims = False),
     'desc_inputs': [[3, 2], [3, 2], [3, 2]],
     'desc_bprop': [[3, 2], [2]],
Example #9
    def __init__(self, sigma=1.0):
        super(SmoothL1Loss, self).__init__()
        self.sigma = sigma
        self.smooth_l1_loss = P.SmoothL1Loss(self.sigma)
Example #10
    def __init__(self, sigma=1.0):
        super(Net, self).__init__()
        self.SmoothL1Loss = P.SmoothL1Loss(sigma)
Example #11
    def __init__(self, num_classes, anchors, anchors_mask, reduction=32, seen=0, coord_scale=1.0, no_object_scale=1.0,
                 object_scale=1.0, class_scale=1.0, thresh=0.5, head_idx=0.0):
        super(YoloLoss, self).__init__()
        self.num_classes = num_classes
        self.num_anchors = len(anchors_mask)
        self.anchor_step = len(anchors[0])  # each scale has step anchors
        self.anchors = np.array(anchors, dtype=np.float32) / reduction  # scale every anchor for every scale
        self.tensor_anchors = Tensor(self.anchors, mstype.float32)
        self.anchors_mask = anchors_mask
        anchors_w = []
        anchors_h = []
        for i in range(len(anchors_mask)):
            anchors_w.append(self.anchors[self.anchors_mask[i]][0])
            anchors_h.append(self.anchors[self.anchors_mask[i]][1])
        self.anchors_w = Tensor(np.array(anchors_w).reshape(len(self.anchors_mask), 1))
        self.anchors_h = Tensor(np.array(anchors_h).reshape(len(self.anchors_mask), 1))

        self.reduction = reduction
        self.seen = seen
        self.head_idx = head_idx
        self.zero = Tensor(0)
        self.coord_scale = coord_scale
        self.no_object_scale = no_object_scale
        self.object_scale = object_scale
        self.class_scale = class_scale
        self.thresh = thresh

        self.info = {'avg_iou': 0, 'class': 0, 'obj': 0, 'no_obj': 0,
                     'recall50': 0, 'recall75': 0, 'obj_cur': 0, 'obj_all': 0,
                     'coord_xy': 0, 'coord_wh': 0}

        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.sigmoid = P.Sigmoid()
        self.zeros_like = P.ZerosLike()

        self.concat0 = P.Concat(0)
        self.concat0_2 = P.Concat(0)
        self.concat0_3 = P.Concat(0)
        self.concat0_4 = P.Concat(0)
        self.concat1 = P.Concat(1)
        self.concat1_2 = P.Concat(1)
        self.concat1_3 = P.Concat(1)
        self.concat1_4 = P.Concat(1)
        self.concat2 = P.Concat(2)
        self.concat2_2 = P.Concat(2)
        self.concat2_3 = P.Concat(2)
        self.concat2_4 = P.Concat(2)

        self.tile = P.Tile()
        self.transpose = P.Transpose()

        self.cast = P.Cast()
        self.exp = P.Exp()
        self.sum = P.ReduceSum()

        self.smooth_l1_loss = P.SmoothL1Loss()

        self.bce = P.SigmoidCrossEntropyWithLogits()
        self.ce = P.SoftmaxCrossEntropyWithLogits()

        self.pt_linspace = PtLinspace()
        self.one_hot = nn.OneHot(-1, self.num_classes, 1.0, 0.0)
        self.squeeze_2 = P.Squeeze(2)

        self.reduce_sum = P.ReduceSum()

        self.select = P.Select()
        self.iou = P.IOU()