def __init__(self, config, batch_size, in_channels, feat_channels, num_anchors, cls_out_channels):
    """Initialize the multi-level RPN head: per-level conv layers plus loss ops.

    Args:
        config: RPN configuration object (reads feature_shapes, num_bboxes,
            test_batch_size, rpn_loss_reg_weight, rpn_loss_cls_weight,
            num_expected_neg).
        batch_size (int): training batch size.
        in_channels (int): input channels of each RPN conv layer.
        feat_channels (int): intermediate feature channels.
        num_anchors (int): anchors per spatial location.
        cls_out_channels (int): classification output channels per anchor.
    """
    super(RPN, self).__init__()
    cfg_rpn = config
    self.dtype = np.float32
    self.ms_type = mstype.float32
    self.num_bboxes = cfg_rpn.num_bboxes

    # slice_index[i] is the start offset of level i's anchors in the flattened
    # per-image anchor list; feature_anchor_shape[i] is the total anchor count
    # of level i across the whole batch.
    # presumably each entry of feature_shapes is an (H, W) pair — TODO confirm
    self.slice_index = (0,)
    self.feature_anchor_shape = ()
    for index, shape in enumerate(cfg_rpn.feature_shapes):
        self.slice_index += (self.slice_index[index] + shape[0] * shape[1] * num_anchors,)
        self.feature_anchor_shape += (shape[0] * shape[1] * num_anchors * batch_size,)

    self.num_anchors = num_anchors
    self.batch_size = batch_size
    self.test_batch_size = cfg_rpn.test_batch_size
    self.num_layers = 5
    self.real_ratio = Tensor(np.ones((1, 1)).astype(self.dtype))

    self.rpn_convs_list = nn.layer.CellList(
        self._make_rpn_layer(self.num_layers, in_channels, feat_channels,
                             num_anchors, cls_out_channels))

    self.transpose = P.Transpose()
    self.reshape = P.Reshape()
    self.concat = P.Concat(axis=0)
    self.fill = P.Fill()
    self.placeh1 = Tensor(np.ones((1,)).astype(self.dtype))

    # NHWC permutation applied before flattening per-level predictions.
    self.trans_shape = (0, 2, 3, 1)
    self.reshape_shape_reg = (-1, 4)
    self.reshape_shape_cls = (-1,)
    self.rpn_loss_reg_weight = Tensor(
        np.array(cfg_rpn.rpn_loss_reg_weight).astype(self.dtype))
    self.rpn_loss_cls_weight = Tensor(
        np.array(cfg_rpn.rpn_loss_cls_weight).astype(self.dtype))
    self.num_expected_total = Tensor(
        np.array(cfg_rpn.num_expected_neg * self.batch_size).astype(self.dtype))

    # NOTE(review): the original assigned num_bboxes a second time here with
    # the same value; the redundant assignment was removed.
    self.get_targets = BboxAssignSample(cfg_rpn, self.batch_size, self.num_bboxes, False)
    self.CheckValid = P.CheckValid()
    self.sum_loss = P.ReduceSum()
    self.loss_cls = P.SigmoidCrossEntropyWithLogits()
    self.loss_bbox = P.SmoothL1Loss(beta=1.0 / 9.0)
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.tile = P.Tile()
    self.zeros_like = P.ZerosLike()
    self.loss = Tensor(np.zeros((1,)).astype(self.dtype))
    self.clsloss = Tensor(np.zeros((1,)).astype(self.dtype))
    self.regloss = Tensor(np.zeros((1,)).astype(self.dtype))
def __init__(self, config, batch_size, in_channels, feat_channels, num_anchors, cls_out_channels):
    """Initialize the single-level RPN head (fp16 variant) with a configurable
    sigmoid/softmax classification loss.

    Args:
        config: RPN configuration object (reads num_bboxes, feature_shapes,
            test_batch_size, use_sigmoid_cls, rpn_loss_reg_weight,
            rpn_loss_cls_weight, num_expected_neg).
        batch_size (int): training batch size.
        in_channels (int): input channels of the RPN conv layer.
        feat_channels (int): intermediate feature channels.
        num_anchors (int): anchors per spatial location.
        cls_out_channels (int): classification output channels per anchor;
            forced to 1 when sigmoid classification is used.
    """
    super(RPN, self).__init__()
    cfg_rpn = config
    self.cfg = config
    self.num_bboxes = cfg_rpn.num_bboxes

    # Single feature level: total anchors = H * W * num_anchors * batch.
    # presumably feature_shapes is a single (H, W) pair here — TODO confirm
    shape = cfg_rpn.feature_shapes
    self.feature_anchor_shape = shape[0] * shape[1] * num_anchors * batch_size

    self.num_anchors = num_anchors
    self.batch_size = batch_size
    self.test_batch_size = cfg_rpn.test_batch_size
    self.num_layers = 1
    self.real_ratio = Tensor(np.ones((1, 1)).astype(np.float16))

    # Sigmoid path predicts one logit per anchor; softmax path predicts
    # cls_out_channels logits and keeps them in the reshape.
    self.use_sigmoid_cls = config.use_sigmoid_cls
    if config.use_sigmoid_cls:
        self.reshape_shape_cls = (-1,)
        self.loss_cls = P.SigmoidCrossEntropyWithLogits()
        cls_out_channels = 1
    else:
        self.reshape_shape_cls = (-1, cls_out_channels)
        self.loss_cls = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="none")

    self.rpn_convs_list = self._make_rpn_layer(self.num_layers, in_channels, feat_channels,
                                               num_anchors, cls_out_channels)

    self.transpose = P.Transpose()
    self.reshape = P.Reshape()
    self.concat = P.Concat(axis=0)
    self.fill = P.Fill()
    self.placeh1 = Tensor(np.ones((1,)).astype(np.float16))

    # NHWC permutation applied before flattening predictions.
    self.trans_shape = (0, 2, 3, 1)
    self.reshape_shape_reg = (-1, 4)
    self.softmax = nn.Softmax()
    self.rpn_loss_reg_weight = Tensor(
        np.array(cfg_rpn.rpn_loss_reg_weight).astype(np.float16))
    self.rpn_loss_cls_weight = Tensor(
        np.array(cfg_rpn.rpn_loss_cls_weight).astype(np.float16))
    self.num_expected_total = Tensor(
        np.array(cfg_rpn.num_expected_neg * self.batch_size).astype(np.float16))

    # NOTE(review): the original assigned num_bboxes a second time here with
    # the same value; the redundant assignment was removed.
    self.get_targets = BboxAssignSample(cfg_rpn, self.batch_size, self.num_bboxes, False)
    self.CheckValid = P.CheckValid()
    self.sum_loss = P.ReduceSum()
    self.loss_bbox = P.SmoothL1Loss(beta=1.0 / 9.0)
    self.squeeze = P.Squeeze()
    self.cast = P.Cast()
    self.tile = P.Tile()
    self.zeros_like = P.ZerosLike()
    self.loss = Tensor(np.zeros((1,)).astype(np.float16))
    self.clsloss = Tensor(np.zeros((1,)).astype(np.float16))
    self.regloss = Tensor(np.zeros((1,)).astype(np.float16))
    self.print = P.Print()
def __init__(self):
    """Construct the cell and hold a CheckValid op as ``self.valid``."""
    super(NetCheckValid, self).__init__()
    self.valid = P.CheckValid()
'add_fack_input': True, 'fack_input_type': np.float32, 'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))], 'skip': ['backward']}), ('NpuGetFloatStatus', { 'block': P.NPUGetFloatStatus(), 'desc_inputs': [Tensor(np.zeros([8]).astype(np.float32))], 'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))], 'skip': ['backward']}), ('NpuClearFloatStatus', { 'block': P.NPUClearFloatStatus(), 'desc_inputs': [Tensor(np.zeros([8]).astype(np.float32))], 'desc_bprop': [Tensor(np.zeros([8]).astype(np.float32))], 'skip': ['backward']}), ('CheckValid', { 'block': P.CheckValid(), 'desc_inputs': [[20000, 4], [3]], 'desc_bprop': [[20000]], 'skip': ['backward']}), ('NMSWithMask', { 'block': P.NMSWithMask(0.5), 'desc_inputs': [[128, 5]], 'desc_bprop': [[128, 5], [128], [128]], 'skip': ['backward']}), ('Abs', { 'block': P.Abs(), 'desc_inputs': [[4]], 'desc_bprop': [[4]]}), ('CumSum', { 'block': P.CumSum(), 'desc_const': [0],