Example #1
    def __init__(self, num_classes, cur_anchors, conf_thresh, network_size,
                 reduction, anchors_mask):
        super(YoloPostProcess, self).__init__()
        self.print = P.Print()
        self.num_classes = num_classes
        self.anchors = cur_anchors
        self.conf_thresh = conf_thresh
        self.network_size = network_size
        self.reduction = reduction
        self.anchors_mask = anchors_mask
        self.num_anchors = len(anchors_mask)

        anchors_w = []
        anchors_h = []
        for i in range(len(self.anchors_mask)):
            anchors_w.append(self.anchors[i][0])
            anchors_h.append(self.anchors[i][1])
        self.anchors_w = Tensor(
            np.array(anchors_w).reshape((1, len(self.anchors_mask), 1)))
        self.anchors_h = Tensor(
            np.array(anchors_h).reshape((1, len(self.anchors_mask), 1)))

        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.sigmoid = P.Sigmoid()
        self.cast = P.Cast()
        self.exp = P.Exp()
        self.concat3 = P.Concat(3)
        self.tile = P.Tile()
        self.expand_dims = P.ExpandDims()
        self.pt_linspace = PtLinspace()
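
A hedged sketch (not part of the original snippet) of the decode step these operators support in construct: tx/ty/tw/th and the w-by-h grid are assumptions about the raw head layout, and PtLinspace is assumed to enumerate cell offsets like np.linspace.

        # cell-offset grids for a w-by-h feature map
        lin_x = self.pt_linspace(0, w - 1, w)
        lin_y = self.pt_linspace(0, h - 1, h)
        # standard YOLO decode: sigmoid keeps the x/y offsets inside their cell,
        # exp scales the anchor priors; dividing by w/h normalizes to [0, 1]
        coord_x = (self.sigmoid(tx) + lin_x) / w
        coord_y = (self.sigmoid(ty) + lin_y) / h
        coord_w = self.exp(tw) * self.anchors_w / w
        coord_h = self.exp(th) * self.anchors_h / h
        conf = self.sigmoid(t_conf)   # objectness in (0, 1), thresholded by conf_thresh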
Example #2
    def __init__(self, sharpness=1.0, name='Softplus'):
        """
        Constructor of Softplus Bijector.
        """
        param = dict(locals())
        param['param_dict'] = {'sharpness': sharpness}
        super(Softplus, self).__init__(name=name, dtype=None, param=param)
        self._sharpness = self._add_parameter(sharpness, 'sharpness')

        self.exp = exp_generic
        self.log = log_generic
        self.expm1 = P.Expm1()
        self.abs = P.Abs()
        self.dtypeop = P.DType()
        self.cast = P.Cast()
        self.fill = P.Fill()
        self.greater = P.Greater()
        self.less = P.Less()
        self.log_sigmoid = LogSigmoid()
        self.logicalor = P.LogicalOr()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.softplus = self._softplus
        self.inverse_softplus = self._inverse_softplus

        self.threshold = np.log(np.finfo(np.float32).eps) + 1
        self.tiny = np.exp(self.threshold)
Example #3
    def __init__(self, batch_size=4):
        super(DiceLoss, self).__init__()

        self.threshold0 = Tensor(0.5, mstype.float32)
        self.zero_float32 = Tensor(0.0, mstype.float32)
        self.k = int(640 * 640)
        self.negative_one_int32 = Tensor(-1, mstype.int32)
        self.batch_size = batch_size
        self.concat = P.Concat()
        self.less_equal = P.LessEqual()
        self.greater = P.Greater()
        self.reduce_sum = P.ReduceSum()
        self.reduce_sum_keep_dims = P.ReduceSum(keep_dims=True)
        self.reduce_mean = P.ReduceMean()
        self.reduce_min = P.ReduceMin()
        self.cast = P.Cast()
        self.minimum = P.Minimum()
        self.expand_dims = P.ExpandDims()
        self.select = P.Select()
        self.fill = P.Fill()
        self.topk = P.TopK(sorted=True)
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.reshape = P.Reshape()
        self.slice = P.Slice()
        self.logical_and = P.LogicalAnd()
        self.logical_or = P.LogicalOr()
        self.equal = P.Equal()
        self.zeros_like = P.ZerosLike()
        self.add = P.TensorAdd()
        self.gather = P.Gather()
Example #4
    def __init__(self, strategy_dict=None):
        super().__init__()
        shared_np = np.full((16, 1, 32, 32), 0.5, dtype=np.float32)
        self.shared_weight = Parameter(Tensor(shared_np), name='shared_weight')
        self.fc1 = Dense(in_channels=1024,
                         out_channels=116,
                         weight_init='ones',
                         bias_init='ones',
                         has_bias=True)
        self.relu = ReLU()
        self.sigmoid = P.Sigmoid()
        self.add1 = P.TensorAdd()
        self.add2 = P.TensorAdd()
        self.mul1 = P.Mul().add_prim_attr('primitive_target', 'CPU')
        self.mul2 = P.Mul()
        self.mul3 = P.Mul()
        self.flatten = Flatten()

        mul2_weight_np = np.full((16, 116), 1, dtype=np.float32)
        self.mul2_weight = Parameter(Tensor(mul2_weight_np),
                                     name='mul2_weight')

        mul3_weight_np = np.full((16, 116), 1, dtype=np.float32)
        self.mul3_weight = Parameter(Tensor(mul3_weight_np),
                                     name='mul3_weight')

        if strategy_dict is not None:
            self.add1.shard(strategy_dict['add1'])
            self.mul1.shard(strategy_dict['mul1'])
            self.fc1.matmul.shard(strategy_dict['fc1_matmul'])
            self.fc1.bias_add.shard(strategy_dict['fc1_bias_add'])
            self.mul2.shard(strategy_dict['mul2'])
            self.mul3.shard(strategy_dict['mul3'])
Example #5
    def __init__(self, sharpness=1.0, name='Softplus'):
        """
        Constructor of Softplus Bijector.
        """
        param = dict(locals())
        validator.check_value_type('sharpness', sharpness, [int, float],
                                   type(self).__name__)
        super(Softplus, self).__init__(name=name, param=param)
        self._sharpness = cast_to_tensor(sharpness)

        self.exp = exp_generic
        self.log = log_generic
        self.expm1 = expm1_generic
        self.abs = P.Abs()
        self.dtypeop = P.DType()
        self.fill = P.Fill()
        self.greater = P.Greater()
        self.less = P.Less()
        self.log_sigmoid = LogSigmoid()
        self.logicalor = P.LogicalOr()
        self.select = P.Select()
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.softplus = self._softplus
        self.inverse_softplus = self._inverse_softplus

        self.threshold = np.log(np.finfo(np.float32).eps) + 1
        self.tiny = np.exp(self.threshold)
Example #6
 def __init__(self,
              input_channel=1280,
              num_classes=1000,
              has_dropout=False,
              activation="None"):
     super(MobileNetV2Head, self).__init__()
     # mobilenet head
     head = ([
         GlobalAvgPooling(),
         LastQuantLayer(
             input_channel, num_classes, has_bias=True, has_bn=False)
     ] if not has_dropout else [
         GlobalAvgPooling(),
         nn.Dropout(0.2),
         LastQuantLayer(
             input_channel, num_classes, has_bias=True, has_bn=False)
     ])
     self.head = nn.SequentialCell(head)
     self.need_activation = True
     if activation == "Sigmoid":
         self.activation = P.Sigmoid()
     elif activation == "Softmax":
         self.activation = P.Softmax()
     else:
         self.need_activation = False
     self._initialize_weights()
Example #7
    def __init__(self,
                 config,
                 batch_size,
                 num_classes,
                 use_sigmoid_cls,
                 target_means=(.0, .0, .0, .0),
                 target_stds=(1.0, 1.0, 1.0, 1.0)
                 ):
        super(Proposal, self).__init__()
        cfg = config
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.target_means = target_means
        self.target_stds = target_stds
        self.use_sigmoid_cls = use_sigmoid_cls

        if self.use_sigmoid_cls:
            self.cls_out_channels = 1
            self.activation = P.Sigmoid()
            self.reshape_shape = (-1, 1)
        else:
            self.cls_out_channels = num_classes
            self.activation = P.Softmax(axis=1)
            self.reshape_shape = (-1, 2)

        if self.cls_out_channels <= 0:
            raise ValueError('num_classes={} is too small'.format(num_classes))

        self.num_pre = cfg.rpn_proposal_nms_pre
        self.min_box_size = cfg.rpn_proposal_min_bbox_size
        self.nms_thr = cfg.rpn_proposal_nms_thr
        self.nms_post = cfg.rpn_proposal_nms_post
        self.nms_across_levels = cfg.rpn_proposal_nms_across_levels
        self.max_num = cfg.rpn_proposal_max_num

        # Op Define
        self.squeeze = P.Squeeze()
        self.reshape = P.Reshape()
        self.cast = P.Cast()

        self.feature_shapes = cfg.feature_shapes

        self.transpose_shape = (1, 2, 0)

        self.decode = BoundingBoxDecode()

        self.nms = P.NMSWithMask(self.nms_thr)
        self.concat_axis0 = P.Concat(axis=0)
        self.concat_axis1 = P.Concat(axis=1)
        self.split = P.Split(axis=1, output_num=5)
        self.min = P.Minimum()
        self.gatherND = P.GatherNd()
        self.slice = P.Slice()
        self.select = P.Select()
        self.greater = P.Greater()
        self.transpose = P.Transpose()
        self.tile = P.Tile()
        self.set_train_local(config, training=True)

        self.multi_10 = Tensor(10.0, mstype.float16)
Example #8
 def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False):
     super(BertReg, self).__init__()
     self.bert = BertRegressionModel(config, is_training, num_labels, dropout_prob, use_one_hot_embeddings)
     self.loss = nn.MSELoss()
     self.is_training = is_training
     self.sigmoid = P.Sigmoid()
     self.cast = P.Cast()
     self.mul = P.Mul()
Example #9
    def __init__(self, config, is_training=True):
        super(SsdMobilenetV1Fpn, self).__init__()
        self.multi_box = WeightSharedMultiBox(config)
        self.is_training = is_training
        if not is_training:
            self.activation = P.Sigmoid()

        self.feature_extractor = mobilenet_v1_fpn(config)
Example #10
    def __init__(self, backbone, config, is_training=True):
        super(retinanet50, self).__init__()

        self.backbone = backbone
        feature_size = config.feature_size
        self.P5_1 = nn.Conv2d(2048,
                              256,
                              kernel_size=1,
                              stride=1,
                              pad_mode='same')
        self.P_upsample1 = P.ResizeNearestNeighbor(
            (feature_size[1], feature_size[1]))
        self.P5_2 = nn.Conv2d(256,
                              256,
                              kernel_size=3,
                              stride=1,
                              pad_mode='same')

        self.P4_1 = nn.Conv2d(1024,
                              256,
                              kernel_size=1,
                              stride=1,
                              pad_mode='same')
        self.P_upsample2 = P.ResizeNearestNeighbor(
            (feature_size[0], feature_size[0]))
        self.P4_2 = nn.Conv2d(256,
                              256,
                              kernel_size=3,
                              stride=1,
                              pad_mode='same')

        self.P3_1 = nn.Conv2d(512,
                              256,
                              kernel_size=1,
                              stride=1,
                              pad_mode='same')
        self.P3_2 = nn.Conv2d(256,
                              256,
                              kernel_size=3,
                              stride=1,
                              pad_mode='same')

        self.P6_0 = nn.Conv2d(2048,
                              256,
                              kernel_size=3,
                              stride=2,
                              pad_mode='same')

        self.P7_1 = nn.ReLU()
        self.P7_2 = nn.Conv2d(256,
                              256,
                              kernel_size=3,
                              stride=2,
                              pad_mode='same')
        self.multi_box = MultiBox(config)
        self.is_training = is_training
        if not is_training:
            self.activation = P.Sigmoid()
Example #11
 def softmax_relu_pass():
     x = Any()
     softmax_pattern = Prim(P.Softmax())
     pattern = Call(softmax_pattern, [x])
     sigmoid_pattern = Prim(P.Sigmoid())
     call_sigmoid = Call(sigmoid_pattern, [x])
     relu_pattern = Prim(P.ReLU())
     target = Call(relu_pattern, [call_sigmoid])
     return pattern, target
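
A hedged usage sketch: in MindSpore's experimental Python-pass prototype, such pattern functions were registered with a decorator. The import path and the `registe_pass` spelling follow that prototype and should be treated as assumptions.

 from mindspore.graph_utils.python_pass import registe_pass

 @registe_pass(run_only_once=True)
 def softmax_relu_pass():
     ...  # body as above; each matched Softmax(x) is rewritten to ReLU(Sigmoid(x))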
Example #12
 def _init_activation(self, act_str):
     act_str = act_str.lower()
     if act_str == "relu":
         act_func = P.ReLU()
     elif act_str == "sigmoid":
         act_func = P.Sigmoid()
     elif act_str == "tanh":
         act_func = P.Tanh()
     else:
         raise ValueError("Unsupported activation: {}".format(act_str))
     return act_func
Example #13
 def softmax_relu_pass():
     x = AnyPattern()
     softmax_pattern = IsPrimTypeOf(P.Softmax())
     pattern = CallWith(softmax_pattern, inputs=[x])
     sigmoid_pattern = IsPrimTypeOf(P.Sigmoid(), should_replace=False)
     call_sigmoid = CallWith(sigmoid_pattern, [x])
     relu_pattern = IsPrimTypeOf(P.ReLU(), should_replace=False)
     target = CallWith(relu_pattern, inputs=[call_sigmoid])
     return pattern, target
Example #14
 def __init__(self, network):
     super(PredictWithSigmoid, self).__init__()
     self.network = network
     self.sigmoid = P.Sigmoid()
     parallel_mode = context.get_auto_parallel_context("parallel_mode")
     full_batch = context.get_auto_parallel_context("full_batch")
     is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL,
                                          ParallelMode.AUTO_PARALLEL)
     if is_auto_parallel and full_batch:
         self.sigmoid.shard(((1, 1), ))
Example #15
 def __init__(self, gamma=2.0, alpha=0.25):
     super(SigmoidFocalClassificationLoss, self).__init__()
     self.sigmoid_cross_entropy = P.SigmoidCrossEntropyWithLogits()
     self.sigmoid = P.Sigmoid()
     self.pow = P.Pow()
     self.onehot = P.OneHot()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
     self.gamma = gamma
     self.alpha = alpha
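
A hedged sketch of the matching construct, following the standard sigmoid focal-loss formula FL = -alpha_t * (1 - p_t)^gamma * log(p_t); `F` is assumed to be imported from mindspore.ops.functional.

 def construct(self, logits, label):
     # one-hot encode labels along the class dimension of the logits
     label = self.onehot(label, F.shape(logits)[-1], self.on_value, self.off_value)
     ce = self.sigmoid_cross_entropy(logits, label)     # per-class BCE term
     p = self.sigmoid(logits)
     p_t = label * p + (1 - label) * (1 - p)            # probability of the true class
     modulating = self.pow(1 - p_t, self.gamma)         # down-weights easy examples
     alpha_w = label * self.alpha + (1 - label) * (1 - self.alpha)
     return modulating * alpha_w * ce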
Example #16
 def __init__(self):
     super().__init__()
     self.relu = P.ReLU()
     self.sigmoid = P.Sigmoid()
     self.tanh = P.Tanh()
     self.add = P.Add()
     a = np.full((1, ), 5, dtype=np.float32)
     self.a = Parameter(Tensor(a), name="a")
     b = np.full((1, ), 4, dtype=np.float32)
     self.b = Parameter(Tensor(b), name="b")
     c = np.full((1, ), 7, dtype=np.float32)
     self.c = Parameter(Tensor(c), name="c")
Example #17
    def __init__(self, channel, reduction=16):
        super(SEBlock, self).__init__()

        self.avg_pool = GlobalAvgPooling()
        self.fc1 = nn.Dense(channel, channel // reduction)
        self.relu = P.ReLU()
        self.fc2 = nn.Dense(channel // reduction, channel)
        self.sigmoid = P.Sigmoid()
        self.reshape = P.Reshape()
        self.shape = P.Shape()
        self.sum = P.Sum()
        self.cast = P.Cast()
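
A hedged sketch of the squeeze-and-excitation forward pass these operators implement; the exact shapes are assumptions.

    def construct(self, x):
        b, c = self.shape(x)[0], self.shape(x)[1]
        y = self.avg_pool(x)                  # squeeze: (b, c) channel descriptor
        y = self.relu(self.fc1(y))            # bottleneck of width channel // reduction
        y = self.sigmoid(self.fc2(y))         # per-channel gates in (0, 1)
        y = self.reshape(y, (b, c, 1, 1))
        return x * y                          # excite: rescale each channel of x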
Example #18
    def __init__(self, x_dim, z_dim, batch_size):
        super().__init__()
        self.x_dim = x_dim
        self.z_dim = z_dim
        self.batch_size = batch_size

        self.fc1 = nn.Dense(z_dim, 500)
        self.act1 = nn.ReLU()
        self.fc2 = nn.Dense(500, 500)
        self.act2 = nn.ReLU()
        self.fc3 = nn.Dense(500, x_dim)
        self.fill = P.Fill()
        self.sigmoid = P.Sigmoid()
        self.reshape_op = P.Reshape()
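
A hedged sketch of the decoder's forward pass: the final sigmoid maps logits to per-pixel probabilities, the usual choice for a Bernoulli decoder.

    def construct(self, z):
        h = self.act1(self.fc1(z))
        h = self.act2(self.fc2(h))
        recon = self.sigmoid(self.fc3(h))     # pixel values in (0, 1)
        return self.reshape_op(recon, (self.batch_size, self.x_dim))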
Example #19
    def __init__(self, sharpness=1.0, name='Softplus'):
        param = dict(locals())
        validator.check_value_type('sharpness', sharpness, [float], name)
        super(Softplus, self).__init__(name=name, param=param)
        self._sharpness = cast_to_tensor(sharpness)

        self.exp = P.Exp()
        self.expm1 = self._expm1_by_step
        self.log_sigmoid = LogSigmoid()
        self.log = P.Log()
        self.sigmoid = P.Sigmoid()

        self.softplus = self._softplus
        self.inverse_softplus = self._inverse_softplus
Example #20
    def __init__(self,
                 loc=None,
                 scale=None,
                 seed=None,
                 dtype=mstype.float32,
                 name="Logistic"):
        """
        Constructor of Logistic.
        """
        param = dict(locals())
        param['param_dict'] = {'loc': loc, 'scale': scale}
        valid_dtype = mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype,
                                  type(self).__name__)
        super(Logistic, self).__init__(seed, dtype, name, param)

        self._loc = self._add_parameter(loc, 'loc')
        self._scale = self._add_parameter(scale, 'scale')
        if self._scale is not None:
            check_greater_zero(self._scale, "scale")

        # ops needed for the class
        self.cast = P.Cast()
        self.const = P.ScalarToArray()
        self.consttensor = P.ScalarToTensor()
        self.dtypeop = P.DType()
        self.exp = exp_generic
        self.expm1 = P.Expm1()
        self.fill = P.Fill()
        self.less = P.Less()
        self.log = log_generic
        self.log1p = P.Log1p()
        self.logicalor = P.LogicalOr()
        self.erf = P.Erf()
        self.greater = P.Greater()
        self.sigmoid = P.Sigmoid()
        self.squeeze = P.Squeeze(0)
        self.select = P.Select()
        self.shape = P.Shape()
        self.softplus = self._softplus
        self.sqrt = P.Sqrt()
        self.uniform = C.uniform

        self.threshold = np.log(np.finfo(np.float32).eps) + 1.
        self.tiny = np.finfo(np.float64).tiny
        self.sd_const = np.pi / np.sqrt(3)
Example #21
    def __init__(self, backbone, config, is_training=True):
        super(SSD300, self).__init__()

        self.backbone = backbone
        in_channels = config.extras_in_channels
        out_channels = config.extras_out_channels
        ratios = config.extras_ratio
        strides = config.extras_strides
        residual_list = []
        for i in range(2, len(in_channels)):
            residual = InvertedResidual(in_channels[i], out_channels[i], stride=strides[i],
                                        expand_ratio=ratios[i], last_relu=True)
            residual_list.append(residual)
        self.multi_residual = nn.layer.CellList(residual_list)
        self.multi_box = MultiBox(config)
        self.is_training = is_training
        if not is_training:
            self.activation = P.Sigmoid()
Example #22
    def __init__(self, config):
        super(ETSNet, self).__init__()
        self.kernel_num = config.KERNEL_NUM
        self.inference = config.INFERENCE
        if config.INFERENCE:
            self.long_size = config.INFER_LONG_SIZE
        else:
            self.long_size = config.TRAIN_LONG_SIZE

        # backbone
        self.feature_extractor = ResNet(ResidualBlock,
                                        config.BACKBONE_LAYER_NUMS,
                                        config.BACKBONE_IN_CHANNELS,
                                        config.BACKBONE_OUT_CHANNELS)

        # neck
        self.feature_fusion = FPN(config.BACKBONE_OUT_CHANNELS,
                                  config.NECK_OUT_CHANNEL, self.long_size)

        # head
        self.conv1 = _conv(4 * config.NECK_OUT_CHANNEL,
                           config.NECK_OUT_CHANNEL,
                           kernel_size=3,
                           stride=1,
                           has_bias=True)
        self.bn1 = _bn(config.NECK_OUT_CHANNEL)
        self.relu1 = nn.ReLU()
        self.conv2 = _conv(config.NECK_OUT_CHANNEL,
                           config.KERNEL_NUM,
                           kernel_size=1,
                           has_bias=True)
        self._upsample = P.ResizeBilinear((self.long_size, self.long_size),
                                          align_corners=True)

        if self.inference:
            self.one_float32 = Tensor(1.0, mstype.float32)
            self.sigmoid = P.Sigmoid()
            self.greater = P.Greater()
            self.logic_and = P.LogicalAnd()

        print('ETSNet initialized!')
Example #23
    def __init__(self, residual_channels=None, gate_channels=None, kernel_size=None, skip_out_channels=None, bias=True,
                 dropout=1 - 0.95, dilation=1, cin_channels=-1, gin_channels=-1, padding=None, causal=True):
        super(ResidualConv1dGLU, self).__init__()
        self.dropout = dropout
        self.dropout_op = nn.Dropout(keep_prob=1. - self.dropout)
        self.eval_split_op = P.Split(axis=-1, output_num=2)
        self.train_split_op = P.Split(axis=1, output_num=2)
        self.tanh = P.Tanh()
        self.sigmoid = P.Sigmoid()
        self.mul = P.Mul()
        self.add = P.TensorAdd()

        if skip_out_channels is None:
            skip_out_channels = residual_channels
        if padding is None:
            if causal:
                padding = (kernel_size - 1) * dilation
            else:
                padding = (kernel_size - 1) // 2 * dilation
        self.causal = causal

        self.conv = Conv1d(residual_channels, gate_channels, kernel_size, pad_mode='pad',
                           padding=padding, dilation=dilation, has_bias=bias)

        # local conditioning
        if cin_channels > 0:
            self.conv1x1c = Conv1d1x1(cin_channels, gate_channels, has_bias=False)
        else:
            self.conv1x1c = None

        # global conditioning
        if gin_channels > 0:
            self.conv1x1g = Conv1d(gin_channels, gate_channels, has_bias=False, kernel_size=1, dilation=1)
        else:
            self.conv1x1g = None

        gate_out_channels = gate_channels // 2
        self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels, has_bias=bias)
        self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_out_channels, has_bias=bias)
        self.factor = math.sqrt(0.5)
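
A hedged sketch of the gated activation these operators implement inside construct, as in WaveNet; dropout and the local/global conditioning branches are omitted, and `x` is assumed to carry residual_channels channels.

        xa, xb = self.train_split_op(self.conv(x))        # filter half, gate half
        out = self.mul(self.tanh(xa), self.sigmoid(xb))   # tanh(filter) * sigmoid(gate)
        skip = self.conv1x1_skip(out)                     # skip-connection branch
        out = self.add(self.conv1x1_out(out), x) * self.factor   # residual, scaled by sqrt(0.5)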
Example #24
    def __init__(self, neg_item_num, l2_embed, dist_reg):
        super(BGCFLoss, self).__init__()

        self.neg_item_num = neg_item_num
        self.l2_embed = l2_embed
        self.dist_reg = dist_reg

        self.log = P.Log()
        self.pow = P.Pow()
        self.cast = P.Cast()
        self.tile = P.Tile()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.concat = P.Concat(1)
        self.concat2 = P.Concat(2)
        self.split = P.Split(0, 2)
        self.reduce_sum = P.ReduceSum()
        self.expand_dims = P.ExpandDims()
        self.multiply = P.Mul()
        self.matmul = P.BatchMatMul()
        self.squeeze = P.Squeeze(1)
        self.transpose = P.Transpose()
        self.l2_loss = P.L2Loss()
        self.sigmoid = P.Sigmoid()
Example #25
 def __init__(self, network):
     super(PredictWithSigmoid, self).__init__()
     self.network = network
     self.sigmoid = P.Sigmoid()
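
A hedged sketch of the matching construct, assuming the wrapped network returns its logits first, as in the Wide&Deep eval wrapper this pattern comes from.

 def construct(self, batch_ids, batch_wts, labels):
     logits, _, _ = self.network(batch_ids, batch_wts, labels)
     pred_probs = self.sigmoid(logits)   # logits -> probabilities
     return logits, pred_probs, labels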
Example #26
 def __init__(self):
     super(Net, self).__init__()
     self.ops = P.Sigmoid()
Example #27
 def __init__(self):
     super(Sigmoid, self).__init__()
     self.sigmoid = P.Sigmoid()
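
A hedged completion with a usage note; the construct body and the sample values are assumptions.

 def construct(self, x):
     return self.sigmoid(x)   # elementwise 1 / (1 + exp(-x))

 # e.g. Sigmoid()(Tensor(np.array([-1.0, 0.0, 1.0], np.float32))) -> ~[0.269, 0.5, 0.731]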
Example #28
 def softmax_relu_pass():
     x = Any()
     sigmoid_softmax_pattern = Prim([P.Sigmoid(), P.Softmax()])
     pattern = Call(sigmoid_softmax_pattern, [x])
     target = Call(P.ReLU(), [x])
     return pattern, target
Example #29
     'desc_bprop': [[1, 3, 4, 4], [1, 3, 4, 4]]}),
 ('ReLUGrad', {
     'block': G.ReluGrad(),
     'desc_inputs': [[1, 3, 4, 4], [1, 3, 4, 4]],
     'skip': ['backward']}),
 ('Elu', {
     'block': P.Elu(),
     'desc_inputs': [[2, 3, 4]],
     'desc_bprop': [[2, 3, 4]]}),
 ('EluGrad', {
     'block': G.EluGrad(),
     'desc_inputs': [[2, 3, 4], [2, 3, 4]],
     'desc_bprop': [[2, 3, 4]],
     'skip': ['backward']}),
 ('Sigmoid', {
     'block': P.Sigmoid(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('MaxPool', {
     'block': P.MaxPool(ksize=(2, 2), strides=(2, 2), padding="VALID"),
     'desc_inputs': [[100, 3, 28, 28]],
     'desc_bprop': [[100, 3, 14, 14]]}),
 ('MaxPoolGrad', {
     'block': G.MaxPoolGrad(ksize=(2, 2), strides=(2, 2), padding="VALID"),
     'desc_inputs': [[3, 4, 6, 6], [3, 4, 3, 3], [3, 4, 3, 3]],
     'desc_bprop': [[3, 4, 6, 6]],
     'skip': ['backward']}),
 ('AvgPool', {
     'block': P.AvgPool(ksize=(2, 2), strides=(2, 2), padding="VALID"),
     'desc_inputs': [[100, 3, 28, 28]],
     'desc_bprop': [[100, 3, 14, 14]]}),
Example #30
    def __init__(self):
        super(EAST, self).__init__()

        #param
        self.TEXT_SCALE = 512
        self.pi = mindspore.Tensor([math.pi], mindspore.float32)

        #network
        self.pvanet = pvanet.PAVNet(True)

        #for i = 0
        self.unpool0 = unpool((32, 32))

        #for i = 1
        self.concat1 = P.Concat(axis=1)
        self.conv1_1 = ops.conv_bn_relu(1280,
                                        128,
                                        stride=1,
                                        kernel_size=1,
                                        padding='valid')
        self.conv1_2 = ops.conv_bn_relu(128,
                                        128,
                                        stride=1,
                                        kernel_size=3,
                                        padding='pad',
                                        padding_number=1)
        self.unpool1 = unpool((64, 64))

        #for i = 2
        self.concat2 = P.Concat(axis=1)
        self.conv2_1 = ops.conv_bn_relu(384,
                                        64,
                                        stride=1,
                                        kernel_size=1,
                                        padding='valid')
        self.conv2_2 = ops.conv_bn_relu(64,
                                        64,
                                        stride=1,
                                        kernel_size=3,
                                        padding='pad',
                                        padding_number=1)
        self.unpool2 = unpool((128, 128))

        #for i = 3
        self.concat3 = P.Concat(axis=1)
        self.conv3_1 = ops.conv_bn_relu(192,
                                        32,
                                        stride=1,
                                        kernel_size=1,
                                        padding='valid')
        self.conv3_2 = ops.conv_bn_relu(32,
                                        32,
                                        stride=1,
                                        kernel_size=3,
                                        padding='pad',
                                        padding_number=1)
        self.conv3_3 = ops.conv_bn_relu(32,
                                        32,
                                        stride=1,
                                        kernel_size=3,
                                        padding='pad',
                                        padding_number=1)

        #output
        ## F_score
        self.conv_for_fscore = ops._conv(32,
                                         1,
                                         stride=1,
                                         kernel_size=1,
                                         padding='valid')
        self.sigmoid_for_fscore = P.Sigmoid()

        ## geo_map
        self.conv_for_geo_map = ops._conv(32,
                                          4,
                                          stride=1,
                                          kernel_size=1,
                                          padding='valid')
        self.sigmoid_for_geo_map = P.Sigmoid()

        ## angle_map
        self.conv_for_angle_map = ops._conv(32,
                                            1,
                                            stride=1,
                                            kernel_size=1,
                                            padding='valid')
        self.sigmoid_for_angle_map = P.Sigmoid()

        ## F_geometry
        self.concat_for_F_geometry = P.Concat(axis=1)

        ## other
        self.mul = P.Mul()
        self.add = P.TensorAdd()
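
A hedged sketch of how the three heads are typically combined in construct, following the EAST formulation: the score map is a probability, the geometry map is scaled to pixel distances, and the angle map is shifted into (-pi/2, pi/2); `feat` is the assumed fused feature map.

        f_score = self.sigmoid_for_fscore(self.conv_for_fscore(feat))
        geo_map = self.sigmoid_for_geo_map(self.conv_for_geo_map(feat)) * self.TEXT_SCALE
        angle_map = (self.sigmoid_for_angle_map(self.conv_for_angle_map(feat)) - 0.5) * self.pi
        f_geometry = self.concat_for_F_geometry((geo_map, angle_map))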