Example #1
    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1):
        super(ResidualBlock, self).__init__()

        # Bottleneck design: 1x1 reduce, 3x3 (carries the stride), 1x1 expand.
        channel = out_channel // self.expansion
        self.conv1 = _conv1x1(in_channel, channel, stride=1)
        self.bn1 = _bn(channel)

        self.conv2 = _conv3x3(channel, channel, stride=stride)
        self.bn2 = _bn(channel)

        self.conv3 = _conv1x1(channel, out_channel, stride=1)
        self.bn3 = _bn_last(out_channel)

        self.relu = nn.ReLU()

        # Project the identity branch whenever the stride or channel count
        # changes, so the two branches can be summed element-wise.
        self.down_sample = stride != 1 or in_channel != out_channel
        self.down_sample_layer = None

        if self.down_sample:
            self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride),
                                                        _bn(out_channel)])
        self.add = ops.TensorAdd()
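The snippet above only shows __init__; the residual sum itself happens in the
cell's construct, which is not part of this example. A minimal sketch of that
method, assuming only the attributes defined above:

    def construct(self, x):
        identity = x

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        if self.down_sample:
            identity = self.down_sample_layer(identity)

        # TensorAdd performs the element-wise residual sum.
        out = self.add(out, identity)
        return self.relu(out)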
Example #2
    def __init__(self, net_config, K=100, enable_nms_fp16=True):
        super(DetectionDecode, self).__init__()
        self.K = K
        self.nms = NMS(enable_nms_fp16=enable_nms_fp16)
        self.shape = ops.Shape()
        self.gather_topk = GatherTopK()
        self.half = ops.Split(axis=-1, output_num=2)
        self.add = ops.TensorAdd()
        self.concat_a2 = ops.Concat(axis=2)
        self.trans_gather_feature = TransposeGatherFeature()
        self.expand_dims = ops.ExpandDims()
        self.reshape = ops.Reshape()
        self.reg_offset = net_config.reg_offset
        self.Sigmoid = nn.Sigmoid()
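Among the ops above, self.half splits the last axis of a tensor in two, e.g. a
width/height regression map into its components. A standalone sketch of that
pattern (the shapes are made up for illustration):

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.ops as ops

half = ops.Split(axis=-1, output_num=2)
wh = Tensor(np.ones((1, 100, 2)), mindspore.float32)
w, h = half(wh)  # two tensors of shape (1, 100, 1)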
Example #3
    def __init__(self, in_channels, out_channels, stride=1):
        """init block"""
        super(ResidualBlock, self).__init__()

        out_chls = out_channels // self.expansion
        self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)
        self.bn1 = bn_with_initialize(out_chls)

        self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0)
        self.bn2 = bn_with_initialize(out_chls)

        self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
        self.bn3 = bn_with_initialize_last(out_channels)

        self.relu = ops.ReLU()
        self.add = ops.TensorAdd()
Example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 initializer_range=0.02,
                 dropout_prob=0.1,
                 compute_type=mstype.float32,
                 enable_fused_layernorm=False):
        super(BertOutput, self).__init__()
        self.dense = nn.Dense(in_channels, out_channels,
                              weight_init=TruncatedNormal(initializer_range)).to_float(compute_type)
        # nn.Dropout in this MindSpore version takes a keep probability,
        # hence 1 - dropout_prob.
        self.dropout = nn.Dropout(1 - dropout_prob)
        self.dropout_prob = dropout_prob
        self.add = ops.TensorAdd()
        if compute_type == mstype.float16:
            self.layernorm = FusedLayerNorm((out_channels,),
                                            use_batch_norm=enable_fused_layernorm).to_float(compute_type)
        else:
            self.layernorm = nn.LayerNorm((out_channels,)).to_float(compute_type)
        self.cast = ops.Cast()
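This cell wires up the standard Transformer output block: dense projection,
dropout, residual add of the block input, then LayerNorm. A minimal sketch of
the matching construct, assuming it follows that usual pattern:

    def construct(self, hidden_status, input_tensor):
        output = self.dense(hidden_status)
        output = self.dropout(output)
        # Residual connection before the normalization.
        output = self.add(input_tensor, output)
        output = self.layernorm(output)
        return output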
Example #5
    def __init__(self, net_config, K=100, enable_nms_fp16=True):
        super(MultiPoseDecode, self).__init__()
        self.K = K
        self.nms = NMS(enable_nms_fp16=enable_nms_fp16)
        self.shape = ops.Shape()
        self.gather_topk = GatherTopK()
        self.gather_topk_channel = GatherTopKChannel()
        self.gather_by_ind = GatherFeatureByInd()
        self.half = ops.Split(axis=-1, output_num=2)
        self.half_first = ops.Split(axis=0, output_num=2)
        self.split = ops.Split(axis=-1, output_num=4)
        self.flip_lr = FlipLR()
        self.flip_lr_off = FlipLROff()
        self.flip_tensor = FlipTensor()
        self.concat = ops.Concat(axis=1)
        self.concat_a2 = ops.Concat(axis=2)
        self.concat_a3 = ops.Concat(axis=3)
        self.trans_gather_feature = TransposeGatherFeature()
        self.expand_dims = ops.ExpandDims()
        self.reshape = ops.Reshape()
        self.add = ops.TensorAdd()
        self.dtype = ops.DType()
        self.cast = ops.Cast()
        self.thresh = 0.1
        self.transpose = ops.Transpose()
        self.perm_list = (0, 2, 1, 3)
        self.tile = ops.Tile()
        self.greater = ops.Greater()
        self.square = ops.Square()
        self.sqrt = ops.Sqrt()
        self.reduce_sum = ops.ReduceSum()
        self.min = ops.ArgMinWithValue(axis=3)
        self.max = ops.Maximum()
        self.hm_hp = net_config.hm_hp
        self.dense_hp = net_config.dense_hp
        self.reg_offset = net_config.reg_offset
        self.reg_hp_offset = net_config.reg_hp_offset
        # Indices into the network's output list shift depending on which
        # optional heads (keypoint heatmap, offsets) are enabled.
        self.hm_hp_ind = 3 if self.hm_hp else 2
        self.reg_ind = self.hm_hp_ind + 1 if self.reg_offset else self.hm_hp_ind
        self.reg_hp_ind = self.reg_ind + 1 if self.reg_hp_offset else self.reg_ind
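The arithmetic ops collected here (Square, ReduceSum, Sqrt, ArgMinWithValue)
are the usual ingredients of a nearest-keypoint distance test. A standalone
sketch of how they compose, with hypothetical shapes:

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.ops as ops

square = ops.Square()
sqrt = ops.Sqrt()
reduce_sum = ops.ReduceSum()
argmin = ops.ArgMinWithValue(axis=3)

# Hypothetical offsets between regressed joints and candidate keypoints.
diff = Tensor(np.random.randn(1, 17, 100, 100, 2), mindspore.float32)
dist = sqrt(reduce_sum(square(diff), -1))  # (1, 17, 100, 100)
min_ind, min_dist = argmin(dist)           # nearest candidate per joint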
Example #6
    def __init__(self,
                 batch_size,
                 from_tensor_width,
                 to_tensor_width,
                 from_seq_length,
                 to_seq_length,
                 num_attention_heads=1,
                 size_per_head=512,
                 query_act=None,
                 key_act=None,
                 value_act=None,
                 has_attention_mask=False,
                 attention_probs_dropout_prob=0.0,
                 use_one_hot_embeddings=False,
                 initializer_range=0.02,
                 do_return_2d_tensor=False,
                 use_relative_positions=False,
                 compute_type=mstype.float32):

        super(BertAttention, self).__init__()
        self.batch_size = batch_size
        self.from_seq_length = from_seq_length
        self.to_seq_length = to_seq_length
        self.num_attention_heads = num_attention_heads
        self.size_per_head = size_per_head
        self.has_attention_mask = has_attention_mask
        self.use_relative_positions = use_relative_positions

        self.scores_mul = Tensor([1.0 / math.sqrt(float(self.size_per_head))], dtype=compute_type)
        self.reshape = ops.Reshape()
        self.shape_from_2d = (-1, from_tensor_width)
        self.shape_to_2d = (-1, to_tensor_width)
        weight = TruncatedNormal(initializer_range)
        units = num_attention_heads * size_per_head
        self.query_layer = nn.Dense(from_tensor_width,
                                    units,
                                    activation=query_act,
                                    weight_init=weight).to_float(compute_type)
        self.key_layer = nn.Dense(to_tensor_width,
                                  units,
                                  activation=key_act,
                                  weight_init=weight).to_float(compute_type)
        self.value_layer = nn.Dense(to_tensor_width,
                                    units,
                                    activation=value_act,
                                    weight_init=weight).to_float(compute_type)

        self.shape_from = (batch_size, from_seq_length, num_attention_heads, size_per_head)
        self.shape_to = (batch_size, to_seq_length, num_attention_heads, size_per_head)

        self.matmul_trans_b = ops.BatchMatMul(transpose_b=True)
        self.multiply = ops.Mul()
        self.transpose = ops.Transpose()
        self.trans_shape = (0, 2, 1, 3)
        self.trans_shape_relative = (2, 0, 1, 3)
        self.trans_shape_position = (1, 2, 0, 3)
        #self.multiply_data = Tensor([-10000.0,], dtype=compute_type)
        self.multiply_data = Tensor([-10000.0,], dtype=mstype.float32)
        self.batch_num = batch_size * num_attention_heads
        self.matmul = ops.BatchMatMul()

        self.softmax = nn.Softmax()
        self.dropout = nn.Dropout(1 - attention_probs_dropout_prob)

        if self.has_attention_mask:
            self.expand_dims = ops.ExpandDims()
            self.sub = ops.Sub()
            self.add = ops.TensorAdd()
            self.cast = ops.Cast()
            self.get_dtype = ops.DType()
        if do_return_2d_tensor:
            self.shape_return = (batch_size * from_seq_length, num_attention_heads * size_per_head)
        else:
            self.shape_return = (batch_size, from_seq_length, num_attention_heads * size_per_head)

        self.cast_compute_type = SaturateCast(dst_type=compute_type)
        if self.use_relative_positions:
            self._generate_relative_positions_embeddings = \
                RelaPosEmbeddingsGenerator(length=to_seq_length,
                                           depth=size_per_head,
                                           max_relative_position=16,
                                           initializer_range=initializer_range,
                                           use_one_hot_embeddings=use_one_hot_embeddings)
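The mask branch above (Sub, TensorAdd, the -10000.0 constant) implements the
standard additive attention mask: masked positions get a large negative bias
before the softmax, so they contribute almost nothing afterwards. A
self-contained sketch of that trick with toy shapes:

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.ops as ops

multiply = ops.Mul()
sub = ops.Sub()
add = ops.TensorAdd()

scores = Tensor(np.random.randn(1, 1, 4, 4), mindspore.float32)
# 1.0 marks positions to keep, 0.0 positions to mask out.
mask = Tensor(np.tile([1.0, 1.0, 0.0, 0.0], (1, 1, 4, 1)), mindspore.float32)

multiply_data = Tensor([-10000.0], mindspore.float32)
adder = multiply(sub(Tensor([1.0], mindspore.float32), mask), multiply_data)
scores = add(adder, scores)  # masked entries are now ~ -10000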