Example #1
    def __init__(self):
        super(EAST, self).__init__()

        #param
        self.TEXT_SCALE = 512
        self.pi = mindspore.Tensor([math.pi], mindspore.float32)

        #network
        self.model = vgg.vgg16()
        
        #for i = 0
        self.split = P.Split(1, 2)
        self.unpool0 = unpool((32, 32))
        self._concat = P.Concat(axis=1)

        #for i = 1
        self.concat1 = P.Concat(axis=1)
        self.conv1_1 = ops.conv_bn_relu(1024, 128, stride=1, kernel_size=1, padding='valid')
        self.conv1_2 = ops.conv_bn_relu(128, 128, stride=1, kernel_size=3, padding='pad', padding_number=1)
        self.unpool1 = unpool((64, 64))

        #for i = 2
        self.concat2 = P.Concat(axis=1)
        self.conv2_1 = ops.conv_bn_relu(384, 64, stride=1, kernel_size=1, padding='valid')
        self.conv2_2 = ops.conv_bn_relu(64, 64, stride=1, kernel_size=3, padding='pad', padding_number=1)
        self.unpool2 = unpool((128, 128))


        #for i = 3
        self.concat3 = P.Concat(axis=1)
        self.conv3_1 = ops.conv_bn_relu(192, 32, stride=1, kernel_size=1, padding='valid')
        self.conv3_2 = ops.conv_bn_relu(32, 32, stride=1, kernel_size=3, padding='pad', padding_number=1)
        self.conv3_3 = ops.conv_bn_relu(32, 32, stride=1, kernel_size=3, padding='pad', padding_number=1)


        #output
        ## F_score
        self.conv_for_fscore = ops._conv(32, 1, stride=1, kernel_size=1, padding='valid')
        self.sigmoid_for_fscore = nn.Sigmoid()

        ## geo_map
        self.conv_for_geo_map = ops._conv(32, 4, stride=1, kernel_size=1, padding='valid')
        self.sigmoid_for_geo_map = nn.Sigmoid()

        ## angle_map
        self.conv_for_angle_map = ops._conv(32, 1, stride=1, kernel_size=1, padding='valid')
        self.sigmoid_for_angle_map = nn.Sigmoid()

        ## F_geometry
        self.concat_for_F_geometry = P.Concat(axis=1)


        ## other
        self.mul = P.Mul()
        self.add = P.TensorAdd()
Example #2
    def __init__(self, in_classes, kernel_size, padding, maxpool, has_bias):
        super(MusicTaggerCNN, self).__init__()
        self.in_classes = in_classes
        self.kernel_size = kernel_size
        self.maxpool = maxpool
        self.padding = padding
        self.has_bias = has_bias
        # build model
        self.conv1 = nn.Conv2d(self.in_classes[0], self.in_classes[1],
                               self.kernel_size[0])
        self.conv2 = nn.Conv2d(self.in_classes[1], self.in_classes[2],
                               self.kernel_size[1])
        self.conv3 = nn.Conv2d(self.in_classes[2], self.in_classes[3],
                               self.kernel_size[2])
        self.conv4 = nn.Conv2d(self.in_classes[3], self.in_classes[4],
                               self.kernel_size[3])

        self.bn1 = nn.BatchNorm2d(self.in_classes[1])
        self.bn2 = nn.BatchNorm2d(self.in_classes[2])
        self.bn3 = nn.BatchNorm2d(self.in_classes[3])
        self.bn4 = nn.BatchNorm2d(self.in_classes[4])

        self.pool1 = nn.MaxPool2d(maxpool[0], maxpool[0])
        self.pool2 = nn.MaxPool2d(maxpool[1], maxpool[1])
        self.pool3 = nn.MaxPool2d(maxpool[2], maxpool[2])
        self.pool4 = nn.MaxPool2d(maxpool[3], maxpool[3])
        self.poolreduce = P.ReduceMax(keep_dims=False)
        self.Act = nn.ReLU()
        self.flatten = nn.Flatten()
        self.dense = nn.Dense(2048, 50, activation='sigmoid')
        self.sigmoid = nn.Sigmoid()
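
The snippet shows only the constructor. As a hedged instantiation sketch, the shapes below are illustrative assumptions (not taken from the snippet), chosen so four conv/pool stages reduce a (1, 96, 1366) spectrogram to the 2048 features the 50-way dense head expects:

# Hypothetical shapes, for illustration only.
net = MusicTaggerCNN(in_classes=[1, 128, 384, 768, 2048],
                     kernel_size=[3, 3, 3, 3],
                     padding=[0] * 4,
                     maxpool=[(2, 4), (4, 5), (3, 8), (4, 8)],
                     has_bias=True)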
Example #3
    def __init__(self, scale, config=ConfigYOLOV4CspDarkNet53(), is_training=True):
        super(DetectionBlock, self).__init__()
        self.config = config
        if scale == 's':
            idx = (0, 1, 2)
            self.scale_x_y = 1.2
            self.offset_x_y = 0.1
        elif scale == 'm':
            idx = (3, 4, 5)
            self.scale_x_y = 1.1
            self.offset_x_y = 0.05
        elif scale == 'l':
            idx = (6, 7, 8)
            self.scale_x_y = 1.05
            self.offset_x_y = 0.025
        else:
            raise KeyError("Invalid scale value for DetectionBlock")
        self.anchors = Tensor([self.config.anchor_scales[i] for i in idx], ms.float32)
        self.num_anchors_per_scale = 3
        self.num_attrib = 4 + 1 + self.config.num_classes
        self.lambda_coord = 1

        self.sigmoid = nn.Sigmoid()
        self.reshape = P.Reshape()
        self.tile = P.Tile()
        self.concat = P.Concat(axis=-1)
        self.conf_training = is_training
Example #4
    def __init__(self):
        super(Discriminator, self).__init__()
        self.fc1 = nn.Dense(1024, 400)
        self.fc2 = nn.Dense(400, 720)
        self.fc3 = nn.Dense(720, 1024)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.flatten = nn.Flatten()
Example #5
def logits_to_probs(logits, is_binary=False):
    """
    converts logits into probabilities.
    Args:
        logits (Tensor)
        is_binary (bool)
    """
    if is_binary:
        return nn.Sigmoid()(logits)
    return nn.Softmax(axis=-1)(logits)
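
A minimal usage sketch for logits_to_probs, assuming the usual MindSpore imports:

import mindspore
import mindspore.nn as nn
from mindspore import Tensor

logits = Tensor([[2.0, -1.0, 0.5]], mindspore.float32)
probs = logits_to_probs(logits)                         # softmax over the last axis
binary_probs = logits_to_probs(logits, is_binary=True)  # element-wise sigmoid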
Example #6
    def __init__(self, channel, reduction=4):
        super().__init__()
        reduced_chs = _make_value_divisible(channel / reduction, 1)
        self.avg_pool = AdaptiveAvgPool(output_size=(1, 1))
        weight = weight_variable()
        self.conv_reduce = nn.Conv2d(in_channels=channel, out_channels=reduced_chs, kernel_size=1, has_bias=True,
                                     weight_init=weight)
        self.act1 = Swish()
        self.conv_expand = nn.Conv2d(in_channels=reduced_chs, out_channels=channel, kernel_size=1, has_bias=True)
        self.act2 = nn.Sigmoid()
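
Only __init__ is shown above; a construct for this squeeze-and-excite block would typically follow the standard SE flow. A minimal sketch, assuming that flow (not part of the original source):

    def construct(self, x):
        # squeeze: global spatial average pooling -> (N, C, 1, 1)
        se = self.avg_pool(x)
        # excite: bottleneck conv + Swish, then expand conv + sigmoid gate
        se = self.act1(self.conv_reduce(se))
        se = self.act2(self.conv_expand(se))
        # re-scale the input feature map channel-wise
        return x * se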
Example #7
    def __init__(self):
        super(CenterfaceMobilev2, self).__init__()
        self.config = ConfigCenterface()

        self.base = mobilenet_v2()
        channels = self.base.feat_channel
        self.dla_up = MobileNetUp(channels, out_dim=self.config.head_conv)

        self.hm_head = nn.SequentialCell([conv1x1(self.config.head_conv, 1, has_bias=True),
                                          nn.Sigmoid().add_flags_recursive(fp32=True)])
        self.wh_head = conv1x1(self.config.head_conv, 2, has_bias=True)
        self.off_head = conv1x1(self.config.head_conv, 2, has_bias=True)
        self.kps_head = conv1x1(self.config.head_conv, 10, has_bias=True)
Example #8
def Act(type='default'):
    if type in ['default', 'def']:
        return Act(DEFAULTS['activation'])
    if type == 'relu':
        return nn.ReLU()
    elif type == 'sigmoid':
        return nn.Sigmoid()
    elif type == 'hswish':
        return nn.HSwish()
    elif type == 'leaky_relu':
        return nn.LeakyReLU(alpha=DEFAULTS['leaky_relu']['alpha'])
    else:
        raise ValueError("Unsupported activation type: %s" % type)
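
A usage sketch; DEFAULTS is not shown in the snippet, so the dictionary below is a placeholder assumption (and mindspore.nn is assumed imported as nn):

DEFAULTS = {'activation': 'relu', 'leaky_relu': {'alpha': 0.2}}  # hypothetical values

act = Act('sigmoid')  # returns nn.Sigmoid()
default_act = Act()   # 'default' resolves through DEFAULTS['activation'] to nn.ReLU()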
Example #9
    def __init__(self, net_config, K=100, enable_nms_fp16=True):
        super(DetectionDecode, self).__init__()
        self.K = K
        self.nms = NMS(enable_nms_fp16=enable_nms_fp16)
        self.shape = ops.Shape()
        self.gather_topk = GatherTopK()
        self.half = ops.Split(axis=-1, output_num=2)
        self.add = ops.TensorAdd()
        self.concat_a2 = ops.Concat(axis=2)
        self.trans_gather_feature = TransposeGatherFeature()
        self.expand_dims = ops.ExpandDims()
        self.reshape = ops.Reshape()
        self.reg_offset = net_config.reg_offset
        self.Sigmoid = nn.Sigmoid()
Example #10
    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1,
                 use_se=False, se_block=False):
        super(ResidualBlock, self).__init__()
        self.stride = stride
        self.use_se = use_se
        self.se_block = se_block
        channel = out_channel // self.expansion
        self.conv1 = _conv1x1(in_channel, channel, stride=1, use_se=self.use_se)
        self.bn1 = _bn(channel)
        if self.use_se and self.stride != 1:
            self.e2 = nn.SequentialCell([_conv3x3(channel, channel, stride=1, use_se=True), _bn(channel),
                                         nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')])
        else:
            self.conv2 = _conv3x3(channel, channel, stride=stride, use_se=self.use_se)
            self.bn2 = _bn(channel)

        self.conv3 = _conv1x1(channel, out_channel, stride=1, use_se=self.use_se)
        self.bn3 = _bn_last(out_channel)
        if self.se_block:
            self.se_global_pool = P.ReduceMean(keep_dims=False)
            self.se_dense_0 = _fc(out_channel, int(out_channel / 4), use_se=self.use_se)
            self.se_dense_1 = _fc(int(out_channel / 4), out_channel, use_se=self.use_se)
            self.se_sigmoid = nn.Sigmoid()
            self.se_mul = P.Mul()
        self.relu = nn.ReLU()

        self.down_sample = False

        if stride != 1 or in_channel != out_channel:
            self.down_sample = True
        self.down_sample_layer = None

        if self.down_sample:
            if self.use_se:
                if stride == 1:
                    self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel,
                                                                         stride, use_se=self.use_se), _bn(out_channel)])
                else:
                    self.down_sample_layer = nn.SequentialCell([nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same'),
                                                                _conv1x1(in_channel, out_channel, 1,
                                                                         use_se=self.use_se), _bn(out_channel)])
            else:
                self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,
                                                                     use_se=self.use_se), _bn(out_channel)])
Example #11
    def __init__(self, channel, reduction=16):
        """rcan"""
        super(CALayer, self).__init__()
        # global average pooling: feature --> point
        self.avg_pool = AdaptiveAvgPool2d()
        # feature channel downscale and upscale --> channel weight
        self.conv_du = nn.SequentialCell([
            nn.Conv2d(channel,
                      channel // reduction,
                      1,
                      padding=0,
                      has_bias=True,
                      pad_mode='pad'),
            nn.ReLU(),
            nn.Conv2d(channel // reduction,
                      channel,
                      1,
                      padding=0,
                      has_bias=True,
                      pad_mode='pad'),
            nn.Sigmoid()
        ])
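
The matching construct is not included in the snippet; the usual RCAN channel-attention forward pass would look like the following sketch (an assumption, not the original code):

    def construct(self, x):
        y = self.avg_pool(x)  # (N, C, 1, 1) channel descriptor
        y = self.conv_du(y)   # per-channel weights in (0, 1) via the sigmoid
        return x * y          # re-weight each channel of the input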
Example #12
    def __init__(self, scale, config):
        super(DetectionBlock, self).__init__()

        self.config = config
        if scale == 's':
            idx = (0, 1, 2)
        elif scale == 'm':
            idx = (3, 4, 5)
        elif scale == 'l':
            idx = (6, 7, 8)
        else:
            raise KeyError("Invalid scale value for DetectionBlock")
        self.anchors = Tensor([self.config.anchor_scales[i] for i in idx], ms.float32)
        self.num_anchors_per_scale = 3
        self.num_attrib = 4 + 1 + self.config.num_classes
        self.ignore_threshold = 0.5
        self.lambda_coord = 1

        self.sigmoid = nn.Sigmoid()
        self.reshape = P.Reshape()
        self.tile = P.Tile()
        self.concat = P.Concat(axis=-1)
        self.input_shape = Tensor(tuple(config.img_shape[::-1]), ms.float32)
Example #13
    def forward_given_params(self, x, weights, biases):
        """
        Compute output value of the fully connected NNs

        Parameters
        ----------
        x: batch_size x num_vars
        weights: List
            ith list contains weights for ith MLP
        biases: List
            ith list contains biases for ith MLP
        Returns
        -------
        out: batch_size x num_vars * num_params
            the parameters of each variable conditional
        """
        for k in range(self.hidden_num + 1):
            # apply affine operator
            if k == 0:
                # mask the first-layer weights with the transposed adjacency
                # so each variable's MLP only sees its parent variables
                adj = ops.expand_dims(self.adjacency.transpose(), 1)
                einsum_one = ops.mul(weights[k], adj)
                # batched affine map with the masked weights
                x = ops.expand_dims(ops.expand_dims(x, 2), 1)
                x = ops.matmul(einsum_one, x).squeeze(3) + biases[k]
            else:
                x = ops.matmul(weights[k], ops.expand_dims(
                    x, 3)).squeeze(3) + biases[k]

            # apply non-linearity element-wise
            if k != self.hidden_num:
                if self.nonlinear == "leaky-relu":
                    x = nn.LeakyReLU(alpha=0.01)(x)
                else:
                    x = nn.Sigmoid()(x)

        return ops.Unstack(axis=1)(x)
Example #14
    def __init__(self):
        super(Sigmoid, self).__init__()
        self.cast = ops.Cast()
        self.dtype = ops.DType()
        self.sigmoid = nn.Sigmoid()
        self.clip_by_value = ops.clip_by_value
Example #15
def swish(x):
    return x * nn.Sigmoid()(x)
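
swish(x) computes x * sigmoid(x) element-wise; a minimal check, assuming the usual imports:

import mindspore
import mindspore.nn as nn
from mindspore import Tensor

x = Tensor([1.0, -2.0], mindspore.float32)
print(swish(x))  # approximately [0.7311, -0.2384]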
Example #16
    def __init__(self):
        super(Decoder, self).__init__()
        self.fc2 = nn.Dense(400, 1024)
        self.sigmoid = nn.Sigmoid()
        self.reshape = P.Reshape()
Example #17
    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()
Example #18
    def __init__(self):
        super(Decoder, self).__init__()
        self.fc1 = nn.Dense(3, 6)
        self.sigmoid = nn.Sigmoid()