Example #1
    def __init__(self, in_channels, out_channels, num_outs):
        super(FeatPyramidNeck, self).__init__()
        self.num_outs = num_outs
        self.in_channels = in_channels
        self.fpn_layer = len(self.in_channels)

        assert self.num_outs >= len(in_channels)

        self.lateral_convs_list_ = []
        self.fpn_convs_ = []

        for channel in in_channels:
            l_conv = _conv(channel,
                           out_channels,
                           kernel_size=1,
                           stride=1,
                           padding=0,
                           pad_mode='valid')
            fpn_conv = _conv(out_channels,
                             out_channels,
                             kernel_size=3,
                             stride=1,
                             padding=0,
                             pad_mode='same')
            self.lateral_convs_list_.append(l_conv)
            self.fpn_convs_.append(fpn_conv)
        self.lateral_convs_list = nn.layer.CellList(self.lateral_convs_list_)
        self.fpn_convs_list = nn.layer.CellList(self.fpn_convs_)
        self.interpolate1 = P.ResizeBilinear((48, 80))
        self.interpolate2 = P.ResizeBilinear((96, 160))
        self.interpolate3 = P.ResizeBilinear((192, 320))
        self.cast = P.Cast()
        self.maxpool = P.MaxPool(ksize=1, strides=2, padding="same")
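
Note: the three ResizeBilinear ops above are bound to fixed output sizes, (48, 80), (96, 160) and (192, 320), each a 2x step, so this neck assumes a fixed input resolution; in the MindSpore 1.x ops API used throughout these examples, the target size is a static attribute of the op rather than a call-time argument.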
Example #2
    def construct(self, inputs):
        """Call forward function."""
        if self.size is not None:
            resize = P.ResizeBilinear(self.size, self.align_corners)
        else:
            shape = self.shape(inputs)
            h = shape[2] * self.scale_factor
            w = shape[3] * self.scale_factor
            resize = P.ResizeBilinear((h, w), self.align_corners)

        return resize(inputs)
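
A minimal sketch of the enclosing cell this construct assumes; the class name (Upsample) and the __init__ body are assumptions, only the attribute names come from the snippet:

    class Upsample(nn.Cell):
        def __init__(self, size=None, scale_factor=None, align_corners=False):
            super(Upsample, self).__init__()
            self.size = size                  # explicit (H, W) target, or None
            self.scale_factor = scale_factor  # multiplier used when size is None
            self.align_corners = align_corners
            self.shape = P.Shape()            # static shape of the NCHW input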
Example #3
    def __init__(self, feature_shape, scale_size=1.0, decoder_output_stride=4):
        super(DecoderSampleBlock, self).__init__()
        sample_h = (feature_shape[0] * scale_size +
                    1) / decoder_output_stride + 1
        sample_w = (feature_shape[1] * scale_size +
                    1) / decoder_output_stride + 1
        self.sample = P.ResizeBilinear((int(sample_h), int(sample_w)),
                                       align_corners=True)
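
As a worked example under typical DeepLab assumptions (feature_shape = (513, 513), scale_size = 1.0, decoder_output_stride = 4): sample_h = int((513 * 1.0 + 1) / 4 + 1) = int(129.5) = 129, which matches the usual (size - 1) / stride + 1 grid used with align_corners=True.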
Example #4
    def construct(self, high_feature, *low_feature):
        if self.use_deconv:
            output = self.up_conv(high_feature)
        else:
            _, _, h, w = F.shape(high_feature)
            output = P.ResizeBilinear((h * 2, w * 2))(high_feature)
            output = self.up_conv(output)
        for feature in low_feature:
            output = self.concat((output, feature))
        return self.conv(output)
Example #5
    def construct(self, inputs):
        # 1x1 lateral convolutions project every input level to a common
        # channel count.
        image_features = ()
        for i, feature in enumerate(inputs):
            image_features = image_features + (self.lateral_convs_list[i](feature),)

        # Top-down pathway: upsample the coarser map to the next finer
        # level's spatial size and add the lateral feature.
        features = (image_features[-1],)
        for i in range(len(inputs) - 1):
            top = len(inputs) - i - 1
            down = top - 1
            size = F.shape(inputs[down])
            top_down = P.ResizeBilinear((size[2], size[3]))(features[-1])
            top_down = top_down + image_features[down]
            features = features + (top_down,)

        # 3x3 output convolutions, emitted from the finest level upward.
        extract_features = ()
        num_features = len(features)
        for i in range(num_features):
            extract_features = extract_features + (self.fpn_convs_list[i](features[num_features - i - 1]),)

        return extract_features
Example #6
    def __init__(self, config):
        super(ETSNet, self).__init__()
        self.kernel_num = config.KERNEL_NUM
        self.inference = config.INFERENCE
        if config.INFERENCE:
            self.long_size = config.INFER_LONG_SIZE
        else:
            self.long_size = config.TRAIN_LONG_SIZE

        # backbone
        self.feature_extractor = ResNet(ResidualBlock,
                                        config.BACKBONE_LAYER_NUMS,
                                        config.BACKBONE_IN_CHANNELS,
                                        config.BACKBONE_OUT_CHANNELS)

        # neck
        self.feature_fusion = FPN(config.BACKBONE_OUT_CHANNELS,
                                  config.NECK_OUT_CHANNEL, self.long_size)

        # head
        self.conv1 = _conv(4 * config.NECK_OUT_CHANNEL,
                           config.NECK_OUT_CHANNEL,
                           kernel_size=3,
                           stride=1,
                           has_bias=True)
        self.bn1 = _bn(config.NECK_OUT_CHANNEL)
        self.relu1 = nn.ReLU()
        self.conv2 = _conv(config.NECK_OUT_CHANNEL,
                           config.KERNEL_NUM,
                           kernel_size=1,
                           has_bias=True)
        self._upsample = P.ResizeBilinear((self.long_size, self.long_size),
                                          align_corners=True)

        if self.inference:
            self.one_float32 = Tensor(1.0, mstype.float32)
            self.sigmoid = P.Sigmoid()
            self.greater = P.Greater()
            self.logic_and = P.LogicalAnd()

        print('ETSNet initialized!')
Example #7
    def __init__(self, num_classes, feature_shape, backbone, channel, depth,
                 infer_scale_sizes, atrous_rates, decoder_output_stride,
                 output_stride, fine_tune_batch_norm, image_pyramid):
        super(DeepLabV3, self).__init__()
        self.infer_scale_sizes = []
        if infer_scale_sizes is not None:
            self.infer_scale_sizes = infer_scale_sizes

        if image_pyramid is None:
            image_pyramid = [1.0]

        self.image_pyramid = image_pyramid
        scale_sizes = []
        for pyramid in image_pyramid:
            scale_sizes.append(pyramid)
        for scale in self.infer_scale_sizes:
            scale_sizes.append(scale)
        self.samples = []
        for scale_size in scale_sizes:
            self.samples.append(SampleBlock(feature_shape, scale_size))
        self.samples = nn.CellList(self.samples)
        self.deeplabv3 = SingleDeepLabV3(
            num_classes=num_classes,
            feature_shape=feature_shape,
            backbone=resnet50_dl(fine_tune_batch_norm),
            channel=channel,
            depth=depth,
            scale_sizes=scale_sizes,
            atrous_rates=atrous_rates,
            decoder_output_stride=decoder_output_stride,
            output_stride=output_stride,
            fine_tune_batch_norm=fine_tune_batch_norm)
        self.softmax = P.Softmax(axis=1)
        self.concat = P.Concat(axis=2)
        self.expand_dims = P.ExpandDims()
        self.reduce_mean = P.ReduceMean()
        self.argmax = P.Argmax(axis=1)
        self.sample_common = P.ResizeBilinear(
            (int(feature_shape[2]), int(feature_shape[3])), align_corners=True)
Example #8
    def __init__(self, feature_shape, scale_size=1.0):
        super(SampleBlock, self).__init__()
        sample_h = np.ceil(float(feature_shape[2]) * scale_size)
        sample_w = np.ceil(float(feature_shape[3]) * scale_size)
        self.sample = P.ResizeBilinear((int(sample_h), int(sample_w)),
                                       align_corners=True)
Example #9
    def __init__(self,
                 num_classes,
                 feature_shape,
                 backbone,
                 channel,
                 depth,
                 scale_sizes,
                 atrous_rates,
                 decoder_output_stride,
                 output_stride,
                 fine_tune_batch_norm=False):
        super(SingleDeepLabV3, self).__init__()
        self.num_classes = num_classes
        self.channel = channel
        self.depth = depth
        self.scale_sizes = []
        for scale_size in np.sort(scale_sizes):
            self.scale_sizes.append(scale_size)
        self.net = backbone
        self.aspp = ASPP(channel=self.channel,
                         depth=self.depth,
                         feature_shape=[feature_shape[2], feature_shape[3]],
                         scale_sizes=self.scale_sizes,
                         atrous_rates=atrous_rates,
                         output_stride=output_stride,
                         fine_tune_batch_norm=fine_tune_batch_norm)

        atrous_rates_len = 0
        if atrous_rates is not None:
            atrous_rates_len = len(atrous_rates)
        self.fc1 = _conv_bn_relu(depth * (2 + atrous_rates_len),
                                 depth,
                                 ksize=1,
                                 stride=1,
                                 use_batch_statistics=fine_tune_batch_norm)
        self.fc2 = nn.Conv2d(depth,
                             num_classes,
                             kernel_size=1,
                             stride=1,
                             has_bias=True)
        self.upsample = P.ResizeBilinear(
            (int(feature_shape[2]), int(feature_shape[3])), align_corners=True)
        self.samples = []
        for scale_size in self.scale_sizes:
            self.samples.append(SampleBlock(feature_shape, scale_size))
        self.samples = nn.CellList(self.samples)
        self.feature_shape = [
            float(feature_shape[0]),
            float(feature_shape[1]),
            float(feature_shape[2]),
            float(feature_shape[3])
        ]

        self.pad = P.Pad(((0, 0), (0, 0), (1, 1), (1, 1)))
        self.dropout = nn.Dropout(keep_prob=0.9)
        self.shape = P.Shape()
        self.decoder_output_stride = decoder_output_stride
        if decoder_output_stride is not None:
            self.decoder = Decoder(
                low_level_channel=depth,
                channel=depth,
                depth=depth,
                feature_shape=[feature_shape[2], feature_shape[3]],
                scale_sizes=self.scale_sizes,
                decoder_output_stride=decoder_output_stride,
                fine_tune_batch_norm=fine_tune_batch_norm)
Example #10
    def __init__(self, feature_shape, scale_size, output_stride):
        super(ASPPSampleBlock, self).__init__()
        sample_h = (feature_shape[0] * scale_size + 1) / output_stride + 1
        sample_w = (feature_shape[1] * scale_size + 1) / output_stride + 1
        self.sample = P.ResizeBilinear((int(sample_h), int(sample_w)),
                                       align_corners=True)
Example #11
    def __init__(self):
        super(NetLoss, self).__init__()
        self.shape = P.Shape()
        self.up_sample1 = P.ResizeBilinear((14, 14))
        self.up_sample2 = P.ResizeBilinear((28, 28))
        self.up_sample3 = P.ResizeBilinear((36, 36))
Example #12
    def __init__(self, size=None, align_corner=False):
        super(NetResizeBilinear, self).__init__()
        self.op = P.ResizeBilinear(size=size, align_corners=align_corner)
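
For reference, a self-contained usage sketch of the underlying op (input shape and target size are illustrative):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    x = Tensor(np.ones((1, 3, 16, 16)).astype(np.float32))  # NCHW float input
    resize = P.ResizeBilinear((32, 32), align_corners=True)
    y = resize(x)  # output shape: (1, 3, 32, 32)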
Example #13
def unpool(size):
    return P.ResizeBilinear(size, align_corners=True)
Example #14
    def __init__(self, in_channels, out_channel, long_size):
        super(FPN, self).__init__()

        self.long_size = long_size

        # reduce layers
        self.reduce_conv_c2 = _conv(in_channels[0],
                                    out_channel,
                                    kernel_size=1,
                                    has_bias=True)
        self.reduce_bn_c2 = _bn(out_channel)
        self.reduce_relu_c2 = nn.ReLU()

        self.reduce_conv_c3 = _conv(in_channels[1],
                                    out_channel,
                                    kernel_size=1,
                                    has_bias=True)
        self.reduce_bn_c3 = _bn(out_channel)
        self.reduce_relu_c3 = nn.ReLU()

        self.reduce_conv_c4 = _conv(in_channels[2],
                                    out_channel,
                                    kernel_size=1,
                                    has_bias=True)
        self.reduce_bn_c4 = _bn(out_channel)
        self.reduce_relu_c4 = nn.ReLU()

        self.reduce_conv_c5 = _conv(in_channels[3],
                                    out_channel,
                                    kernel_size=1,
                                    has_bias=True)
        self.reduce_bn_c5 = _bn(out_channel)
        self.reduce_relu_c5 = nn.ReLU()

        # smooth layers
        self.smooth_conv_p4 = _conv(out_channel,
                                    out_channel,
                                    kernel_size=3,
                                    has_bias=True)
        self.smooth_bn_p4 = _bn(out_channel)
        self.smooth_relu_p4 = nn.ReLU()

        self.smooth_conv_p3 = _conv(out_channel,
                                    out_channel,
                                    kernel_size=3,
                                    has_bias=True)
        self.smooth_bn_p3 = _bn(out_channel)
        self.smooth_relu_p3 = nn.ReLU()

        self.smooth_conv_p2 = _conv(out_channel,
                                    out_channel,
                                    kernel_size=3,
                                    has_bias=True)
        self.smooth_bn_p2 = _bn(out_channel)
        self.smooth_relu_p2 = nn.ReLU()

        self._upsample_p4 = P.ResizeBilinear(
            (long_size // 16, long_size // 16), align_corners=True)
        self._upsample_p3 = P.ResizeBilinear((long_size // 8, long_size // 8),
                                             align_corners=True)
        self._upsample_p2 = P.ResizeBilinear((long_size // 4, long_size // 4),
                                             align_corners=True)

        self.concat = P.Concat(axis=1)
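
The upsample targets follow the usual FPN strides: p4, p3 and p2 are resized to 1/16, 1/8 and 1/4 of the (square) long_size input, which presumably matches the resolutions of the reduced c4, c3 and c2 maps so that all levels can be concatenated along the channel axis.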
Example #15
    def construct(self, x, size=None, scale_factor=None, align_corners=False):
        shape = bilinear(x.shape, size, scale_factor, align_corners)
        resize_bilinear = P.ResizeBilinear(shape, align_corners)
        return resize_bilinear(x)
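
The bilinear helper is not shown in this snippet; a hypothetical reconstruction, inferred only from how its result is fed to P.ResizeBilinear:

    def bilinear(shape, size, scale_factor, align_corners):
        # Hypothetical: prefer an explicit (H, W) target; otherwise scale the
        # spatial dims of the NCHW input. align_corners is accepted to match
        # the call site but is not needed to compute the target shape.
        if size is not None:
            return tuple(size)
        return (int(shape[2] * scale_factor), int(shape[3] * scale_factor))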
Example #16
def unpool(size):
    return P.ResizeBilinear(size)
Example #17
    def construct(self, x):
        size = self.shape(x)
        out = self.resnet(x)
        out = self.aspp(out)
        out = P.ResizeBilinear((size[2], size[3]), align_corners=True)(out)
        return out
Example #18
def interpolate(input, size, mode='bilinear', align_corners=False):
    """Apply interpolate function; only bilinear resizing is implemented,
    so the `mode` argument is unused."""
    return P.ResizeBilinear(size=tuple(size), align_corners=align_corners)(input)
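
A brief usage sketch (shapes illustrative):

    import numpy as np
    from mindspore import Tensor

    x = Tensor(np.zeros((1, 3, 20, 20)).astype(np.float32))
    y = interpolate(x, size=(40, 40))  # bilinear upsample to (40, 40)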
Example #19
    ('CTCLoss', {
        'block': P.CTCLoss(),
        'desc_inputs': [Tensor(np.ones([6, 4, 6]).astype(np.float32)),
                        Tensor(np.array([[0, 1], [1, 0], [2, 3], [3, 2]]).astype(np.int64)),
                        Tensor(np.array([1, 2, 3, 4]).astype(np.int32)),
                        Tensor(np.array([6, 6, 6, 6]).astype(np.int32))],
        'desc_bprop': [[4], [6, 4, 6]]}),
    ('L2Loss_1', {
        'block': P.L2Loss(),
        'desc_inputs': [Tensor(np.array([1, 2, 3, 4]), mstype.float32)],
        'desc_bprop': []}),
    ('L2Loss_2', {
        'block': P.L2Loss(),
        'desc_inputs': [Tensor(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]), mstype.float16)],
        'desc_bprop': []}),
    ('ResizeBilinear', {
        'block': P.ResizeBilinear((5, 5)),
        'desc_inputs': [Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mstype.float16)],
        'desc_bprop': [Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mstype.float16)]}),
    ('ResizeBilinearGrad', {
        'block': G.ResizeBilinearGrad(),
        'desc_inputs': [Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32), Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32)],
        'desc_bprop': [Tensor([[[[1, 2, 3, 4, 5]]]], mstype.float32)],
        'skip': ['backward']}),
]

test_case_array_ops = [
    ('SpaceToDepth', {
        'block': P.SpaceToDepth(2),
        'desc_inputs': [[1, 3, 2, 2]],
        'desc_bprop': [[1, 12, 1, 1]]}),
    ('DepthToSpace', {