Example #1
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=3,
              stride=1,
              groups=1):
     super(ConvBNReLU, self).__init__()
     padding = (kernel_size - 1) // 2
     if groups == 1:
         conv = layers.Conv2d(in_channels,
                              out_channels,
                              kernel_size,
                              stride,
                              pad_mode='pad',
                              padding=padding)
     else:
         conv = layers.Conv2d(in_channels,
                              in_channels,
                              kernel_size,
                              stride,
                              pad_mode='pad',
                              padding=padding,
                              group=in_channels)
     self.features = layers.SequentialLayer(
         [conv, layers.BatchNorm2d(out_channels),
          layers.ReLU6()])
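A minimal usage sketch (not from the project source): the default groups=1 path builds a standard convolution, while passing groups equal to the channel count selects the depthwise branch, as MobileNetV2 does in Example #16.

# Illustrative instances only; the channel counts are made up.
pointwise = ConvBNReLU(32, 192, kernel_size=1)                # standard 1x1 conv
depthwise = ConvBNReLU(192, 192, kernel_size=3, stride=2,
                       groups=192)                            # depthwise 3x3 conv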
Example #2
    def __init__(self,
                 class_num=1000,
                 growth_rate=12,
                 block_config=(6, 12, 24, 16),
                 bn_size=4,
                 theta=0.5,
                 bc=False):
        super(DenseNet, self).__init__()

        num_init_feature = 2 * growth_rate
        if bc:
            self.features = layers.SequentialLayer([
                _conv3x3(3, num_init_feature, 1),
                _bn(num_init_feature),
                ReLU()
            ])
        else:
            self.features = layers.SequentialLayer([
                _conv7x7(3, num_init_feature, 2),
                _bn(num_init_feature),
                ReLU(),
                MaxPool2d(kernel_size=2,
                          stride=2,
                          pad_mode='same',
                          data_format='NCHW')
            ])

        num_feature = num_init_feature
        for i, num_layers in enumerate(block_config):

            self.features.append(
                _DenseBlock(num_layers, num_feature, bn_size, growth_rate))
            num_feature = num_feature + growth_rate * num_layers
            if i != len(block_config) - 1:
                self.features.append(
                    _Transition(num_feature, int(num_feature * theta)))
                num_feature = int(num_feature * theta)

        self.norm = _bn(num_feature)
        self.relu = ReLU()
        # self.features.append([_bn(num_feature),ReLU()])
        self.mean = ReduceMean(keep_dims=True)
        self.flatten = layers.Flatten()
        self.end_point = _fc(num_feature, class_num)
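With the defaults above (growth_rate=12, theta=0.5, block_config=(6, 12, 24, 16), bc=False), the channel count evolves as 24 → 96 → 48 → 192 → 96 → 384 → 192 → 384, so the final Dense classifier receives 384 features.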
Example #3
 def __init__(self,
              in_planes=3,
              ngf=64,
              n_layers=9,
              alpha=0.2,
              norm_mode='batch',
              dropout=True,
              pad_mode="CONSTANT"):
     super(ResNetGenerator, self).__init__()
     self.conv_in = ConvNormReLU(in_planes,
                                 ngf,
                                 7,
                                 1,
                                 alpha=alpha,
                                 norm_mode=norm_mode,
                                 pad_mode=pad_mode)
     self.down_1 = ConvNormReLU(ngf, ngf * 2, 3, 2, alpha, norm_mode)
     self.down_2 = ConvNormReLU(ngf * 2, ngf * 4, 3, 2, alpha, norm_mode)
     layer_list = [
         ResidualBlock(
             ngf * 4, norm_mode, dropout=dropout, pad_mode=pad_mode)
     ] * n_layers
     self.residuals = layers.SequentialLayer(layer_list)
     self.up_2 = ConvTransposeNormReLU(ngf * 4, ngf * 2, 3, 2, alpha,
                                       norm_mode)
     self.up_1 = ConvTransposeNormReLU(ngf * 2, ngf, 3, 2, alpha, norm_mode)
     if pad_mode == "CONSTANT":
         self.conv_out = layers.Conv2d(ngf,
                                       3,
                                       kernel_size=7,
                                       stride=1,
                                       pad_mode='pad',
                                       padding=3)
     else:
         pad = layers.Pad(paddings=((0, 0), (0, 0), (3, 3), (3, 3)),
                          mode=pad_mode)
         conv = layers.Conv2d(ngf,
                              3,
                              kernel_size=7,
                              stride=1,
                              pad_mode='pad')
         self.conv_out = layers.SequentialLayer([pad, conv])
     self.activate = Tanh()
Example #4
 def __init__(self, in_channels, out_channels):
     super(_Transition, self).__init__()
     self.layer = layers.SequentialLayer([
         _bn(in_channels),
         layers.ReLU(),
         _conv1x1(in_channels, out_channels),
         AvgPool2d(kernel_size=2,
                   stride=2,
                   pad_mode='same',
                   data_format='NCHW')
     ])
Example #5
 def __init__(self, in_channels, growth_rate, bn_size):
     super(_DenseLayer, self).__init__()
     self.layer = layers.SequentialLayer([
         _bn(in_channels),
         layers.ReLU(),
         _conv1x1(in_channels, bn_size * growth_rate),
         _bn(bn_size * growth_rate),
         layers.ReLU(),
         _conv3x3(bn_size * growth_rate, growth_rate),
     ])
     self.ops = Concat(axis=1)
Example #6
def test_sequential():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    net = layers.SequentialLayer([
        layers.Conv2d(1, 6, 5, pad_mode='valid', weight_init="ones"),
        layers.ReLU(),
        layers.MaxPool2d(kernel_size=2, stride=2)
    ])
    model = Model(net)
    model.compile()
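    # 32x32 input -> 28x28 after the 5x5 'valid' conv -> 14x14 after the
    # 2x2/stride-2 max-pool, so z has shape (1, 6, 14, 14).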
    z = model.predict(ts.ones((1, 1, 32, 32)))
    print(z.asnumpy())
Example #7
 def __init__(self,
              input_channel=1280,
              class_num=1000,
              use_activation=False):
     super(MobileNetV2Head, self).__init__()
     # mobilenet head
     self.head = layers.SequentialLayer(
         [GlobalAvgPooling(),
          layers.Dense(input_channel, class_num)])
     self.use_activation = use_activation
     self.activation = Softmax()
     self._initialize_weights()
Example #8
 def __init__(self, features, class_num=1000):
     super(VGG, self).__init__()
     self.features = features
     self.flatten = layers.Flatten()
     self.classifier = layers.SequentialLayer([
         layers.Dense(512 * 7 * 7, 4096),
         layers.ReLU(),
         layers.Dropout(),
         layers.Dense(4096, 4096),
         layers.ReLU(),
         layers.Dropout(),
         layers.Dense(4096, class_num),
     ])
Example #9
def make_layers(cfg, batch_norm=False):
    Layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            Layers += [layers.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = _conv3x3(in_channels, v)
            if batch_norm:
                Layers += [conv2d, layers.BatchNorm2d(v), layers.ReLU()]
            else:
                Layers += [conv2d, layers.ReLU()]
            in_channels = v
    return layers.SequentialLayer(Layers)
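A usage sketch, assuming the standard VGG16 configuration ('M' marks a pooling stage) and the VGG head from Example #8; the _conv3x3 helper is assumed to be defined in the same module.

# Hypothetical call with the standard VGG16 layout:
vgg16_cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M']
features = make_layers(vgg16_cfg, batch_norm=True)
net = VGG(features, class_num=1000)  # classifier expects 512 * 7 * 7 inputs for 224x224 images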
Example #10
 def __init__(self,
              in_planes=3,
              ndf=64,
              n_layers=3,
              alpha=0.2,
              norm_mode='batch'):
     super(Discriminator, self).__init__()
     kernel_size = 4
     layer_list = [
         layers.Conv2d(in_planes,
                       ndf,
                       kernel_size,
                       2,
                       pad_mode='pad',
                       padding=1),
         layers.LeakyReLU(alpha)
     ]
     nf_mult = ndf
     for i in range(1, n_layers):
         nf_mult_prev = nf_mult
         nf_mult = min(2**i, 8) * ndf
         layer_list.append(
             ConvNormReLU(nf_mult_prev,
                          nf_mult,
                          kernel_size,
                          2,
                          alpha,
                          norm_mode,
                          padding=1))
     nf_mult_prev = nf_mult
     nf_mult = min(2**n_layers, 8) * ndf
     layer_list.append(
         ConvNormReLU(nf_mult_prev,
                      nf_mult,
                      kernel_size,
                      1,
                      alpha,
                      norm_mode,
                      padding=1))
     layer_list.append(
         layers.Conv2d(nf_mult,
                       1,
                       kernel_size,
                       1,
                       pad_mode='pad',
                       padding=1))
     self.features = layers.SequentialLayer(layer_list)
Example #11
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size=4,
              stride=2,
              alpha=0.2,
              norm_mode='batch',
              pad_mode='CONSTANT',
              use_relu=True,
              padding=None):
     super(ConvTransposeNormReLU, self).__init__()
     conv = layers.Conv2dTranspose(in_planes,
                                   out_planes,
                                   kernel_size,
                                   stride=stride,
                                   pad_mode='same')
     norm = layers.BatchNorm2d(out_planes)
     if norm_mode == 'instance':
         # Use BatchNorm2d with batchsize=1, affine=False, training=True instead of InstanceNorm2d
         norm = layers.BatchNorm2d(out_planes, affine=False)
     has_bias = (norm_mode == 'instance')
     if padding is None:
         padding = (kernel_size - 1) // 2
     if pad_mode == 'CONSTANT':
         conv = layers.Conv2dTranspose(in_planes,
                                       out_planes,
                                       kernel_size,
                                       stride,
                                       pad_mode='same',
                                       has_bias=has_bias)
         layer_list = [conv, norm]
     else:
         paddings = ((0, 0), (0, 0), (padding, padding), (padding, padding))
         pad = layers.Pad(paddings=paddings, mode=pad_mode)
         conv = layers.Conv2dTranspose(in_planes,
                                       out_planes,
                                       kernel_size,
                                       stride,
                                       pad_mode='pad',
                                       has_bias=has_bias)
         layer_list = [pad, conv, norm]
     if use_relu:
         relu = layers.ReLU()
         if alpha > 0:
             relu = layers.LeakyReLU(alpha)
         layer_list.append(relu)
     self.features = layers.SequentialLayer(layer_list)
Example #12
File: unet.py  Project: huxiaoman7/tinyms
    def __init__(self, outer_nc, inner_nc, in_planes=None, dropout=False,
                 submodule=None, outermost=False, innermost=False, alpha=0.2, norm_mode='batch'):
        super(UnetSkipConnectionBlock, self).__init__()
        downnorm = layers.BatchNorm2d(inner_nc)
        upnorm = layers.BatchNorm2d(outer_nc)
        use_bias = False
        if norm_mode == 'instance':
            downnorm = layers.BatchNorm2d(inner_nc, affine=False)
            upnorm = layers.BatchNorm2d(outer_nc, affine=False)
            use_bias = True
        if in_planes is None:
            in_planes = outer_nc
        downconv = layers.Conv2d(in_planes, inner_nc, kernel_size=4,
                                 stride=2, padding=1, has_bias=use_bias, pad_mode='pad')
        downrelu = layers.LeakyReLU(alpha)
        uprelu = layers.ReLU()

        if outermost:
            upconv = layers.Conv2dTranspose(inner_nc * 2, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, pad_mode='pad')
            down = [downconv]
            up = [uprelu, upconv, layers.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = layers.Conv2dTranspose(inner_nc, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, has_bias=use_bias, pad_mode='pad')
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = layers.Conv2dTranspose(inner_nc * 2, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, has_bias=use_bias, pad_mode='pad')
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            model = down + [submodule] + up
            if dropout:
                model.append(layers.Dropout(0.5))

        self.model = layers.SequentialLayer(model)
        self.skip_connections = not outermost
        self.concat = Concat(axis=1)
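A sketch of how such blocks are usually nested into a full U-Net generator (an assumption in the pix2pix style, not taken from unet.py); ngf, in_planes, out_planes and the number of levels are illustrative only.

# Hypothetical nesting, built from the innermost block outwards:
ngf, in_planes, out_planes = 64, 3, 3
block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, innermost=True)
block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, submodule=block)
block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, submodule=block)
block = UnetSkipConnectionBlock(ngf, ngf * 2, submodule=block)
unet = UnetSkipConnectionBlock(out_planes, ngf, in_planes=in_planes,
                               submodule=block, outermost=True)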
Example #13
    def __init__(self, in_channel, out_channel, stride=1):
        super(ResidualBlock, self).__init__()
        channel = out_channel // self.expansion
        self.conv1 = _conv1x1(in_channel, channel, stride=1)
        self.bn1 = _bn(channel)
        self.conv2 = _conv3x3(channel, channel, stride=stride)
        self.bn2 = _bn(channel)
        self.conv3 = _conv1x1(channel, out_channel, stride=1)
        self.bn3 = _bn_last(out_channel)
        self.relu = layers.ReLU()

        self.down_sample = False
        self.down_sample_layer = None
        if stride != 1 or in_channel != out_channel:
            self.down_sample = True
        if self.down_sample:
            self.down_sample_layer = layers.SequentialLayer(
                [_conv1x1(in_channel, out_channel, stride), _bn(out_channel)])
Example #14
    def __init__(self,
                 width_mult=1.,
                 round_nearest=8,
                 input_channel=32,
                 last_channel=1280):
        super(MobileNetV2Backbone, self).__init__()
        # setting of inverted residual blocks
        self.cfgs = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult,
                                        round_nearest)
        self.last_channel = _make_divisible(
            last_channel * max(1.0, width_mult), round_nearest)
        backbone_layers = [ConvBNReLU(3, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in self.cfgs:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                backbone_layers.append(
                    InvertedResidual(input_channel,
                                     output_channel,
                                     stride,
                                     expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        backbone_layers.append(
            ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        self.backbone = layers.SequentialLayer(backbone_layers)
        self._initialize_weights()
Example #15
    def _make_layer(self, block, layer_num, in_channel, out_channel, stride):
        """
        Make stage network of ResNet.

        Args:
            block (layers.Layer): Resnet block.
            layer_num (int): Layer number.
            in_channel (int): Input channel.
            out_channel (int): Output channel.
            stride (int): Stride size for the first convolutional layer.

        Returns:
            SequentialLayer, the output layer.

        Examples:
            >>> _make_layer(ResidualBlock, 3, 128, 256, 2)
        """
        layer = layers.SequentialLayer([block(in_channel, out_channel, stride=stride)])
        for _ in range(1, layer_num):
            resnet_block = block(out_channel, out_channel, stride=1)
            layer.append(resnet_block)

        return layer
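For reference, a hypothetical set of calls (not shown in the source) for the four stages of a ResNet-50-style network built with this helper, using ResidualBlock from Example #13:

        # Inside ResNet.__init__ (illustrative channel sizes for ResNet-50):
        self.layer1 = self._make_layer(ResidualBlock, 3, in_channel=64, out_channel=256, stride=1)
        self.layer2 = self._make_layer(ResidualBlock, 4, in_channel=256, out_channel=512, stride=2)
        self.layer3 = self._make_layer(ResidualBlock, 6, in_channel=512, out_channel=1024, stride=2)
        self.layer4 = self._make_layer(ResidualBlock, 3, in_channel=1024, out_channel=2048, stride=2)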
Example #16
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = stride == 1 and inp == oup

        residual_layers = []
        if expand_ratio != 1:
            residual_layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        residual_layers.extend([
            ConvBNReLU(hidden_dim,
                       hidden_dim,
                       stride=stride,
                       groups=hidden_dim),
            layers.Conv2d(hidden_dim,
                          oup,
                          kernel_size=1,
                          stride=1,
                          has_bias=False),
            layers.BatchNorm2d(oup),
        ])
        self.conv = layers.SequentialLayer(residual_layers)
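The construct side is not included above; a minimal sketch (an assumption, not the project's code) of how the SequentialLayer and the skip connection are typically combined:

    # Hypothetical construct(); the identity shortcut is added only when
    # use_res_connect is True (stride == 1 and inp == oup).
    def construct(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            out = x + out
        return out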