Example #1
    def __init__(self, ndf=64, n_layers=3, use_sigmoid=False):
        super(NLayerDiscriminator, self).__init__()
        self.model = nn.HybridSequential()
        kw = 4
        padw = 1
        with self.name_scope():
            self.model.add(
                nn.Conv2D(ndf, kernel_size=kw, strides=2, padding=padw),
                nn.LeakyReLU(0.2),
            )

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult = min(2**n, 8)
                self.model.add(
                    nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=2, padding=padw),
                    nn.InstanceNorm(),
                    nn.LeakyReLU(0.2),
                )

            nf_mult = min(2**n_layers, 8)
            self.model.add(
                nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=1, padding=padw),
                nn.InstanceNorm(),
                nn.LeakyReLU(0.2),
            )
            self.model.add(
                nn.Conv2D(1, kernel_size=kw, strides=1, padding=padw)
            )
            if use_sigmoid:
                self.model.add(nn.Activation('sigmoid'))
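The snippet above only shows the constructor; the hybrid_forward that applies self.model is not included. As a rough, self-contained sketch of how the same PatchGAN-style stack behaves (rebuilt directly as an nn.HybridSequential with the default ndf=64, n_layers=3), one could run:

import mxnet as mx
from mxnet.gluon import nn

# standalone rebuild of the layer stack added in Example #1 (70x70 PatchGAN-style discriminator)
ndf, n_layers, kw, padw = 64, 3, 4, 1
disc = nn.HybridSequential()
disc.add(nn.Conv2D(ndf, kernel_size=kw, strides=2, padding=padw),
         nn.LeakyReLU(0.2))
for n in range(1, n_layers):
    nf_mult = min(2 ** n, 8)
    disc.add(nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=2, padding=padw),
             nn.InstanceNorm(),
             nn.LeakyReLU(0.2))
nf_mult = min(2 ** n_layers, 8)
disc.add(nn.Conv2D(ndf * nf_mult, kernel_size=kw, strides=1, padding=padw),
         nn.InstanceNorm(),
         nn.LeakyReLU(0.2),
         nn.Conv2D(1, kernel_size=kw, strides=1, padding=padw))

disc.initialize()
x = mx.nd.random.normal(shape=(1, 3, 256, 256))
print(disc(x).shape)  # one logit per image patch: (1, 1, 30, 30) for a 256x256 input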
Example #2
 def __init__(self, **kwargs):
     super(ResnetGenerator, self).__init__(**kwargs)
     num_blocks = 9
     with self.name_scope():
         self.module = nn.Sequential()
         with self.module.name_scope():
             self.module.add(nn.ReflectionPad2D(3))
             self.module.add(nn.Conv2D(64, kernel_size=7, padding=0))
             self.module.add(nn.InstanceNorm())
             self.module.add(nn.Activation(activation='relu'))
             n_downsampling = 2
             for i in range(n_downsampling):
                 mult = 2**i
                 self.module.add(
                     nn.Conv2D(128 * mult,
                               kernel_size=3,
                               strides=2,
                               padding=1))
                 self.module.add(nn.InstanceNorm())
                 self.module.add(nn.Activation(activation='relu'))
             for i in range(num_blocks):
                 self.module.add(ResnetBlock())
             for i in range(n_downsampling):
                 mult = 2**(n_downsampling - i)
                 self.module.add(
                     nn.Conv2DTranspose(int(64 * mult / 2),
                                        kernel_size=3,
                                        strides=2,
                                        padding=1,
                                        output_padding=1))
                 self.module.add(nn.InstanceNorm())
                 self.module.add(nn.Activation(activation='relu'))
             self.module.add(nn.ReflectionPad2D(3))
             self.module.add(nn.Conv2D(3, kernel_size=7, padding=0))
             self.module.add(nn.Activation(activation='tanh'))
Example #3
    def __init__(self,
                 channels,
                 bn_use_global_stats=False,
                 first_fraction=0.5,
                 inst_first=True,
                 **kwargs):
        super(IBN, self).__init__(**kwargs)
        self.inst_first = inst_first
        h1_channels = int(math.floor(channels * first_fraction))
        h2_channels = channels - h1_channels
        self.split_sections = [h1_channels, h2_channels]

        if self.inst_first:
            self.inst_norm = nn.InstanceNorm(
                in_channels=h1_channels,
                scale=True)
            self.batch_norm = nn.BatchNorm(
                in_channels=h2_channels,
                use_global_stats=bn_use_global_stats)

        else:
            self.batch_norm = nn.BatchNorm(
                in_channels=h1_channels,
                use_global_stats=bn_use_global_stats)
            self.inst_norm = nn.InstanceNorm(
                in_channels=h2_channels,
                scale=True)
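The constructor above only registers the two normalization layers; an IBN unit is applied by splitting the input along the channel axis, instance-normalizing one slice and batch-normalizing the other, then concatenating. The original class's hybrid_forward is not shown here, so the following is a minimal sketch assuming the standard IBN formulation:

import mxnet as mx
from mxnet.gluon import nn

class SimpleIBN(nn.HybridBlock):
    """Instance-Batch Normalization sketch: instance-norm one channel slice, batch-norm the rest."""
    def __init__(self, channels, first_fraction=0.5, **kwargs):
        super(SimpleIBN, self).__init__(**kwargs)
        h1 = int(channels * first_fraction)
        self.split_sections = [h1, channels - h1]
        with self.name_scope():
            self.inst_norm = nn.InstanceNorm(in_channels=h1, scale=True)
            self.batch_norm = nn.BatchNorm(in_channels=channels - h1)

    def hybrid_forward(self, F, x):
        # assumed forward: split on the channel axis, normalize each slice, re-concatenate
        x1 = F.slice_axis(x, axis=1, begin=0, end=self.split_sections[0])
        x2 = F.slice_axis(x, axis=1, begin=self.split_sections[0],
                          end=self.split_sections[0] + self.split_sections[1])
        return F.concat(self.inst_norm(x1), self.batch_norm(x2), dim=1)

ibn = SimpleIBN(64)
ibn.initialize()
print(ibn(mx.nd.random.normal(shape=(2, 64, 16, 16))).shape)  # (2, 64, 16, 16)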
Example #4
    def build_conv_block(self, dim, padding_type, use_dropout):
        conv_block = nn.HybridSequential()
        p = 0
        with self.name_scope():
            if padding_type == 'reflect':
                conv_block.add(nn.ReflectionPad2D(1))
            elif padding_type == 'zero':
                p = 1
            else:
                raise NotImplementedError('padding [%s] is not implemented' %
                                          padding_type)

            conv_block.add(nn.Conv2D(dim, kernel_size=3, padding=p),
                           nn.InstanceNorm(), nn.Activation('relu'))
            if use_dropout:
                conv_block.add(nn.Dropout(0.5))

            p = 0
            if padding_type == 'reflect':
                conv_block.add(nn.ReflectionPad2D(1))
            elif padding_type == 'zero':
                p = 1
            else:
                raise NotImplementedError('padding [%s] is not implemented' %
                                          padding_type)
            conv_block.add(nn.Conv2D(dim, kernel_size=3, padding=p),
                           nn.InstanceNorm())

        return conv_block
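The block returned by build_conv_block is normally wrapped in a residual connection, i.e. the surrounding ResnetBlock computes x + conv_block(x). That hybrid_forward is not part of the snippet, so the sketch below is a self-contained stand-in for the 'reflect' padding case under that assumption:

import mxnet as mx
from mxnet.gluon import nn

class SimpleResnetBlock(nn.HybridBlock):
    """Reflection-padded conv block wrapped in a residual (skip) connection."""
    def __init__(self, dim, **kwargs):
        super(SimpleResnetBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv_block = nn.HybridSequential()
            self.conv_block.add(nn.ReflectionPad2D(1),
                                nn.Conv2D(dim, kernel_size=3),
                                nn.InstanceNorm(), nn.Activation('relu'),
                                nn.ReflectionPad2D(1),
                                nn.Conv2D(dim, kernel_size=3),
                                nn.InstanceNorm())

    def hybrid_forward(self, F, x):
        # assumed residual forward: the skip connection keeps the shape unchanged
        return x + self.conv_block(x)

block = SimpleResnetBlock(64)
block.initialize()
print(block(mx.nd.random.normal(shape=(1, 64, 32, 32))).shape)  # (1, 64, 32, 32)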
Example #5
 def get_blocks(self):
     model = nn.Sequential()
     with model.name_scope():
         model.add(nn.ReflectionPad2D(1))
         model.add(nn.Conv2D(256, kernel_size=3))
         model.add(nn.InstanceNorm())
         model.add(nn.Activation(activation='relu'))
         model.add(nn.ReflectionPad2D(1))
         model.add(nn.Conv2D(256, kernel_size=3))
         model.add(nn.InstanceNorm())
     return model
Example #6
 def __init__(self, filters):
     super(ResBlock, self).__init__()
     self.net = nn.HybridSequential()
     with self.net.name_scope():
         self.net.add(
             nn.Conv2D(filters, kernel_size=3, padding=1),
             nn.InstanceNorm(),
             nn.Activation('relu'),
             nn.Conv2D(filters, kernel_size=3, padding=1),
             nn.InstanceNorm(),
             nn.Activation('relu')
         )
Example #7
 def __init__(self,
              outer_nc,
              inner_nc,
              submodule=None,
              outermost=False,
              innermost=False,
              use_dropout=False):
     super(UnetSkipConnectionBlock, self).__init__()
     self.outermost = outermost
     downconv = nn.Conv2D(inner_nc, kernel_size=4, strides=2, padding=1)
     downrelu = nn.LeakyReLU(0.2)
     downnorm = nn.InstanceNorm()
     uprelu = nn.Activation('relu')
     upnorm = nn.InstanceNorm()
     self.model = nn.HybridSequential()
     with self.model.name_scope():
         if outermost:
             self.model.add(downconv)
             if submodule is not None:
                 self.model.add(submodule)
             self.model.add(
                 uprelu,
                 nn.Conv2DTranspose(outer_nc,
                                    kernel_size=4,
                                    strides=2,
                                    padding=1), nn.Activation('tanh'))
         elif innermost:
             self.model.add(
                 downrelu, downconv, uprelu,
                 nn.Conv2DTranspose(outer_nc,
                                    kernel_size=4,
                                    strides=2,
                                    padding=1), upnorm)
         else:
             self.model.add(
                 downrelu,
                 downconv,
                 downnorm,
             )
             if submodule is not None:
                 self.model.add(submodule)
             self.model.add(
                 uprelu,
                 nn.Conv2DTranspose(outer_nc,
                                    kernel_size=4,
                                    strides=2,
                                    padding=1),
                 upnorm,
             )
             if use_dropout:
                 self.model.add(nn.Dropout(0.5))
Example #8
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 activate=True,
                 **kwargs):
        super(IBNbConvBlock, self).__init__(**kwargs)
        self.activate = activate

        with self.name_scope():
            self.conv = nn.Conv2D(channels=out_channels,
                                  kernel_size=kernel_size,
                                  strides=strides,
                                  padding=padding,
                                  dilation=dilation,
                                  groups=groups,
                                  use_bias=use_bias,
                                  in_channels=in_channels)
            self.inst_norm = nn.InstanceNorm(in_channels=out_channels,
                                             scale=True)
            if self.activate:
                self.activ = nn.Activation("relu")
Example #9
    def __init__(self,
                 output_nc,
                 ngf=64,
                 use_dropout=False,
                 n_blocks=6,
                 padding_type='reflect'):
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.output_nc = output_nc
        self.ngf = ngf
        self.model = nn.HybridSequential()
        with self.name_scope():
            self.model.add(nn.ReflectionPad2D(3),
                           nn.Conv2D(ngf, kernel_size=7, padding=0),
                           nn.InstanceNorm(), nn.Activation('relu'))

            n_downsampling = 2
            for i in range(n_downsampling):
                mult = 2**i
                self.model.add(
                    nn.Conv2D(ngf * mult * 2,
                              kernel_size=3,
                              strides=2,
                              padding=1), nn.InstanceNorm(),
                    nn.Activation('relu'))

            mult = 2**n_downsampling
            for i in range(n_blocks):
                self.model.add(
                    ResnetBlock(ngf * mult,
                                padding_type=padding_type,
                                use_dropout=use_dropout))

            for i in range(n_downsampling):
                mult = 2**(n_downsampling - i)
                self.model.add(
                    nn.Conv2DTranspose(int(ngf * mult / 2),
                                       kernel_size=3,
                                       strides=2,
                                       padding=1,
                                       output_padding=1), nn.InstanceNorm(),
                    nn.Activation('relu'))
            self.model.add(nn.ReflectionPad2D(3),
                           nn.Conv2D(output_nc, kernel_size=7, padding=0),
                           nn.Activation('tanh'))
Example #10
 def __init__(self):
     super(Discriminator, self).__init__()
     self.net = nn.HybridSequential()
     with self.net.name_scope():
         self.net.add(
             nn.Conv2D(64, kernel_size=3, strides=2, padding=1),
             nn.LeakyReLU(0.2),
             nn.Conv2D(128, kernel_size=3, strides=2, padding=1),
             nn.InstanceNorm(),
             nn.LeakyReLU(0.2),
             nn.Conv2D(256, kernel_size=3, strides=2, padding=1),
             nn.InstanceNorm(),
             nn.LeakyReLU(0.2),
             nn.Conv2D(512, kernel_size=3, strides=2, padding=1),
             nn.InstanceNorm(),
             nn.LeakyReLU(0.2),
             nn.Conv2D(1, kernel_size=1, strides=1),
         )
Example #11
 def __init__(self, dr_rate, **kwargs):
     super(LipNet, self).__init__(**kwargs)
     with self.name_scope():
         self.conv1 = nn.Conv3D(32, kernel_size=(3, 5, 5), strides=(1, 2, 2), padding=(1, 2, 2))
         self.bn1 = nn.InstanceNorm(in_channels=32)
         self.dr1 = nn.Dropout(dr_rate, axes=(1, 2))
         self.pool1 = nn.MaxPool3D((1, 2, 2), (1, 2, 2))
         self.conv2 = nn.Conv3D(64, kernel_size=(3, 5, 5), strides=(1, 1, 1), padding=(1, 2, 2))
         self.bn2 = nn.InstanceNorm(in_channels=64)
         self.dr2 = nn.Dropout(dr_rate, axes=(1, 2))
         self.pool2 = nn.MaxPool3D((1, 2, 2), (1, 2, 2))
         self.conv3 = nn.Conv3D(96, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding=(1, 2, 2))
         self.bn3 = nn.InstanceNorm(in_channels=96)
         self.dr3 = nn.Dropout(dr_rate, axes=(1, 2))
         self.pool3 = nn.MaxPool3D((1, 2, 2), (1, 2, 2))
         self.gru1 = rnn.GRU(256, bidirectional=True)
         self.gru2 = rnn.GRU(256, bidirectional=True)
         self.dense = nn.Dense(27+1, flatten=False)
Example #12
def ReconetConvBlock(channels,
                     kernel,
                     stride,
                     bias=True,
                     ifin=True,
                     ifinf=False):
    out = nn.HybridSequential()
    out.add(ConvLayer(channels, kernel, stride, bias, ifinf=ifinf),
            nn.InstanceNorm(scale=True) if ifin else nn.BatchNorm(scale=True),
            nn.Activation('relu'))
    return out
Example #13
    def __init__(self, filters):
        super(upconv_inst_relu, self).__init__()

        self.filters = filters
        self.net = nn.HybridSequential()
        with self.net.name_scope():
            self.net.add(
                upconv(filters),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )
Example #14
    def __init__(self, filters):
        super(conv_inst_relu, self).__init__()
        self.filters = filters
        self.net = nn.HybridSequential()
        with self.net.name_scope():
            self.net.add(
                nn.Conv2D(self.filters, kernel_size=3, padding=1, strides=2),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )
Example #15
    def __init__(self, in_channel, style_dim):
        super().__init__()

        self.norm = nn.InstanceNorm(in_channels=in_channel)
        self.style = EqualLinear(style_dim, in_channel * 2)
        self.style.initialize()

        mx_params = self.style.collect_params()
        for k in mx_params.keys():
            if 'bias' in k:
                mx_params[k].data()[:in_channel] = 1
                mx_params[k].data()[in_channel:] = 0
Example #16
 def __init__(self,
              channels,
              kernel,
              ifin=True,
              ifinf=False,
              *args,
              **kwargs):
     super(ReconetUpSampleBlock, self).__init__(*args, **kwargs)
     with self.name_scope():
         self.conv = ConvLayer(channels, kernel, 1, bias=True, ifinf=ifinf)
         self.norm = nn.InstanceNorm() if ifin else nn.BatchNorm()
         self.act = nn.Activation('relu')
     return
Example #17
 def __init__(self, **kwargs):
     super(NlayerDiscriminator, self).__init__(**kwargs)
     kw = 4
     padw = 1
     with self.name_scope():
         self.model = nn.Sequential()
         with self.model.name_scope():
             self.model.add(
                 nn.Conv2D(64, kernel_size=kw, strides=2, padding=padw))
             self.model.add(nn.LeakyReLU(0.2))
             self.model.add(
                 nn.Conv2D(128, kernel_size=kw, strides=2, padding=padw))
             self.model.add(nn.InstanceNorm())
             self.model.add(nn.LeakyReLU(0.2))
             self.model.add(
                 nn.Conv2D(256, kernel_size=kw, strides=2, padding=padw))
             self.model.add(nn.InstanceNorm())
             self.model.add(nn.LeakyReLU(0.2))
             self.model.add(
                 nn.Conv2D(512, kernel_size=kw, strides=2, padding=padw))
             self.model.add(nn.InstanceNorm())
             self.model.add(nn.LeakyReLU(0.2))
             self.model.add(nn.Activation(activation='sigmoid'))
Example #18
 def __init__(self,
              channels,
              kernel=1,
              stride=1,
              ifin=True,
              ifinf=False,
              *args,
              **kwargs):
     super(ReconetResidualBlock, self).__init__(*args, **kwargs)
     with self.name_scope():
         self.conv1 = ConvLayer(channels,
                                kernel,
                                stride,
                                bias=False,
                                ifinf=ifinf)
         self.norm1 = nn.InstanceNorm() if ifin else nn.BatchNorm()
         self.conv2 = ConvLayer(channels,
                                kernel,
                                stride,
                                bias=False,
                                ifinf=ifinf)
         self.norm2 = nn.InstanceNorm() if ifin else nn.BatchNorm()
         self.act = nn.Activation('relu')
Example #19
 def __init__(self, NumLayer, filters):
     super(deconv_bn_relu, self).__init__()
     self.NumLayer = NumLayer
     self.filters = filters
     self.net = nn.HybridSequential()
     with self.net.name_scope():
         for i in range(NumLayer-1):
             self.net.add(
                  nn.Conv2DTranspose(self.filters, kernel_size=4, padding=1, strides=2),
                 nn.InstanceNorm(),
                 nn.Activation('relu')
             )
     self.net.add(
         nn.Conv2DTranspose(3, kernel_size=4, padding=1, strides=2)
     )
Example #20
 def __init__(self,
              channels,
              latent_size,
              use_wscale=False,
              prefix='',
              **kwargs):
     super(AdaIN, self).__init__(prefix=prefix, **kwargs)
     self.channels = channels
     self.latent_size = latent_size
     self.affine = DenseW(channels * 2,
                          in_units=latent_size,
                          use_bias=True,
                          gain=1,
                          use_wscale=use_wscale,
                          flatten=False,
                          prefix=prefix + 'dense_affine_')
     self.instance = nn.InstanceNorm(axis=1,
                                     center=False,
                                     scale=False,
                                     in_channels=channels,
                                     prefix=prefix + 'norm_')
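The AdaIN-style constructors above (Examples #15 and #20) only set up the affine projection and the InstanceNorm; the adaptive step itself scales and shifts the normalized activations with per-channel statistics predicted from the latent vector. A minimal self-contained sketch of that forward pass, using nn.Dense in place of the EqualLinear/DenseW layers (which are not defined in these snippets):

import mxnet as mx
from mxnet.gluon import nn

class SimpleAdaIN(nn.HybridBlock):
    """Adaptive InstanceNorm sketch: per-channel scale/shift predicted from a latent vector."""
    def __init__(self, channels, latent_size, **kwargs):
        super(SimpleAdaIN, self).__init__(**kwargs)
        self.channels = channels
        with self.name_scope():
            # stand-in for EqualLinear / DenseW; Example #15 additionally
            # initializes the bias so the predicted gamma starts at 1
            self.affine = nn.Dense(channels * 2, in_units=latent_size, flatten=False)
            self.instance = nn.InstanceNorm(axis=1, center=False, scale=False,
                                            in_channels=channels)

    def hybrid_forward(self, F, x, w):
        style = self.affine(w)                                # (N, 2*C)
        gamma, beta = F.split(style, num_outputs=2, axis=1)   # (N, C) each
        gamma = gamma.reshape((0, 0, 1, 1))                   # broadcast over H, W
        beta = beta.reshape((0, 0, 1, 1))
        return F.broadcast_add(F.broadcast_mul(self.instance(x), gamma), beta)

adain = SimpleAdaIN(channels=64, latent_size=128)
adain.initialize()
x = mx.nd.random.normal(shape=(2, 64, 16, 16))
w = mx.nd.random.normal(shape=(2, 128))
print(adain(x, w).shape)  # (2, 64, 16, 16)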
Example #21
 def __init__(self):
     super(Generator_256, self).__init__()
     self.net = nn.HybridSequential()
     with self.net.name_scope():
         self.net.add(
             nn.ReflectionPad2D(3),
             nn.Conv2D(32, kernel_size=7, strides=1),
             nn.InstanceNorm(),
             nn.Activation('relu'),  #c7s1-32
             conv_inst_relu(64),
             conv_inst_relu(128),
         )
         for _ in range(9):
             self.net.add(
                     ResBlock(128)
             )
         self.net.add(
             upconv_inst_relu(64),
             upconv_inst_relu(32),
             nn.ReflectionPad2D(3),
              nn.Conv2D(3, kernel_size=7, strides=1),
             nn.Activation('sigmoid')
         )
Example #22
    def __init__(self, in_channels, out_channels, strides, use_inst_norm,
                 bn_use_global_stats, **kwargs):
        super(IBNbResUnit, self).__init__(**kwargs)
        self.use_inst_norm = use_inst_norm
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        with self.name_scope():
            self.body = ResBottleneck(in_channels=in_channels,
                                      out_channels=out_channels,
                                      strides=strides,
                                      bn_use_global_stats=bn_use_global_stats,
                                      conv1_stride=False)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activate=False)
            if self.use_inst_norm:
                self.inst_norm = nn.InstanceNorm(in_channels=out_channels,
                                                 scale=True)
            self.activ = nn.Activation("relu")
Example #23
def get_transform_network():
    net = nn.Sequential()
    net.add(
        nn.Conv2D(32, kernel_size=9, strides=1, padding=4),
        nn.InstanceNorm(),
        nn.Activation('relu'),
        nn.Conv2D(64, kernel_size=3, strides=2, padding=1),
        nn.InstanceNorm(),
        nn.Activation('relu'),
        nn.Conv2D(128, kernel_size=3, strides=2, padding=1),
        nn.InstanceNorm(),
        nn.Activation('relu'),
        ResidualBlock(),  # the five ResidualBlocks keep the same channel count and spatial size as their input
        ResidualBlock(),
        ResidualBlock(),
        ResidualBlock(),
        ResidualBlock(),
        nn.Conv2DTranspose(64,
                           kernel_size=3,
                           strides=2,
                           padding=1,
                           output_padding=1),
        nn.InstanceNorm(),
        nn.Activation('relu'),
        nn.Conv2DTranspose(32,
                           kernel_size=3,
                           strides=2,
                           padding=1,
                           output_padding=1),
        nn.InstanceNorm(),
        nn.Activation('relu'),
        nn.Conv2D(3, kernel_size=9, strides=1, padding=4),
        nn.InstanceNorm(),
        nn.Activation('tanh'))
    # before feeding an image into the CNN, normalize it so its values lie in the range 0-1
    return net
Example #24
def test_instancenorm():
    layer = nn.InstanceNorm(in_channels=10)
    check_layer_forward(layer, (2, 10, 10, 10))
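For reference, the layer can also be exercised directly; the small sketch below checks that nn.InstanceNorm normalizes each (sample, channel) slice over its spatial axes (with beta=0 and gamma=1 right after initialization):

import mxnet as mx
from mxnet.gluon import nn

norm = nn.InstanceNorm(in_channels=10)
norm.initialize()
x = mx.nd.random.normal(loc=3.0, scale=2.0, shape=(2, 10, 10, 10))
y = norm(x)
print(y.mean(axis=(2, 3)))        # per-(sample, channel) means, close to 0
print((y ** 2).mean(axis=(2, 3))) # per-(sample, channel) second moments, close to 1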
Example #25
from spectral_norm import SNConv2D, SNDense


class InstanceNorm2D(nn.HybridBlock):
    def __init__(self, dim=1, eps=1e-5):
        super(InstanceNorm2D, self).__init__()
        self.eps = eps

    def hybrid_forward(self, F, x):
        diff = F.broadcast_sub(x, x.mean((0, 1), exclude=True, keepdims=True))
        var = diff.square().mean((0, 1), exclude=True, keepdims=True)
        out = F.broadcast_div(diff, ((var + self.eps).sqrt()))
        return out


InstanceNorm2D = lambda dim=1, eps=1e-5: nn.InstanceNorm(
    axis=dim, epsilon=eps, center=False)


def var(x, dim, keepdims=False, unbiased=True):
    F = mx.nd if isinstance(x, mx.nd.NDArray) else mx.sym
    s = F.broadcast_sub(x,
                        x.mean(dim,
                               keepdims=True)).square().sum(dim,
                                                            keepdims=keepdims)
    with autograd.pause():
        shape = x.shape_array()
        if isinstance(dim, (list, tuple)):
            n = F.prod(F.concat(*[shape[d] for d in dim], dim=0))
        else:
            n = shape[dim]
        if unbiased: