Example #1
0
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, latent=256, usetanh=False):
            super(Encoder, self).__init__()
            usetanh = True  # note: overrides the usetanh argument, so the latent activation is always tanh
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0 #int(np.ceil((kernel_size - 1) / 2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = 2 ** n
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult, use_global_stats=istest))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = 2 ** n_layers
            self.model.add(Conv2D(channels=latent, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels =latent, use_global_stats=istest))
            if usetanh:
                self.model.add(Activation(activation='tanh'))
            else:
                self.model.add(LeakyReLU(alpha=0.2))
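A quick size check for this encoder (not part of the example): with kernel_size=5, strides=2 and padding=0, each Conv2D shrinks the feature map by the usual formula out = floor((in + 2*padding - kernel) / stride) + 1. Assuming a hypothetical 128x128 input and the default n_layers=3 (four convolutions in total):

size = 128                                # illustrative input resolution, not from the source
for _ in range(4):                        # initial conv + 2 intermediate blocks + latent conv
    size = (size + 2 * 0 - 5) // 2 + 1    # kernel_size=5, strides=2, padding=0
print(size)                               # 128 -> 62 -> 29 -> 13 -> 5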
Example #2
0
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            relu1 = LeakyReLU(alpha=0.2)
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            conv2 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            res_block = [conv1, norm1, relu1, conv2, norm2, relu2]
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)
Example #3
0
    def __init__(self, opts):
        super(BasicBlock, self).__init__()
        self.bblock = HybridSequential()
        if opts.bottle_neck:
            if opts.norm_type == 'batch':
                self.bblock.add(NormLayer())
            elif opts.norm_type == 'group':
                self.bblock.add(GroupNorm())
            elif opts.norm_type == 'instance':
                self.bblock.add(InstanceNorm())
            if opts.activation in ['leaky']:
                self.bblock.add(LeakyReLU(alpha=opts.alpha))
            else:
                self.bblock.add(Activation(opts.activation))
            self.bblock.add(Conv3D(channels=int(opts.growth_rate * 4), kernel_size=(opts.zKernelSize, 1, 1),
                              strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 0, 0)))
            if opts.drop_out > 0:
                self.bblock.add(Dropout(opts.drop_out))
        if opts.norm_type == 'batch':
            self.bblock.add(NormLayer())
        elif opts.norm_type == 'group':
            self.bblock.add(GroupNorm(in_channels=int(opts.growth_rate * 4)))
        elif opts.norm_type == 'instance':
            self.bblock.add(InstanceNorm())

        if opts.activation in ['leaky']:
            self.bblock.add(LeakyReLU(opts.alpha))
        else:
            self.bblock.add(Activation(opts.activation))
        self.bblock.add(Conv3D(channels=int(opts.growth_rate), kernel_size=(opts.zKernelSize, 3, 3),
                          strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 1, 1)))
        if opts.drop_out > 0:
            self.bblock.add(Dropout(opts.drop_out))
Example #4
0
 def __init__(self, in_channels, n_layers=3, ndf=64, use_sigmoid=False, use_bias=False):
     super(Discriminator, self).__init__()
     # The block below configures a standard 2x down-sampling convolution
     kernel_size=4
     padding = int(np.ceil((kernel_size-1)/2))
     self.model = nn.HybridSequential()
     # First, a convolution maps the input to the first feature map
     self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2, padding=padding, use_bias=use_bias, in_channels=in_channels))
     self.model.add(LeakyReLU(alpha=0.2))
     
     nf_mult = 1
     for n in range(1, n_layers):
         nf_mult_prev = nf_mult
         nf_mult = min(2**n, 8)
         self.model.add(
             Conv2D(channels=ndf*nf_mult, kernel_size=kernel_size, strides=2, padding=padding, use_bias=use_bias, in_channels=ndf*nf_mult_prev),
             BatchNorm(momentum=0.1, in_channels=ndf*nf_mult),
             LeakyReLU(alpha=0.2))
     
     # If n_layers is small and the channel count has not yet reached 512, raise the dimensionality a bit more
     nf_mult_prev = nf_mult
     nf_mult = min(2**n_layers, 8)
     self.model.add(
         Conv2D(channels=ndf*nf_mult, kernel_size=kernel_size, strides=1, padding=padding, use_bias=use_bias, in_channels=ndf*nf_mult_prev),
         BatchNorm(momentum=0.1, in_channels=ndf*nf_mult),
         LeakyReLU(alpha=0.2))
     # Output: why is the output channel set to 1? Each position of the 1-channel map scores one patch as real/fake (PatchGAN-style)
     self.model.add(Conv2D(channels=1, kernel_size=kernel_size, strides=1, padding=padding, use_bias=True, in_channels=ndf*nf_mult))
     if use_sigmoid:
         self.model.add(Activation('sigmoid'))
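A minimal usage sketch for the block above (not from the source; the 3-channel input and 256x256 size are assumptions, the class's forward method is not shown, so self.model is called directly, and the snippet's own imports are assumed to be in scope):

import mxnet as mx

netD = Discriminator(in_channels=3)
netD.initialize(mx.init.Normal(0.02))
x = mx.nd.random.normal(shape=(1, 3, 256, 256))
patch_scores = netD.model(x)              # 1-channel map: one real/fake score per patch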
Example #5
0
    def __init__(self, count: int, depth: int) -> None:
        super(HybridSequential, self).__init__()

        self._count = count
        self._depth = depth

        with self.name_scope():
            self.add(Conv2D(64, 4, 2, 1, in_channels=depth))
            self.add(LeakyReLU(0.2))
            self.add(Conv2D(128, 4, 2, 1, use_bias=False, in_channels=64))
            self.add(BatchNorm(momentum=0.1, in_channels=128))
            self.add(LeakyReLU(0.2))
            self.add(Conv2D(256, 4, 2, 1, use_bias=False, in_channels=128))
            self.add(BatchNorm(momentum=0.1, in_channels=256))
            self.add(LeakyReLU(0.2))
            self.add(Conv2D(512, 4, padding=1, use_bias=False,
                            in_channels=256))
            self.add(BatchNorm(momentum=0.1, in_channels=512))
            self.add(LeakyReLU(0.2))
            self.add(Conv2D(count, 3, 2, padding=1, in_channels=512))

        for param in self.collect_params().values():
            param.initialize()
            if "bias" in param.name:
                param.set_data(zeros(param.data().shape))
            elif "gamma" in param.name:
                param.set_data(random_normal(1, 0.02, param.data().shape))
            elif "weight" in param.name:
                param.set_data(random_normal(0, 0.02, param.data().shape))
Example #6
0
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)    
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu1 = LeakyReLU(alpha=0.2)
            
            conv2 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            conv3 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)
            norm3 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu3 = LeakyReLU(alpha=0.2)


            res_block = [conv1, norm1, relu1, conv2, norm2, relu2,conv3, norm3, relu3]

            self.se = nn.HybridSequential(prefix='')
            self.se.add(nn.Dense(outer_channels // 16, use_bias=False))
            self.se.add(nn.Activation('relu'))
            self.se.add(nn.Dense(outer_channels, use_bias=False))
            self.se.add(nn.Activation('sigmoid'))
            
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)
Example #7
0
File: P2PGAN.py Project: wshaow/GAN
    def __init__(self, in_channels, ndf=64, n_layers=3, use_sigmoid=False, use_bias=False):
        super(Discriminator, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 4
            padding = int(np.ceil((kernel_size - 1)/2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = min(2 ** n, 8)
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n_layers, 8)
            self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=1, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult))
            if use_sigmoid:
                self.model.add(Activation(activation='sigmoid'))
Example #8
0
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, usetanh = False ):
        super(CEGeneratorP, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0 #int(np.ceil((kernel_size - 1) / 2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = 2 ** n
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult, use_global_stats=istest))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = 2 ** n_layers
            self.model.add(Conv2D(channels=4096, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            #self.model.add(BatchNorm(momentum=0.1, in_channels =128, use_global_stats=istest))
            if usetanh:
                self.model.add(Activation(activation='tanh'))
            else:
                self.model.add(LeakyReLU(alpha=0.2))

            # Decoder
            # use integer division so the channel counts stay ints under Python 3
            self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=4096,
                                           use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
            #self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Activation(activation='relu'))
            for n in range(1, n_layers):
                nf_mult = nf_mult // 2
                self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                               padding=padding, in_channels=ndf * nf_mult,
                                               use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
                #self.model.add(LeakyReLU(alpha=0.2))
                if n==2:
                      self.model.add(Dropout(rate=0.5))
                self.model.add(Activation(activation='relu'))
            self.model.add(Conv2DTranspose(channels=in_channels, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=ndf))

            #self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Activation(activation='tanh'))
Example #9
0
    def __init__(self, count: int, depth: int, frac = 100) -> None:
        super(Network, self).__init__()

        self._count = count
        self._depth = depth

        with self.name_scope():           
            self.add(Dense(int(1475/frac)))

            self.add(LeakyReLU(alpha=0.2))

            layer = Identity(512, 512)
            layer = Skip(int(2949/frac), int(5898/frac), layer)

            layer.block.add(Dropout(0.5))
            # 48 x 48 x 64  =  147456
            # 24 x 24 x 512 =  294912
            # 48 x 48 x 512 = 1179648

            layer = Skip(int(589824/frac), int(1179648/frac), layer)
            layer = Skip(int(147456/frac), int(294912/frac), layer)
            layer = Skip(int(36864/frac), int(73728/frac), layer)

            self.add(layer)
            self.add(Dense(int(27648)))
        
            self.add(Activation("sigmoid"))
Example #10
0
    def __init__(self,
                 inner_channels,
                 outer_channels,
                 inner_block=None,
                 innermost=False,
                 outermost=False,
                 use_dropout=False,
                 use_bias=False,
                 final_out=3):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outermost = outermost
            en_conv = Conv2D(channels=inner_channels,
                             kernel_size=4,
                             strides=2,
                             padding=1,
                             in_channels=outer_channels,
                             use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)
            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)

            if innermost:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + decoder
            elif outermost:
                de_conv = Conv2DTranspose(channels=final_out,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation(activation='tanh')]
                model = encoder + [inner_block] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_block] + decoder
            if use_dropout:
                model += [Dropout(rate=0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)
Example #11
0
    def __init__(self, opts):
        super(DenseMultipathNet, self).__init__()
        opts.units = opts.units[:opts.num_stage]
        assert (len(opts.units) == opts.num_stage)

        num_filters = opts.init_channels
        num_filters_list = []
        for stage in range(opts.num_stage):
            num_filters += opts.units[stage] * opts.growth_rate
            num_filters = int(floor(num_filters * opts.reduction))
            num_filters_list.append(num_filters)

        self.net = HybridSequential()
        with self.net.name_scope():
            self.blocks = EncoderDecoderUnit(opts, num_filters_list[opts.num_stage-1], opts.num_stage-1, innermost=True)
            for stage in range(opts.num_stage-2, -1, -1):
                self.blocks = EncoderDecoderUnit(opts, num_filters_list[stage], stage, inner_block=self.blocks)
            self.net.add(FirstBlock(opts))
            self.net.add(self.blocks)
            self.net.add(ResDBlock(opts, num_filters=16))
            if opts.norm_type == 'batch':
                self.net.add(NormLayer())
            elif opts.norm_type == 'group':
                self.net.add(GroupNorm())
            elif opts.norm_type == 'instance':
                self.net.add(InstanceNorm())

            if opts.activation in ['leaky']:
                self.net.add(LeakyReLU(opts.alpha))
            else:
                self.net.add(Activation(opts.activation))
            self.net.add(Conv3D(kernel_size=(1, 1, 1), channels=2, use_bias=opts.use_bias))
            self.net.add(Softmax())
Example #12
0
 def __init__(self, opts):
     super(BasicBlock, self).__init__()
     self.bblock = HybridSequential()
     if opts.bottle_neck:
         self.bblock.add(
             BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
         if not opts.trans_block:
             self.bblock.add(LeakyReLU(alpha=.2))
         else:
             self.bblock.add(Activation(opts.activation))
         self.bblock.add(
             Conv2D(channels=int(opts.growth_rate * 4),
                    kernel_size=(1, 1),
                    strides=(1, 1),
                    use_bias=opts.use_bias,
                    padding=(0, 0)))
         if opts.drop_out > 0:
             self.bblock.add(Dropout(opts.drop_out))
     self.bblock.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
     self.bblock.add(Activation(activation=opts.activation))
     self.bblock.add(
         Conv2D(channels=int(opts.growth_rate),
                kernel_size=(3, 3),
                strides=(1, 1),
                use_bias=opts.use_bias,
                padding=(1, 1)))
     if opts.drop_out > 0:
         self.bblock.add(Dropout(opts.drop_out))
Example #13
0
    def __init__(self, count: int, depth: int) -> None:
        super(Network, self).__init__()

        self._count = count
        self._depth = depth

        with self.name_scope():
            self.add(Conv2D(64, 4, 2, 1, in_channels=depth))
            self.add(LeakyReLU(alpha=0.2))

            layer = Identity(512, 512)
            layer = Skip(512, 512, layer)

            for _ in range(0):
                layer = Skip(512, 512, layer)

                layer.block.add(Dropout(0.5))

            layer = Skip(256, 256, layer)
            layer = Skip(128, 128, layer)
            layer = Skip(64, 64, layer)

            self.add(layer)
            self.add(Conv2DTranspose(count, 4, 2, 1, in_channels=128))
            self.add(Activation("sigmoid"))

        for param in self.collect_params().values():
            param.initialize()
            if "bias" in param.name:
                param.set_data(zeros(param.data().shape))
            elif "gamma" in param.name:
                param.set_data(random_normal(1, 0.02, param.data().shape))
            elif "weight" in param.name:
                param.set_data(random_normal(0, 0.02, param.data().shape))
Example #14
0
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False,latent=256, usetanh = False ):
        super(Encoder, self).__init__()
        self.model = HybridSequential()
        kernel_size = 5
        padding = 0 #int(np.ceil((kernel_size - 1) / 2))
        self.model.add(Conv2D(channels=32, kernel_size=5, strides=2,
                              padding=2, in_channels=in_channels))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                              padding=2, in_channels=32))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                              padding=2, in_channels=64))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                              padding=2, in_channels=64))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                              padding=2, in_channels=64))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                              padding=2, in_channels=128))
        self.model.add(LeakyReLU(alpha=0.2))

        self.model.add(gluon.nn.Dense(latent))
        self.model.add(LeakyReLU(alpha=0.2))
Example #15
0
 def create_model():
     model = HybridSequential()
     layers = [
         Conv2D(6, kernel_size=5),
         Reduce('b c (h h2) (w w2) -> b c h w', 'max', h2=2, w2=2),
         Conv2D(16, kernel_size=5),
         Reduce('b c (h h2) (w w2) -> b c h w', 'max', h2=2, w2=2),
         Rearrange('b c h w -> b (c h w)'),
         Dense(120),
         LeakyReLU(alpha=0.0),
         Dense(84),
         LeakyReLU(alpha=0.0),
         Dense(10),
     ]
     for layer in layers:
         model.add(layer)
     model.initialize(mxnet.init.Xavier(), ctx=mxnet.cpu())
     return model
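A usage sketch for create_model (not part of the example; the 32x32 single-channel input is just an illustration, and Reduce/Rearrange are assumed to come from einops.layers.gluon as in the snippet's module):

import mxnet
model = create_model()
out = model(mxnet.nd.zeros((1, 1, 32, 32)))   # LeNet-style sizes: 32 -> 28 -> 14 -> 10 -> 5, then 400 -> 120 -> 84 -> 10
print(out.shape)                              # (1, 10)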
Example #16
0
    def __init__(self,
                 innerblock=None,
                 outer_channels=32,
                 inner_channels=64,
                 use_bias=False):
        super(middlelayer, self).__init__()
        with self.name_scope():
            res_block_1 = Res_Block(outer_channels=outer_channels)
            res_block_2 = Res_Block(outer_channels=inner_channels)
            en_conv = Conv2D(channels=inner_channels,
                             kernel_size=4,
                             strides=2,
                             padding=1,
                             in_channels=outer_channels,
                             use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)

            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)
            de_conv = Conv2DTranspose(channels=outer_channels,
                                      kernel_size=4,
                                      strides=2,
                                      padding=1,
                                      in_channels=inner_channels,
                                      use_bias=use_bias)
            self.p_at = CA_M5(in_channel=inner_channels)
            self.c_at = CA_M4()

            res_block_3 = Res_Block(outer_channels=inner_channels)
            res_block_4 = Res_Block(outer_channels=outer_channels)
            res1 = res_block_1
            encoder = [en_conv, en_norm, en_relu]
            res2 = res_block_2
            res3 = res_block_3
            decoder = [de_conv, de_norm, de_relu]
            res4 = res_block_4

            self.encoder = HybridSequential()
            with self.encoder.name_scope():
                for block in encoder:
                    self.encoder.add(block)

            self.inner_block = innerblock

            self.res1 = res1
            self.res2 = res2
            self.res3 = res3
            self.res4 = res4

            self.decoder = HybridSequential()

            with self.decoder.name_scope():
                for block in decoder:
                    self.decoder.add(block)
Example #17
0
def get_act(act_type, **kwargs):
    if act_type in ["relu", "sigmoid", "softrelu", "softsign", "tanh"]:
        return Activation(act_type, **kwargs)
    if act_type == "prelu":
        return PReLU(**kwargs)
    if act_type == "selu":
        return SELU(**kwargs)
    if act_type == "swish":
        return Swish(**kwargs)
    if act_type == "lrelu":
        return LeakyReLU(alpha=0.2, **kwargs)
    raise NotImplementedError
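For instance (a sketch, layer sizes arbitrary), the helper drops straight into a Gluon network:

from mxnet.gluon import nn

net = nn.HybridSequential()
net.add(nn.Dense(64))
net.add(get_act("lrelu"))    # returns LeakyReLU(alpha=0.2)
net.add(nn.Dense(1))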
Example #18
0
def _conv2d(channel, kernel, padding, stride, norm_layer=BatchNorm):
    """A common conv-bn-leakyrelu cell"""
    cell = HybridSequential(prefix='')
    cell.add(
        Conv2D(channel,
               kernel_size=kernel,
               strides=stride,
               padding=padding,
               use_bias=False))
    cell.add(norm_layer(epsilon=1e-5, momentum=0.9))
    cell.add(LeakyReLU(0.1))
    return cell
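Usage sketch (filter counts are illustrative, not from the source): stacking two such cells gives the familiar conv-bn-leakyrelu down-sampling pattern:

body = HybridSequential()
body.add(_conv2d(32, 3, 1, 1))   # channel=32, kernel=3, padding=1, stride=1
body.add(_conv2d(64, 3, 1, 2))   # stride=2 halves the spatial resolution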
Example #19
0
 def __init__(self, in_channel, out_channel):
     super(decoder, self).__init__()
     with self.name_scope():
         de_conv = Conv2DTranspose(channels=out_channel, kernel_size=4, strides=2, padding=1,
                                   in_channels=in_channel)
         norm = BatchNorm(momentum=0.1, in_channels=out_channel)
         relu = LeakyReLU(alpha=0.2)
     decode = [de_conv, norm, relu]
     self.decoder = HybridSequential()
     with self.decoder.name_scope():
         for block in decode:
             self.decoder.add(block)
Example #20
0
    def __init__(self, in_channels, ndf=64, n_layers=3, use_sigmoid=False, use_bias=False, istest = False, isthreeway = False):
        super(Discriminator, self).__init__()
        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0 #int(np.ceil((kernel_size - 1) / 2))
            self.model.add(Conv2D(channels=32, kernel_size=5, strides=2,
                                  padding=2, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=32))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                                  padding=2, in_channels=128))
            self.model.add(LeakyReLU(alpha=0.2))

            self.model.add(gluon.nn.Dense(1))

            if isthreeway:
                self.model.add(gluon.nn.Dense(3))
            # elif use_sigmoid:
            self.model.add(Activation(activation='sigmoid'))
Example #21
0
 def __init__(self, **kwargs):
     super(Rec, self).__init__(**kwargs)
     with self.name_scope():
         self.otherfeatures = nn.Embedding(input_dim=_OTHER_LEN,
                                           output_dim=_EMB_DIM)
         self.features = HybridSequential()
         self.features.add(
             nn.Embedding(input_dim=_SEQ_LEN, output_dim=_EMB_DIM))
         # Transformer
         # Multi-head attention with base cell scaled dot-product attention
         # Use b=1 self-attention blocks, as recommended in the article
         self.cell = _get_attention_cell('multi_head',
                                         units=_UNITS,
                                         scaled=True,
                                         dropout=_DROP,
                                         num_heads=_NUM_HEADS,
                                         use_bias=False)
         self.proj = nn.Dense(units=_UNITS,
                              use_bias=False,
                              bias_initializer='zeros',
                              weight_initializer=None,
                              flatten=False)
         self.drop_out_layer = nn.Dropout(rate=_DROP)
         self.ffn = PositionwiseFFN(hidden_size=_UNITS,
                                    use_residual=True,
                                    dropout=_DROP,
                                    units=_UNITS,
                                    weight_initializer=None,
                                    bias_initializer='zeros',
                                    activation='leakyrelu')
         self.layer_norm = nn.LayerNorm(in_channels=_UNITS)
         # Final MLP layers; BST dimensions in the article were 1024, 512, 256
         self.output = HybridSequential()
         self.output.add(nn.Dense(8))
         self.output.add(LeakyReLU(alpha=0.1))
         self.output.add(nn.Dense(4))
         self.output.add(LeakyReLU(alpha=0.1))
         self.output.add(nn.Dense(2))
         self.output.add(LeakyReLU(alpha=0.1))
         self.output.add(nn.Dense(1))
Example #22
0
    def __init__(self, **kwargs):
        super(ResDiscriminator, self).__init__(**kwargs)

        # Conv2DTranspose output size: out = (in - 1) * strides - 2 * padding + ks + out_padding
        with self.name_scope():
            self.add(
                Conv2D(64, kernel_size=7, strides=2, padding=3,
                       use_bias=False),
                BatchNorm(),
                LeakyReLU(0.2),
                MaxPool2D(pool_size=(2, 2)),
                # out (bs, 64, 64, 64)
                ResidualBlock(64, in_channels=64, downsample=False),
                ResidualBlock(64, in_channels=64, downsample=False),
                # out (bs, 64, 32, 32)
                ResidualBlock(128,
                              in_channels=64,
                              strides=(2, 1),
                              downsample=True),
                ResidualBlock(128, in_channels=128, downsample=False),
                # out (bs, 128, 16, 16)
                ResidualBlock(256,
                              in_channels=128,
                              strides=(2, 1),
                              downsample=True),
                ResidualBlock(256, in_channels=256, downsample=False),
                # out (bs, 256, 8, 8)
                ResidualBlock(512,
                              in_channels=256,
                              strides=(2, 1),
                              downsample=True),
                ResidualBlock(512, in_channels=512, downsample=False),
                # out (bs, 512, 4, 4)
                BatchNorm(),
                LeakyReLU(0.2),
                GlobalAvgPool2D(),
                Dense(128),
                LeakyReLU(0.2),
                Dense(1))
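The commented shapes are consistent with a 256x256 input (an assumption; the input size is not shown in the snippet):

size = 256
size = (size + 2 * 3 - 7) // 2 + 1   # Conv2D: kernel=7, strides=2, padding=3 -> 128
size = size // 2                     # MaxPool2D(2, 2)                        -> 64, i.e. (bs, 64, 64, 64)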
Example #23
0
def get_act(act_type, **kwargs):
    """Wrapper method for different non linear activation functions"""
    if act_type in ["relu", "sigmoid", "softrelu", "softsign", "tanh"]:
        return Activation(act_type, **kwargs)
    if act_type == "prelu":
        return PReLU(**kwargs)
    if act_type == "selu":
        return SELU(**kwargs)
    if act_type == "swish":
        return Swish(**kwargs)
    if act_type == "lrelu":
        return LeakyReLU(alpha=0.2, **kwargs)
    raise NotImplementedError
Example #24
0
def get_act(act_type, **kwargs):
    if act_type in ['relu', 'sigmoid', 'softrelu', 'softsign', 'tanh']:
        return Activation(act_type, **kwargs)
    elif act_type == 'prelu':
        return PReLU(**kwargs)
    elif act_type == 'selu':
        return SELU(**kwargs)
    elif act_type == 'swish':
        return Swish(**kwargs)
    elif act_type == 'lrelu':
        return LeakyReLU(alpha=0.2, **kwargs)
    else:
        raise NotImplementedError
Example #25
0
    def __init__(self, in_units, activation=LeakyReLU(0.2), **kwargs):
        super().__init__(**kwargs)

        with self.name_scope():
            self.coefficient_net = HybridSequential()

            dense = Dense(in_units=in_units,
                          units=1,
                          use_bias=False,
                          flatten=False)
            self.coefficient_net.add(dense)
            self.coefficient_net.add(activation)

        self.softmax = MaskedSoftmax(axis=1, keepdims=True)
Example #26
0
def build_discriminator(n_filters, n_channels, mx_ctx):
    netD = HybridSequential()
    with netD.name_scope():
        # Input is n_channels * 64 * 64
        netD.add(Conv2D(n_filters, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(n_filters * 2, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(BatchNorm())
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(n_filters * 4, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(BatchNorm())
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(n_filters * 8, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(BatchNorm())
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(1, 4, 1, 0, use_bias=False))

    netD.initialize(mx.init.Normal(0.02), ctx=mx_ctx)
    netD.hybridize()
    return netD
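Usage sketch (not from the source; context and input size are illustrative). On a 64x64 input the four stride-2 convolutions reduce the map to 4x4, and the final 4x4 valid convolution leaves a single score:

import mxnet as mx

netD = build_discriminator(n_filters=64, n_channels=3, mx_ctx=mx.cpu())
score = netD(mx.nd.zeros((1, 3, 64, 64)))    # shape (1, 1, 1, 1)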
Example #27
0
    def __init__(self, count: int, depth: int, layer: Layer) -> None:
        super(Skip, self).__init__()

        with self.name_scope():
            self._block = HybridSequential()

            self._block.add(Conv2D(layer.depth, 4, 2, 1, use_bias=False, in_channels=depth))
            self._block.add(BatchNorm(momentum=0.1, in_channels=layer.depth))
            self._block.add(LeakyReLU(0.2))
            self._block.add(layer)
            self._block.add(Conv2DTranspose(count, 4, 2, 1, use_bias=False, in_channels=layer.count))
            self._block.add(BatchNorm(momentum=0.1, in_channels=count))

        self._count = count
        self._depth = depth
        self._layer = layer
Example #28
0
    def __init__(self, count, depth: int, layer: Layer) -> None:
        super(Skip, self).__init__()

        with self.name_scope():
            self._block = HybridSequential()
            # 48 x 48
            self._block.add(Dense(count)) 

            self._block.add(LeakyReLU(0.2))
            self._block.add(layer)
            # 48 x 48
            self._block.add(Dense(depth))

        self._count = count
        self._depth = depth
        self._layer = layer
Example #29
0
 def __init__(self, opts, num_filters, pool_type='avg'):
     super(TransitionBlock, self).__init__()
     self.pool_type = pool_type
     self.tblock = HybridSequential()
     if opts.norm_type == 'batch':
         self.tblock.add(NormLayer())
     elif opts.norm_type == 'group':
         self.tblock.add(GroupNorm())
     elif opts.norm_type == 'instance':
         self.tblock.add(InstanceNorm())
     if opts.activation in ['leaky']:
         self.tblock.add(LeakyReLU(opts.alpha))
     else:
         self.tblock.add(Activation(opts.activation))
     self.tblock.add(Conv3D(channels=int(num_filters * opts.reduction), kernel_size=(opts.zKernelSize, 1, 1),
                       strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 0, 0)))
     if opts.drop_out > 0:
         self.tblock.add(Dropout(opts.drop_out))
Example #30
0
    def __init__(self, opts, num_filters, res_block=True, factor=1, group=1):
        super(DecoderBlock, self).__init__()
        self.dcblock = HybridSequential()
        if res_block:
            self.dcblock.add(ResDBlock(opts, num_filters * 4, group=group))
        if opts.norm_type == 'batch':
            self.dcblock.add(NormLayer())
        elif opts.norm_type == 'group':
            self.dcblock.add(GroupNorm())
        elif opts.norm_type == 'instance':
            self.dcblock.add(InstanceNorm())

        if opts.activation in ['leaky']:
            self.dcblock.add(LeakyReLU(opts.alpha))
        else:
            self.dcblock.add(Activation(opts.activation))
        self.dcblock.add(Conv3DTranspose(channels=int(num_filters / factor), kernel_size=(opts.zKernelSize, 2, 2),
                                         strides=(opts.zStride, 2, 2), padding=(opts.zPad, 0, 0), use_bias=opts.use_bias))