Example #1
 def __init__(self, in_channels, n_layers=3, ndf=64, use_sigmoid=False, use_bias=False):
     super(Discriminator, self).__init__()
     # The following lines configure a standard 2x-downsampling convolution
     kernel_size=4
     padding = int(np.ceil((kernel_size-1)/2))
     self.model = nn.HybridSequential()
     # An initial convolution maps the input to the first feature map
     self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2, padding=padding, use_bias=use_bias, in_channels=in_channels))
     self.model.add(LeakyReLU(alpha=0.2))
     
     nf_mult = 1
     for n in range(1, n_layers):
         nf_mult_prev = nf_mult
         nf_mult = min(2**n, 8)
         self.model.add(
             Conv2D(channels=ndf*nf_mult, kernel_size=kernel_size, strides=2, padding=padding, use_bias=use_bias, in_channels=ndf*nf_mult_prev),
             BatchNorm(momentum=0.1, in_channels=ndf*nf_mult),
             LeakyReLU(alpha=0.2))
     
     # If n_layers is small and the channels have not yet reached 512, raise the dimensionality a bit further
     nf_mult_prev = nf_mult
     nf_mult = min(2**n_layers, 8)
     self.model.add(
         Conv2D(channels=ndf*nf_mult, kernel_size=kernel_size, strides=1, padding=padding, use_bias=use_bias, in_channels=ndf*nf_mult_prev),
         BatchNorm(momentum=0.1, in_channels=ndf*nf_mult),
         LeakyReLU(alpha=0.2))
     # Output: why channel = 1? The discriminator emits a single real/fake score per patch (PatchGAN)
     self.model.add(Conv2D(channels=1, kernel_size=kernel_size, strides=1, padding=padding, use_bias=True, in_channels=ndf*nf_mult))
     if use_sigmoid:
         self.model.add(Activation('sigmoid'))
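A quick shape check of the downsampling arithmetic above (a standalone sketch, not part of the original example): with kernel_size=4 and padding=ceil(3/2)=2, each stride-2 convolution maps H to floor(H/2)+1, and the output stays a spatial patch map rather than collapsing to a single score.

import mxnet as mx
from mxnet.gluon import nn

# Rebuild the same layer pattern as a plain HybridSequential (the defaults
# ndf=64, n_layers=3 are assumed) to verify the PatchGAN output shape.
model = nn.HybridSequential()
model.add(nn.Conv2D(64, 4, strides=2, padding=2))
model.add(nn.LeakyReLU(0.2))
for mult in (2, 4):                                     # nf_mult for n = 1, 2
    model.add(nn.Conv2D(64 * mult, 4, strides=2, padding=2))
    model.add(nn.BatchNorm(momentum=0.1))
    model.add(nn.LeakyReLU(0.2))
model.add(nn.Conv2D(64 * 8, 4, strides=1, padding=2))   # min(2**3, 8) = 8
model.add(nn.BatchNorm(momentum=0.1))
model.add(nn.LeakyReLU(0.2))
model.add(nn.Conv2D(1, 4, strides=1, padding=2))        # one score per patch
model.initialize()
out = model(mx.nd.random.normal(shape=(1, 3, 256, 256)))
print(out.shape)  # (1, 1, 35, 35): 256 -> 129 -> 65 -> 33, then two stride-1 convs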
Example #2
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, latent=256, usetanh=False):
            super(Encoder, self).__init__()
            usetanh = True  # NOTE: unconditionally overrides the usetanh argument
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0 #int(np.ceil((kernel_size - 1) / 2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = 2 ** n
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult, use_global_stats=istest))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = 2 ** n_layers
            self.model.add(Conv2D(channels=latent, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=latent, use_global_stats=istest))
            if usetanh:
                self.model.add(Activation(activation='tanh'))
            else:
                self.model.add(LeakyReLU(alpha=0.2))
Example #3
    def __init__(self, in_channels, ndf=64, n_layers=3, use_sigmoid=False, use_bias=False):
        super(Discriminator, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 4
            padding = int(np.ceil((kernel_size - 1)/2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = min(2 ** n, 8)
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n_layers, 8)
            self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=1, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult))
            if use_sigmoid:
                self.model.add(Activation(activation='sigmoid'))
Example #4
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              last_relu=True,
              **kwargs):
     super(BottleneckV1, self).__init__(**kwargs)
     self.body = nn.HybridSequential(prefix='')
     self.body.add(
         nn.Conv2D(planes // self.expansion,
                   kernel_size=1,
                   strides=1,
                   in_channels=inplanes))
      self.body.add(BatchNorm(in_channels=planes // self.expansion))
     self.body.add(nn.Activation('relu'))
     self.body.add(
         _conv3x3(planes // self.expansion, stride,
                  planes // self.expansion))
     self.body.add(BatchNorm(in_channels=planes // self.expansion))
     self.body.add(nn.Activation('relu'))
     self.body.add(
         nn.Conv2D(planes,
                   kernel_size=1,
                   strides=1,
                   in_channels=planes // self.expansion))
     self.body.add(BatchNorm(in_channels=planes))
     self.body.add(nn.Activation('relu'))
     self.downsample = downsample
     self.last_relu = last_relu
Example #5
    def __init__(self, channels, bn_mom, act_type, unit_name, use_se=True, res_scale_fac=0.2):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        :param use_se: Whether to add a squeeze-excitation module to the block
        :param res_scale_fac: Scaling factor applied to the residual branch
        """
        super(_RiseResidualBlock, self).__init__(unit_name)
        self.act_type = act_type
        self.unit_name = unit_name
        self.res_scale_fac = res_scale_fac

        self.use_se = use_se

        # branch 0
        self.body = HybridSequential()
        self.body.add(Conv2D(channels=channels, kernel_size=(3, 3), padding=(1, 1), use_bias=False,
                             prefix='%s_conv0' % unit_name))
        self.body.add(BatchNorm(momentum=bn_mom, prefix='%s_bn0' % self.unit_name))
        self.body.add(get_act(act_type, prefix='%s_%s0' % (unit_name, act_type)))

        self.body.add(Conv2D(channels=channels, kernel_size=(3, 3), padding=(1, 1), use_bias=False,
                             prefix='%s_conv1' % unit_name))
        self.body.add(BatchNorm(momentum=bn_mom, prefix='%s_bn1' % self.unit_name))

        self.act0 = get_act(act_type, prefix='%s_%s1' % (unit_name, act_type))

        if use_se is True:
            self.se0 = _SqueezeExcitation('%s_se0' % unit_name, channels, 16, act_type)
Example #6
    def __init__(self,
                 inner_channels,
                 outer_channels,
                 inner_block=None,
                 innermost=False,
                 outermost=False,
                 use_dropout=False,
                 use_bias=False,
                 final_out=3):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outermost = outermost
            en_conv = Conv2D(channels=inner_channels,
                             kernel_size=4,
                             strides=2,
                             padding=1,
                             in_channels=outer_channels,
                             use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)
            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)

            if innermost:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + decoder
            elif outermost:
                de_conv = Conv2DTranspose(channels=final_out,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation(activation='tanh')]
                model = encoder + [inner_block] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_block] + decoder
            if use_dropout:
                model += [Dropout(rate=0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)
Example #7
    def __init__(self, block, layers, channels, **kwargs):
        super(ResNetV2, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():

            # To use pretrained weights, there is no choice but to use self.features = nn.HybridSequential(prefix='<any string>'); the prefix value itself does not matter.
            self.features = HybridSequential(prefix='')
            self.features.add(BatchNorm(
                scale=False, center=False))  # One open question: why a BatchNorm at the very front?
            self.features.add(Conv2D(channels[0], 7, 2, 3, use_bias=False))
            self.features.add(BatchNorm())
            self.features.add(Activation('relu'))
            self.features.add(MaxPool2D(3, 2, 1))  # 4th layer

            in_channels = channels[0]
            # 5(c2),6(c3),7(c4),8
            for i, num_layer in enumerate(layers):
                stride = 1 if i == 0 else 2
                self.features.add(
                    self._make_layer(block,
                                     num_layer,
                                     channels[i + 1],
                                     stride,
                                     i + 1,
                                     in_channels=in_channels))
                in_channels = channels[i + 1]
            self.features.add(BatchNorm())
            self.features.add(Activation('relu'))  # 10(c5)
Example #8
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)    
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu1 = LeakyReLU(alpha=0.2)
            
            conv2 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            conv3 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)
            norm3 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu3 = LeakyReLU(alpha=0.2)


            res_block = [conv1, norm1, relu1, conv2, norm2, relu2, conv3, norm3, relu3]

            self.se = nn.HybridSequential(prefix='')
            self.se.add(nn.Dense(outer_channels // 16, use_bias=False))
            self.se.add(nn.Activation('relu'))
            self.se.add(nn.Dense(outer_channels, use_bias=False))
            self.se.add(nn.Activation('sigmoid'))
            
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)
Example #9
 def __init__(self, opts):
     super(BasicBlock, self).__init__()
     self.bblock = HybridSequential()
     if opts.bottle_neck:
         self.bblock.add(
             BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
         if not opts.trans_block:
             self.bblock.add(LeakyReLU(alpha=.2))
         else:
             self.bblock.add(Activation(opts.activation))
         self.bblock.add(
             Conv2D(channels=int(opts.growth_rate * 4),
                    kernel_size=(1, 1),
                    strides=(1, 1),
                    use_bias=opts.use_bias,
                    padding=(0, 0)))
         if opts.drop_out > 0:
             self.bblock.add(Dropout(opts.drop_out))
     self.bblock.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
     self.bblock.add(Activation(activation=opts.activation))
     self.bblock.add(
         Conv2D(channels=int(opts.growth_rate),
                kernel_size=(3, 3),
                strides=(1, 1),
                use_bias=opts.use_bias,
                padding=(1, 1)))
     if opts.drop_out > 0:
         self.bblock.add(Dropout(opts.drop_out))
Example #10
    def __init__(self, name, nb_act_maps, ratio=16, act_type="relu"):

        super(_GatherExcitePlus, self).__init__(prefix=name)

        self.nb_act_maps = nb_act_maps
        self.body = HybridSequential(prefix="")

        nb_units_hidden = nb_act_maps // ratio
        with self.name_scope():
            # depthwise convolution
            # gather step
            self.body.add(Conv2D(nb_act_maps, kernel_size=3, padding=1, groups=nb_act_maps, strides=2, use_bias=False))
            self.body.add(get_act(act_type))
            self.body.add(BatchNorm())
            self.body.add(Conv2D(nb_act_maps, kernel_size=3, padding=1, groups=nb_act_maps, strides=2, use_bias=False))
            self.body.add(get_act(act_type))
            self.body.add(BatchNorm())
            self.body.add(Conv2D(nb_act_maps, kernel_size=3, padding=1, groups=nb_act_maps, strides=2, use_bias=False))
            self.body.add(get_act(act_type))
            self.body.add(BatchNorm())

            # get excitement parameters
            self.body.add(Dense(nb_units_hidden))
            self.body.add(get_act(act_type))
            self.body.add(Dense(nb_act_maps))
            self.body.add(get_act("sigmoid"))
Example #11
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            relu1 = LeakyReLU(alpha=0.2)
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            conv2 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            res_block = [conv1, norm1, relu1, conv2, norm2, relu2]
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)
Example #12
    def __init__(self, channels, bn_mom, act_type, unit_name):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        """
        super(ResidualBlock, self).__init__()
        self.act_type = act_type
        self.unit_name = unit_name

        self.body = HybridSequential()

        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv0" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn0" % self.unit_name))
        self.body.add(
            Activation(self.act_type,
                       prefix="%s_%s0" % (self.unit_name, self.act_type)))

        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv1" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn1" % self.unit_name))
Example #13
    def __init__(self, count: int, depth: int) -> None:
        super(HybridSequential, self).__init__()

        self._count = count
        self._depth = depth

        with self.name_scope():
            self.add(Conv2D(64, 4, 2, 1, in_channels=depth))
            self.add(LeakyReLU(0.2))
            self.add(Conv2D(128, 4, 2, 1, use_bias=False, in_channels=64))
            self.add(BatchNorm(momentum=0.1, in_channels=128))
            self.add(LeakyReLU(0.2))
            self.add(Conv2D(256, 4, 2, 1, use_bias=False, in_channels=128))
            self.add(BatchNorm(momentum=0.1, in_channels=256))
            self.add(LeakyReLU(0.2))
            self.add(Conv2D(512, 4, padding=1, use_bias=False,
                            in_channels=256))
            self.add(BatchNorm(momentum=0.1, in_channels=512))
            self.add(LeakyReLU(0.2))
            self.add(Conv2D(count, 3, 2, padding=1, in_channels=512))

        for param in self.collect_params().values():
            param.initialize()
            if "bias" in param.name:
                param.set_data(zeros(param.data().shape))
            elif "gamma" in param.name:
                param.set_data(random_normal(1, 0.02, param.data().shape))
            elif "weight" in param.name:
                param.set_data(random_normal(0, 0.02, param.data().shape))
Example #14
def build_generator(n_filters, n_channels, mx_ctx):
    netG = HybridSequential()
    with netG.name_scope():
        # Input is Z
        netG.add(Conv2DTranspose(n_filters * 8, kernel_size=4, strides=1, padding=0, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_filters * 4, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_filters * 2, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_filters, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_channels, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("tanh"))

    netG.initialize(mx.init.Normal(0.02), ctx=mx_ctx)
    netG.hybridize()
    return netG
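Since build_generator is self-contained apart from its imports, a minimal usage sketch (the import list and the 100-dim latent size are assumptions following DCGAN convention, not shown in the example):

import mxnet as mx
from mxnet.gluon.nn import HybridSequential, Conv2DTranspose, BatchNorm, Activation

ctx = mx.cpu()
netG = build_generator(n_filters=64, n_channels=3, mx_ctx=ctx)
# The latent vector enters as a 1x1 spatial map: the first transposed conv
# (stride 1, no padding) expands it to 4x4, then four stride-2 transposed
# convs upsample 4 -> 8 -> 16 -> 32 -> 64 pixels.
noise = mx.nd.random.normal(shape=(8, 100, 1, 1), ctx=ctx)
fake = netG(noise)
print(fake.shape)  # (8, 3, 64, 64), values in [-1, 1] from the final tanh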
Example #15
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, latent=256, usetanh=False):
            super(Decoder, self).__init__()
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0
            nf_mult = 2 ** n_layers
            self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=latent,
                                           use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
            #self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Activation(activation='relu'))
            for n in range(1, n_layers):
                nf_mult = nf_mult // 2
                self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                               padding=padding, in_channels=ndf * nf_mult,
                                               use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
                #self.model.add(LeakyReLU(alpha=0.2))
                if n == 2:
                    self.model.add(Dropout(rate=0.5))
                self.model.add(Activation(activation='relu'))
            self.model.add(Conv2DTranspose(channels=in_channels, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=ndf))

            #self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Activation(activation='tanh'))
Example #16
    def __init__(self,
                 name,
                 in_channels,
                 groups=2,
                 se_type="cSE",
                 use_residual=True,
                 act_type="relu",
                 **kwargs):

        super(_ShuffleBlock, self).__init__(prefix=name + "_")

        self.in_channels = in_channels
        self.nb_right_channels = in_channels // 2

        self.groups = groups

        self.body = HybridSequential(prefix="")
        self.use_residual = use_residual

        with self.name_scope():
            self.body.add(
                Conv2D(channels=self.nb_right_channels,
                       kernel_size=3,
                       strides=1,
                       padding=1,
                       groups=1,
                       use_bias=False))
            self.body.add(BatchNorm())
            self.body.add(get_act(act_type))
            self.body.add(
                Conv2D(channels=self.nb_right_channels,
                       kernel_size=3,
                       strides=1,
                       padding=1,
                       groups=1,
                       use_bias=False))
            self.body.add(BatchNorm())
            if se_type:
                if se_type == "cSE":
                    # apply squeeze excitation
                    self.body.add(
                        _ChannelSqueezeExcitation("se0",
                                                  self.nb_right_channels, 16,
                                                  act_type))
                elif se_type == "sSE":
                    self.body.add(_SpatialSqueezeExcitation("se0"))
                elif se_type == "scSE":
                    self.body.add(
                        _SpatialChannelSqueezeExcitation(
                            "se0", self.nb_right_channels, 2, act_type))
                else:
                    raise Exception(
                        'Unsupported Squeeze Excitation Module: Choose either [None, "cSE", "sSE", "scSE"'
                    )

        if self.use_residual:
            self.act = get_act(act_type)
        self.shufller = _ShuffleChannelsBlock(groups)
Example #17
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, usetanh=False):
        super(CEGeneratorP, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0 #int(np.ceil((kernel_size - 1) / 2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = 2 ** n
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult, use_global_stats=istest))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = 2 ** n_layers
            self.model.add(Conv2D(channels=4096, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            #self.model.add(BatchNorm(momentum=0.1, in_channels =128, use_global_stats=istest))
            if usetanh:
                self.model.add(Activation(activation='tanh'))
            else:
                self.model.add(LeakyReLU(alpha=0.2))

            # Decoder
            self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=4096,
                                           use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
            #self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Activation(activation='relu'))
            for n in range(1, n_layers):
                nf_mult = nf_mult // 2
                self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                               padding=padding, in_channels=ndf * nf_mult,
                                               use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
                #self.model.add(LeakyReLU(alpha=0.2))
                if n == 2:
                    self.model.add(Dropout(rate=0.5))
                self.model.add(Activation(activation='relu'))
            self.model.add(Conv2DTranspose(channels=in_channels, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=ndf))

            #self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Activation(activation='tanh'))
Example #18
    def __init__(self,
                 name,
                 channels,
                 bn_mom=0.9,
                 act_type='relu',
                 use_se=False):
        """
        Definition of the stem proposed by the alpha zero authors

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param use_se: Whether to use a squeeze-excitation module
        """

        super(_StemRise, self).__init__(prefix=name + '_')

        self.body = HybridSequential(prefix='')

        with self.name_scope():
            # add all layers to the stem
            self.body.add(
                Conv2D(channels=64,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            self.body.add(
                Conv2D(channels=64,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            self.body.add(
                Conv2D(channels=128,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            self.body.add(
                Conv2D(channels=128,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            self.body.add(
                Conv2D(channels=channels,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
Example #19
    def __init__(self,
                 innerblock=None,
                 outer_channels=32,
                 inner_channels=64,
                 use_bias=False):
        super(middlelayer, self).__init__()
        with self.name_scope():
            res_block_1 = Res_Block(outer_channels=outer_channels)
            res_block_2 = Res_Block(outer_channels=inner_channels)
            en_conv = Conv2D(channels=inner_channels,
                             kernel_size=4,
                             strides=2,
                             padding=1,
                             in_channels=outer_channels,
                             use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)

            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)
            de_conv = Conv2DTranspose(channels=outer_channels,
                                      kernel_size=4,
                                      strides=2,
                                      padding=1,
                                      in_channels=inner_channels,
                                      use_bias=use_bias)
            self.p_at = CA_M5(in_channel=inner_channels)
            self.c_at = CA_M4()

            res_block_3 = Res_Block(outer_channels=inner_channels)
            res_block_4 = Res_Block(outer_channels=outer_channels)
            res1 = res_block_1
            encoder = [en_conv, en_norm, en_relu]
            res2 = res_block_2
            res3 = res_block_3
            decoder = [de_conv, de_norm, de_relu]
            res4 = res_block_4

            self.encoder = HybridSequential()
            with self.encoder.name_scope():
                for block in encoder:
                    self.encoder.add(block)

            self.inner_block = innerblock

            self.res1 = res1
            self.res2 = res2
            self.res3 = res3
            self.res4 = res4

            self.decoder = HybridSequential()

            with self.decoder.name_scope():
                for block in decoder:
                    self.decoder.add(block)
Example #20
    def __init__(self, base=18,
                 deconv_channels=(256, 128, 64),
                 deconv_kernels=(4, 4, 4),
                 pretrained=True,
                 root=os.path.join(os.getcwd(), 'models'),
                 use_dcnv2=False,
                 ctx=mx.cpu()):

        mxnet_version = float(mx.__version__[0:3])
        if mxnet_version < 1.5:
            logging.error("please upgrade mxnet version above 1.5.x")
            raise EnvironmentError

        super(UpConvResNet, self).__init__()
        self._use_dcnv2 = use_dcnv2
        self._resnet = get_resnet(base, pretrained=pretrained, root=root, ctx=ctx)
        self._upconv = HybridSequential('')
        with self._upconv.name_scope():
            for channel, kernel in zip(deconv_channels, deconv_kernels):
                kernel, padding, output_padding = self._get_conv_argument(kernel)
                if self._use_dcnv2:
                    '''
                    In the paper, the channels of the three upsampling layers are first changed to
                    256, 128, and 64 respectively to save computation; one 3x3 deformable convolution
                    layer is then added before each up-convolution layer (channels 256, 128, 64).
                    '''
                    assert hasattr(contrib.cnn, 'ModulatedDeformableConvolution'), \
                        "No ModulatedDeformableConvolution found in mxnet, consider upgrade to mxnet 1.6.0..."
                    self._upconv.add(contrib.cnn.ModulatedDeformableConvolution(channels=channel,
                                                                                kernel_size=3,
                                                                                strides=1,
                                                                                padding=1,
                                                                                use_bias=False,
                                                                                num_deformable_group=1))
                else:
                    self._upconv.add(Conv2D(channels=channel,
                                            kernel_size=3,
                                            strides=1,
                                            padding=1, use_bias=False))
                self._upconv.add(BatchNorm(momentum=0.9))
                self._upconv.add(Activation('relu'))
                self._upconv.add(Conv2DTranspose(channels=channel,
                                                 kernel_size=kernel,
                                                 strides=2,
                                                 padding=padding,
                                                 output_padding=output_padding,
                                                 use_bias=False,
                                                 weight_initializer=mx.init.Bilinear()))
                self._upconv.add(BatchNorm(momentum=0.9))
                self._upconv.add(Activation('relu'))

        self._upconv.initialize(ctx=ctx)
        logging.info(f"{self.__class__.__name__} weight init 완료")
Example #21
    def __init__(self,
                 name,
                 channels,
                 bn_mom=0.9,
                 act_type="relu",
                 se_type=None):
        """
        Definition of the stem proposed by the alpha zero authors

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param se_type: Squeeze-excitation variant, one of [None, "cSE", "sSE", "scSE"]
        """

        super(_StemRise, self).__init__(prefix=name + "_")

        self.body = HybridSequential(prefix="")

        with self.name_scope():
            # add all layers to the stem
            self.body.add(
                Conv2D(channels=channels // 2,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            if se_type:
                if se_type == "cSE":
                    # apply squeeze excitation
                    # self.se = _ChannelSqueezeExcitation("se0", channels, 2, act_type)
                    self.body.add(
                        _ChannelSqueezeExcitation("se0", channels // 2, 16,
                                                  act_type))
                elif se_type == "sSE":
                    self.body.add(_SpatialSqueezeExcitation("se0"))
                elif se_type == "scSE":
                    self.body.add(
                        _SpatialChannelSqueezeExcitation(
                            "se0", channels // 2, 2, act_type))
                else:
                    raise Exception(
                        'Unsupported Squeeze Excitation Module: Choose either [None, "cSE", "sSE", "scSE"'
                    )
            self.body.add(
                Conv2D(channels=channels,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
Example #22
    def __init__(self, unit_name, channels, bn_mom, act_type, se_type="scSE"):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        """
        super(ResidualBlockX, self).__init__(unit_name + "_")
        self.act_type = act_type
        self.unit_name = unit_name
        self.body = HybridSequential(prefix="")
        self.channels = channels

        with self.name_scope():
            if se_type:
                if se_type == "cSE":
                    # apply squeeze excitation
                    self.body.add(
                        _ChannelSqueezeExcitation("se0", channels, 2,
                                                  act_type))
                elif se_type == "sSE":
                    self.body.add(_SpatialSqueezeExcitation("se0"))
                elif se_type == "scSE":
                    self.body.add(
                        _SpatialChannelSqueezeExcitation(
                            "se0", channels, 2, act_type))
                elif se_type == "GE+":
                    self.body.add(
                        _GatherExcitePlus("ge0", channels, 2, act_type))
                else:
                    raise Exception(
                        'Unsupported Squeeze Excitation Module: Choose either [None, "cSE", "sSE", "scSE",'
                        '"GE+')
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(self.act_type))
            self.body.add(
                Conv2D(channels=channels,
                       kernel_size=3,
                       padding=1,
                       groups=1,
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(self.act_type))
            self.body.add(
                Conv2D(channels=channels,
                       kernel_size=3,
                       padding=1,
                       groups=1,
                       use_bias=False))
Example #23
    def __init__(self,
                 name,
                 channels=2,
                 n_labels=4992,
                 bn_mom=0.9,
                 act_type="relu",
                 select_policy_from_plane=False):
        """
        Definition of the value head proposed by the alpha zero authors

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation in branch 0
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param se_type: SqueezeExcitation type choose either [None, "cSE", "sSE", csSE"] for no squeeze excitation,
        channelwise squeeze excitation, channel-spatial-squeeze-excitation, respectively
        """

        super(_PolicyHeadAlphaZero, self).__init__(prefix=name + "_")

        self.body = HybridSequential(prefix="")
        self.select_policy_from_plane = select_policy_from_plane

        with self.name_scope():
            if self.select_policy_from_plane:
                self.body.add(
                    Conv2D(channels=256,
                           padding=1,
                           kernel_size=(3, 3),
                           use_bias=False))
                self.body.add(BatchNorm(momentum=bn_mom))
                self.body.add(get_act(act_type))
                self.body.add(
                    Conv2D(channels=channels,
                           padding=1,
                           kernel_size=(3, 3),
                           use_bias=False))
                self.body.add(Flatten())
            else:
                self.body.add(
                    Conv2D(channels=channels,
                           kernel_size=(1, 1),
                           use_bias=False))
                self.body.add(BatchNorm(momentum=bn_mom))
                # if not self.select_policy_from_plane:
                self.body.add(get_act(act_type))

                self.body.add(Flatten())
                self.body.add(Dense(units=n_labels))
Example #24
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False):
        super(CEGenerator, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 4
            padding = int(np.ceil((kernel_size - 1) / 2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = 2 ** n
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = 2 ** n_layers
            self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
            self.model.add(LeakyReLU(alpha=0.2))

            # Decoder
            self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=1,
                                           padding=padding, in_channels=ndf * nf_mult,
                                           use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2))
            self.model.add(LeakyReLU(alpha=0.2))

            for n in range(1, n_layers):
                nf_mult = nf_mult // 2
                self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                               padding=padding, in_channels=ndf * nf_mult,
                                               use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2))
                self.model.add(LeakyReLU(alpha=0.2))

            self.model.add(Conv2DTranspose(channels=in_channels, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=ndf))
            self.model.add(LeakyReLU(alpha=0.2))
Example #25
File: rl.py Project: tsuberim/RL
    def __init__(self, n_dims=128, **kwargs):
        PersistentBlock.__init__(self, **kwargs)
        if n_dims < 16:
            raise ValueError('`n_dims` must be at least 16 (given: %d)' %
                             n_dims)

        self.encoder = Sequential()
        self.encoder.add(BatchNorm(), Conv2D(int(n_dims / 16), 6, (4, 3)),
                         Activation('relu'), Conv2D(int(n_dims / 8), 3),
                         Activation('relu'), Conv2D(int(n_dims / 2), 3),
                         BatchNorm(), MaxPool2D(), Activation('relu'),
                         Conv2D(int(n_dims), 3), MaxPool2D(), Activation('relu'),
                         Conv2D(int(n_dims), 3), MaxPool2D(), Activation('relu'),
                         Flatten())
Example #26
 def __init__(self, block, int_channels, out_channels, blocks, alpha, beta,
              stride, hw):
     super(BLModule, self).__init__()
     self.hw = hw
     self.big = blm_make_layer(block,
                               int_channels,
                               out_channels,
                               blocks - 1,
                               2,
                               last_relu=False)
     self.little_e = nn.HybridSequential(prefix='')
     self.little_e.add(
         blm_make_layer(block, int_channels, out_channels // alpha,
                        max(1, blocks // beta - 1)))
     self.little_e.add(
         nn.Conv2D(out_channels,
                   kernel_size=1,
                   in_channels=out_channels // alpha))
     self.little_e.add(BatchNorm(in_channels=out_channels))
     self.relu = nn.Activation('relu')
     self.fusion = blm_make_layer(block,
                                  out_channels,
                                  out_channels,
                                  1,
                                  stride=stride)
Example #27
def blm_make_layer(block, inplanes, planes, blocks, stride=1, last_relu=True):
    downsample = nn.HybridSequential(prefix='')
    with downsample.name_scope():
        if stride != 1:
            downsample.add(nn.AvgPool2D(3, strides=2, padding=1))
        if inplanes != planes:
            downsample.add(
                nn.Conv2D(planes,
                          kernel_size=1,
                          strides=1,
                          in_channels=inplanes))
            downsample.add(BatchNorm(in_channels=planes))
    layers = nn.HybridSequential(prefix='')
    with layers.name_scope():
        # The first block (possibly strided, with the downsample shortcut) is
        # always added; when blocks == 1 the loop below is simply empty.
        layers.add(
            block(inplanes, planes, stride=stride, downsample=downsample))
        for i in range(1, blocks):
            layers.add(
                block(planes,
                      planes,
                      last_relu=last_relu if i == blocks - 1 else True))
    return layers
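blm_make_layer only assumes its block argument accepts (inplanes, planes, stride=..., downsample=...) for the first block and (planes, planes, last_relu=...) afterwards; a toy stand-in makes the wiring testable (a sketch under that assumption; the real block classes live elsewhere in this project):

import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.nn import BatchNorm

class ToyBlock(nn.HybridBlock):
    # Minimal block honoring the constructor signature blm_make_layer expects.
    def __init__(self, inplanes, planes, stride=1, downsample=None, last_relu=True, **kwargs):
        super(ToyBlock, self).__init__(**kwargs)
        self.conv = nn.Conv2D(planes, 3, strides=stride, padding=1, in_channels=inplanes)
        self.downsample = downsample
        self.last_relu = last_relu

    def hybrid_forward(self, F, x):
        out = self.conv(x)
        if self.downsample is not None:
            out = out + self.downsample(x)   # shortcut: AvgPool + 1x1 conv + BN
        return F.relu(out) if self.last_relu else out

layers = blm_make_layer(ToyBlock, inplanes=64, planes=128, blocks=2, stride=2)
layers.initialize()
print(layers(mx.nd.random.normal(shape=(1, 64, 32, 32))).shape)  # (1, 128, 16, 16)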
Example #28
def lateral_conv(channels, kernel_size, strides, padding):
    lateral = HybridSequential()
    with lateral.name_scope():
        lateral.add(Conv2D(channels, kernel_size, strides, padding))
        lateral.add(BatchNorm(momentum=0.9, epsilon=1e-5))
        lateral.add(Activation('relu'))
    return lateral
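A quick check of the helper above (the imports are assumptions; the example itself does not show them):

import mxnet as mx
from mxnet.gluon.nn import HybridSequential, Conv2D, BatchNorm, Activation

# A 1x1 lateral projection of the kind used in FPN-style necks:
lat = lateral_conv(channels=256, kernel_size=1, strides=1, padding=0)
lat.initialize()
y = lat(mx.nd.random.normal(shape=(1, 512, 32, 32)))
print(y.shape)  # (1, 256, 32, 32): channels remapped, spatial size preserved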
Example #29
    def __init__(self, name, channels=1, fc0=256, bn_mom=0.9, act_type="relu"):
        """
        Definition of the value head proposed by the alpha zero authors

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation in branch 0
        :param fc0: Number of units in Dense/Fully-Connected layer
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        """

        super(_ValueHeadAlphaZero, self).__init__(prefix=name + "_")

        self.body = HybridSequential(prefix="")

        with self.name_scope():
            self.body.add(
                Conv2D(channels=channels, kernel_size=(1, 1), use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            self.body.add(Flatten())
            self.body.add(Dense(units=fc0))
            self.body.add(get_act(act_type))
            self.body.add(Dense(units=1))
            self.body.add(get_act("tanh"))
Example #30
    def __init__(self, name, channels=1, fc0=256, bn_mom=0.9, act_type="relu"):
        """
        Definition of the value head. Same as the alpha zero authors' version, but with the order of Batch-Norm and ReLU swapped.

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation in branch 0
        :param fc0: Number of units in Dense/Fully-Connected layer
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        """

        super(_ValueHeadRise, self).__init__(prefix=name + "_")

        self.body = HybridSequential(prefix="")

        with self.name_scope():
            self.body.add(
                Conv2D(channels=channels, kernel_size=(1, 1), use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            self.body.add(Flatten())
            self.body.add(Dense(units=fc0))
            self.body.add(get_act(act_type))
            self.body.add(Dense(units=1))
            self.body.add(get_act("tanh"))