Example 1
def vgg_block(num_convs, num_channels):
    blk = Sequential()
    for _ in range(num_convs):
        blk.add(Conv2D(num_channels, kernel_size=3,
                       padding=1, activation='relu'))
    blk.add(MaxPool2D(pool_size=2, strides=2))
    return blk
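A minimal usage sketch for vgg_block, assuming the usual Gluon imports; the architecture and input shape below are illustrative, not taken from the original source:

from mxnet import nd
from mxnet.gluon.nn import Dense, Flatten, Sequential

def vgg(conv_arch):
    # conv_arch: list of (num_convs, num_channels) pairs, one per vgg_block
    net = Sequential()
    for num_convs, num_channels in conv_arch:
        net.add(vgg_block(num_convs, num_channels))
    net.add(Flatten(), Dense(128, activation='relu'), Dense(10))
    return net

net = vgg([(1, 16), (1, 32), (2, 64)])
net.initialize()
out = net(nd.random.uniform(shape=(1, 1, 96, 96)))  # -> (1, 10)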
Example 2
def test_Conv2D(use_bias, groups):
    x = nd.uniform(shape=(2, 2, 5, 5))

    # positional args: channels=10, kernel_size=3, strides=1, padding=1
    my_conv = MyConv(10,
                     3,
                     1,
                     1,
                     in_channels=2,
                     groups=groups,
                     use_bias=use_bias)
    my_conv.initialize()

    ref_conv = Conv2D(10,
                      3,
                      1,
                      1,
                      in_channels=2,
                      groups=groups,
                      use_bias=use_bias,
                      bias_initializer=init.Constant(my_conv.bias.data())
                      if use_bias else 'zero',
                      weight_initializer=init.Constant(my_conv.weight.data()))
    ref_conv.initialize()

    return (my_conv(x) - ref_conv(x)).abs().sum().asscalar()
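A hedged driver for the check above (MyConv is the custom convolution defined elsewhere in the source; the tolerance is an assumption): the returned sum of absolute differences should be near zero whenever MyConv reproduces Conv2D.

for use_bias in (False, True):
    for groups in (1, 2):
        diff = test_Conv2D(use_bias, groups)
        assert diff < 1e-4, (use_bias, groups, diff)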
Example 3
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides=(1, 1),
                 padding=(0, 0),
                 dilation=(1, 1),
                 groups=1,
                 use_bias=False):
        super(SplitAttentionConv, self).__init__()
        radix = 2
        reduction = 2

        assert in_channels == out_channels

        self.radix = radix
        self.cardinality = groups
        self.out_channels = out_channels

        self.conv = Conv2D(
            out_channels * radix,
            kernel_size,
            strides,
            padding,
            dilation,
            groups=(groups * radix),
            in_channels=in_channels,
            use_bias=use_bias)
        self.bn = BatchNorm(in_channels=out_channels * radix)
        self.relu = Activation('relu')

        self.sa = SABlock(
            out_channels=out_channels,
            groups=groups,
            radix=radix)
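A hedged instantiation sketch (the forward pass and the SABlock class live elsewhere in the source; the shapes and groups are illustrative). Note that the assert above requires in_channels == out_channels:

block = SplitAttentionConv(in_channels=64, out_channels=64, kernel_size=3,
                           strides=(1, 1), padding=(1, 1), groups=2)
block.initialize()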
Example 4
 def __init__(self, **kwargs):
     super().__init__(**kwargs)
     with self.name_scope():
         self.conv1 = Conv2D(64, kernel_size=(7, 7), padding=(3, 3))
         self.conv2 = Conv2D(64, kernel_size=(5, 5), padding=(2, 2))
         self.conv3 = Conv2D(64, kernel_size=(5, 5), padding=(2, 2))
         self.conv4 = Conv2D(64, kernel_size=(5, 5), padding=(2, 2))
         self.conv5 = Conv2D(48, kernel_size=(5, 5), padding=(2, 2))
         self.conv6 = Conv2D(48, kernel_size=(5, 5), padding=(2, 2))
         self.conv7 = Conv2D(48, kernel_size=(5, 5), padding=(2, 2))
         self.conv8 = Conv2D(32, kernel_size=(5, 5), padding=(2, 2))
         self.conv9 = Conv2D(32, kernel_size=(5, 5), padding=(2, 2))
         self.conv10 = Conv2D(32, kernel_size=(5, 5), padding=(2, 2))
         self.flatten = Flatten()
         self.dense1 = Dense(1024)
         self.dense2 = Dense(19 * 19)
Example 5
    def __init__(self, count: int, depth: int) -> None:
        super(Network, self).__init__()

        self._count = count
        self._depth = depth

        with self.name_scope():           
            self.add(Conv2D(64, 4, 2, 1, in_channels=depth))
            self.add(LeakyReLU(alpha=0.2))

            layer = Identity(512, 512)
            layer = Skip(512, 512, layer)

            # NOTE: range(0) means this loop body never runs, so the extra
            # dropout skip blocks are disabled in this configuration
            for _ in range(0):
                layer = Skip(512, 512, layer)

                layer.block.add(Dropout(0.5))

            layer = Skip(256, 256, layer)
            layer = Skip(128, 128, layer)
            layer = Skip(64, 64, layer)

            self.add(layer)
            self.add(Conv2DTranspose(count, 4, 2, 1, in_channels=128))
            self.add(Activation("sigmoid"))

        # DCGAN-style init: zero biases, N(1, 0.02) BatchNorm gammas, N(0, 0.02)
        # weights (`zeros` and `random_normal` are ndarray helpers imported
        # elsewhere in the source)
        for param in self.collect_params().values():
            param.initialize()
            if "bias" in param.name:
                param.set_data(zeros(param.data().shape))
            elif "gamma" in param.name:
                param.set_data(random_normal(1, 0.02, param.data().shape))
            elif "weight" in param.name:
                param.set_data(random_normal(0, 0.02, param.data().shape))
Example 6
    def __init__(self,
                 inner_channels,
                 outer_channels,
                 inner_block=None,
                 innermost=False,
                 outermost=False,
                 use_dropout=False,
                 use_bias=False):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outermost = outermost
            en_conv = Conv2D(channels=inner_channels,
                             kernel_size=4,
                             strides=2,
                             padding=1,
                             in_channels=outer_channels,
                             use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)
            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)

            if innermost:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + decoder
            elif outermost:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation(activation='tanh')]
                model = encoder + [inner_block] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_block] + decoder
            if use_dropout:
                model += [Dropout(rate=0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)
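A hedged composition sketch for UnetSkipUnit, nesting from the innermost block outward in the usual pix2pix fashion (the channel progression and the 3-channel outermost image are illustrative assumptions; hybrid_forward is defined elsewhere in the source):

unit = UnetSkipUnit(512, 512, innermost=True)
unit = UnetSkipUnit(512, 512, inner_block=unit, use_dropout=True)
unit = UnetSkipUnit(512, 256, inner_block=unit)
unit = UnetSkipUnit(256, 128, inner_block=unit)
unit = UnetSkipUnit(128, 64, inner_block=unit)
unet = UnetSkipUnit(64, 3, inner_block=unit, outermost=True)
unet.initialize()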
Example 7
    def __init__(self, nb_cls, **kwargs):
        super(multilabel_attach, self).__init__(**kwargs)

        self.nb_cls = nb_cls
        self.conv1 = Conv2D(nb_cls, kernel_size=1, use_bias=False, prefix='tail')
        self.fc1 = Dense(1024, flatten=False)
        self.fc2 = Dense(1024, flatten=False)
        self.out = Dense(1, flatten=False)
Example 8
 def create_model():
     model = HybridSequential()
     layers = [
         Conv2D(6, kernel_size=5),
         Reduce('b c (h h2) (w w2) -> b c h w', 'max', h2=2, w2=2),
         Conv2D(16, kernel_size=5),
         Reduce('b c (h h2) (w w2) -> b c h w', 'max', h2=2, w2=2),
         Rearrange('b c h w -> b (c h w)'),
         Dense(120),
         LeakyReLU(alpha=0.0),  # alpha=0.0 makes this a plain ReLU
         Dense(84),
         LeakyReLU(alpha=0.0),
         Dense(10),
     ]
     for layer in layers:
         model.add(layer)
     model.initialize(mxnet.init.Xavier(), ctx=mxnet.cpu())
     return model
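A hedged usage sketch for the model above (assumes "import mxnet" and "from einops.layers.gluon import Rearrange, Reduce", as in the original source); a LeNet-style forward pass on a batch of 32x32 single-channel images:

import mxnet
model = create_model()
out = model(mxnet.nd.random.uniform(shape=(4, 1, 32, 32)))  # -> (4, 10)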
Example 9
    def __init__(self, name):

        super(_SpatialSqueezeExcitation, self).__init__(prefix=name)

        self.body = HybridSequential(prefix="")

        with self.name_scope():
            self.body.add(Conv2D(1, kernel_size=1, padding=0, use_bias=True))
            self.body.add(get_act("sigmoid"))
Example 10
 def __init__(self, opts):
     super(FirstBlock, self).__init__()
     self.fblock = HybridSequential()
     self.fblock.add(
         Conv2D(channels=opts.init_channels,
                kernel_size=(3, 3),
                strides=(1, 1),
                padding=(1, 1),
                use_bias=opts.use_bias))
Example 11
def ConvBatchAct(channels=1, kernel=1, stride=1, pad=0, num_group=1, active=True):
    net = HybridSequential()
    with net.name_scope():
        net.add(Conv2D(channels, kernel, stride, pad, groups=num_group, use_bias=False))
        net.add(BatchNorm(momentum=0.9))
        if active:
            net.add(Activation('relu'))
            # net.add(Swish())
    return net
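A hedged usage sketch: stack the cell above as a 3x3 conv-BN-ReLU feature layer followed by a 1x1 projection without activation (the sizes are illustrative):

net = HybridSequential()
net.add(ConvBatchAct(channels=32, kernel=3, stride=1, pad=1))
net.add(ConvBatchAct(channels=64, kernel=1, active=False))
net.initialize()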
Example 12
    def __init__(self,
                 unit_name,
                 cardinality,
                 channels,
                 bn_mom,
                 act_type,
                 res_scale_fac,
                 use_se=True):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        """
        super(ResidualBlockX, self).__init__(unit_name + '_')
        self.act_type = act_type
        self.unit_name = unit_name
        self.res_scale_fac = res_scale_fac
        self.body = HybridSequential(prefix='')

        with self.name_scope():
            self.body.add(
                Conv2D(channels=channels,
                       kernel_size=3,
                       padding=1,
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(self.act_type))

            self.body.add(
                Conv2D(channels=channels,
                       kernel_size=3,
                       padding=1,
                       groups=cardinality,
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))

            if use_se:
                # apply squeeze excitation
                self.body.add(_SqueezeExcitation('se0', channels, 16,
                                                 act_type))

            self.act0 = get_act(act_type)
Example 13
    def __init__(self,
                 channels,
                 bn_mom,
                 act_type,
                 unit_name,
                 use_se=True,
                 res_scale_fac=0.2):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        """
        super(_RiseResidualBlock, self).__init__(unit_name)
        self.act_type = act_type
        self.unit_name = unit_name
        self.res_scale_fac = res_scale_fac
        self.use_se = use_se
        # branch 0
        self.body = HybridSequential()
        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv0" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn0" % self.unit_name))
        self.body.add(
            get_act(act_type, prefix="%s_%s0" % (unit_name, act_type)))
        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv1" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn1" % self.unit_name))
        self.act0 = get_act(act_type, prefix="%s_%s1" % (unit_name, act_type))

        if use_se:
            self.se0 = _ChannelSqueezeExcitation("%s_se0" % unit_name,
                                                 channels, 16, act_type)
Example 14
    def __init__(self, name, nb_act_maps, ratio=16, act_type="relu"):

        super(_GatherExcitePlus, self).__init__(prefix=name)

        self.nb_act_maps = nb_act_maps
        self.body = HybridSequential(prefix="")

        nb_units_hidden = nb_act_maps // ratio
        with self.name_scope():
            # depthwise convolution
            # gather step
            self.body.add(
                Conv2D(nb_act_maps,
                       kernel_size=3,
                       padding=1,
                       groups=nb_act_maps,
                       strides=2,
                       use_bias=False))
            self.body.add(get_act(act_type))
            self.body.add(BatchNorm())
            self.body.add(
                Conv2D(nb_act_maps,
                       kernel_size=3,
                       padding=1,
                       groups=nb_act_maps,
                       strides=2,
                       use_bias=False))
            self.body.add(get_act(act_type))
            self.body.add(BatchNorm())
            self.body.add(
                Conv2D(nb_act_maps,
                       kernel_size=3,
                       padding=1,
                       groups=nb_act_maps,
                       strides=2,
                       use_bias=False))
            self.body.add(get_act(act_type))
            self.body.add(BatchNorm())

            # get excitement parameters
            self.body.add(Dense(nb_units_hidden))
            self.body.add(get_act(act_type))
            self.body.add(Dense(nb_act_maps))
            self.body.add(get_act("sigmoid"))
Example 15
    def __init__(self,
                 innerblock=None,
                 outer_channels=32,
                 inner_channels=64,
                 use_bias=False):
        super(middlelayer, self).__init__()
        with self.name_scope():
            res_block_1 = Res_Block(outer_channels=outer_channels)
            res_block_2 = Res_Block(outer_channels=inner_channels)
            en_conv = Conv2D(channels=inner_channels,
                             kernel_size=4,
                             strides=2,
                             padding=1,
                             in_channels=outer_channels,
                             use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)

            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)
            de_conv = Conv2DTranspose(channels=outer_channels,
                                      kernel_size=4,
                                      strides=2,
                                      padding=1,
                                      in_channels=inner_channels,
                                      use_bias=use_bias)
            self.p_at = CA_M5(in_channel=inner_channels)
            self.c_at = CA_M4()

            res_block_3 = Res_Block(outer_channels=inner_channels)
            res_block_4 = Res_Block(outer_channels=outer_channels)
            res1 = res_block_1
            encoder = [en_conv, en_norm, en_relu]
            res2 = res_block_2
            res3 = res_block_3
            decoder = [de_conv, de_norm, de_relu]
            res4 = res_block_4

            self.encoder = HybridSequential()
            with self.encoder.name_scope():
                for block in encoder:
                    self.encoder.add(block)

            self.inner_block = innerblock

            self.res1 = res1
            self.res2 = res2
            self.res3 = res3
            self.res4 = res4

            self.decoder = HybridSequential()

            with self.decoder.name_scope():
                for block in decoder:
                    self.decoder.add(block)
Example 16
    def __init__(self, in_channels, ndf=1, n_layers=3, use_sigmoid=False, use_bias=False):
        super(label_Discriminator, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 70
            padding = 24
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=8,
                                  padding=padding, in_channels=in_channels, use_bias=use_bias))
            if use_sigmoid:
                self.model.add(Activation(activation='sigmoid'))
Example 17
    def __init__(self, **kwargs):
        super(Net, self).__init__(**kwargs)
        with self.name_scope():
            # layers created in name_scope will inherit name space
            # from parent layer.

            ############### Conv2D ###################
            self.conv1 = Conv2D(20, kernel_size=(5, 5))
            self.conv2 = Conv2D(50, kernel_size=(5, 5))
            ##########################################

            #################  DOConv2D  ######################
            # self.conv1 = DOConv2D(1, 20, kernel_size=(5, 5))
            # self.conv2 = DOConv2D(20, 50, kernel_size=(5, 5))
            ###################################################

            self.pool1 = nn.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
            self.pool2 = nn.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
            self.fc1 = nn.Dense(500)
            self.fc2 = nn.Dense(10)
Example 18
    def __init__(self, channels, bn_mom, act_type, unit_name):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        """
        super(ResidualBlock, self).__init__()
        self.act_type = act_type
        self.unit_name = unit_name

        self.body = HybridSequential()

        self.body.add(Conv2D(channels=channels, kernel_size=(3, 3), padding=(1, 1), use_bias=False, prefix='%s_conv0' % unit_name))
        self.body.add(BatchNorm(momentum=bn_mom, prefix='%s_bn0' % self.unit_name))
        self.body.add(Activation(self.act_type, prefix='%s_%s0' % (self.unit_name, self.act_type)))

        self.body.add(Conv2D(channels=channels, kernel_size=(3, 3), padding=(1, 1), use_bias=False, prefix='%s_conv1' % unit_name))
        self.body.add(BatchNorm(momentum=bn_mom, prefix='%s_bn1' % self.unit_name))
Example 19
def _conv2d(channel, kernel, padding, stride, norm_layer=BatchNorm):
    """A common conv-bn-leakyrelu cell"""
    cell = HybridSequential(prefix='')
    cell.add(
        Conv2D(channel,
               kernel_size=kernel,
               strides=stride,
               padding=padding,
               use_bias=False))
    cell.add(norm_layer(epsilon=1e-5, momentum=0.9))
    cell.add(LeakyReLU(0.1))
    return cell
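A hedged usage sketch; note the positional order of _conv2d is (channel, kernel, padding, stride), which differs from Conv2D's (channels, kernel_size, strides, padding):

body = HybridSequential()
body.add(_conv2d(32, 3, 1, 1))  # 3x3, stride 1
body.add(_conv2d(64, 3, 1, 2))  # 3x3, stride 2 downsampling
body.initialize()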
Example 20
    def __init__(self, channels, momentum=0.9, epsilon=1e-5):
        super(OutputFF, self).__init__()
        with self.name_scope():
            # self.weight1 = self.params.get('weight1', shape=(1,))
            # self.weight2 = self.params.get('weight2', shape=(1,))
            # self.weight3 = self.params.get('weight3', shape=(1,))

            # depthwise separable convolution
            self._dws = HybridSequential()
            self._dws.add(
                Conv2D(channels,
                       3,
                       strides=(1, 1),
                       padding=(1, 1),
                       groups=channels))
            self._dws.add(BatchNorm(momentum=momentum, epsilon=epsilon))
            self._dws.add(Activation('relu'))
            self._dws.add(
                Conv2D(channels, 1, strides=(1, 1), padding=(0, 0), groups=1))
            self._dws.add(BatchNorm(momentum=momentum, epsilon=epsilon))
            self._dws.add(Activation('relu'))
Example 21
 def __init__(self, in_channel, out_channel):
     super(encoder, self).__init__()
     with self.name_scope():
         en_conv = Conv2D(channels=out_channel, kernel_size=4, strides=2,
                          padding=1, in_channels=in_channel)
         norm = BatchNorm(momentum=0.1, in_channels=out_channel)
         relu = LeakyReLU(alpha=0.2)
     encode = [en_conv, norm, relu]
     self.encoder = HybridSequential()
     with self.encoder.name_scope():
         for block in encode:
             self.encoder.add(block)
Example 22
    def __init__(self, base=18,
                 deconv_channels=(256, 128, 64),
                 deconv_kernels=(4, 4, 4),
                 pretrained=True,
                 root=os.path.join(os.getcwd(), 'models'),
                 use_dcnv2=False,
                 ctx=mx.cpu()):

        mxnet_version = float(mx.__version__[0:3])
        if mxnet_version < 1.5:
            logging.error("please upgrade MXNet to version 1.5.x or above")
            raise EnvironmentError

        super(UpConvResNet, self).__init__()
        self._use_dcnv2 = use_dcnv2
        self._resnet = get_resnet(base, pretrained=pretrained, root=root, ctx=ctx)
        self._upconv = HybridSequential('')
        with self._upconv.name_scope():
            for channel, kernel in zip(deconv_channels, deconv_kernels):
                kernel, padding, output_padding = self._get_conv_argument(kernel)
                if self._use_dcnv2:
                    '''
                    In the paper, the channels of the three upsampling layers are
                    first changed to 256, 128, 64 to save computation; one 3x3
                    deformable convolution layer is then added before each
                    up-convolution layer with channels 256, 128, 64
                    '''
                    assert hasattr(contrib.cnn, 'ModulatedDeformableConvolution'), \
                        "No ModulatedDeformableConvolution found in mxnet, consider upgrade to mxnet 1.6.0..."
                    self._upconv.add(contrib.cnn.ModulatedDeformableConvolution(channels=channel,
                                                                                kernel_size=3,
                                                                                strides=1,
                                                                                padding=1,
                                                                                use_bias=False,
                                                                                num_deformable_group=1))
                else:
                    self._upconv.add(Conv2D(channels=channel,
                                            kernel_size=3,
                                            strides=1,
                                            padding=1, use_bias=False))
                self._upconv.add(BatchNorm(momentum=0.9))
                self._upconv.add(Activation('relu'))
                self._upconv.add(Conv2DTranspose(channels=channel,
                                                 kernel_size=kernel,
                                                 strides=2,
                                                 padding=padding,
                                                 output_padding=output_padding,
                                                 use_bias=False,
                                                 weight_initializer=mx.init.Bilinear()))
                self._upconv.add(BatchNorm(momentum=0.9))
                self._upconv.add(Activation('relu'))

        self._upconv.initialize(ctx=ctx)
        logging.info(f"{self.__class__.__name__} weight init 완료")
Example 23
    def __init__(self, in_channels, ndf=64, n_layers=3, use_sigmoid=False, use_bias=False, istest = False, isthreeway = False):
        super(Discriminator, self).__init__()
        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0  # unused; the layers below hard-code kernel_size=5, padding=2
            self.model.add(Conv2D(channels=32, kernel_size=5, strides=2,
                                  padding=2, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=32))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                                  padding=2, in_channels=128))
            self.model.add(LeakyReLU(alpha=0.2))

            self.model.add(gluon.nn.Dense(1))

            if isthreeway:
                self.model.add(gluon.nn.Dense(3))
            elif use_sigmoid:
                self.model.add(Activation(activation='sigmoid'))
Example 24
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False,latent=256, usetanh = False ):
        super(Encoder, self).__init__()
        self.model = HybridSequential()
        kernel_size = 5
        padding = 0  # unused; the layers below hard-code kernel_size=5, padding=2
        self.model.add(Conv2D(channels=32, kernel_size=5, strides=2,
                              padding=2, in_channels=in_channels))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                              padding=2, in_channels=32))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                              padding=2, in_channels=64))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                              padding=2, in_channels=64))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                              padding=2, in_channels=64))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                              padding=2, in_channels=128))
        self.model.add(LeakyReLU(alpha=0.2))

        self.model.add(gluon.nn.Dense(latent))
        self.model.add(LeakyReLU(alpha=0.2))
Example 25
    def __init__(self,
                 name,
                 channels,
                 bn_mom=0.9,
                 act_type="relu",
                 nb_input_channels=34,
                 se_type=None):
        """
        Definition of the stem proposed by the alpha zero authors

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param nb_input_channels: Number of input channels of the board representation
        :param se_type: SqueezeExcitation type, choose one of [None, "cSE", "sSE", "csSE"] for no squeeze excitation,
        channelwise, spatialwise, or channel-spatial squeeze excitation, respectively
        """

        super(_StemAlphaZero, self).__init__(prefix=name + "_")

        self.body = HybridSequential(prefix="")

        with self.name_scope():

            if se_type:
                # start with a preceding batch norm layer
                self.body.add(BatchNorm(momentum=bn_mom))
                if se_type == "cSE":
                    # apply squeeze excitation
                    self.body.add(
                        _ChannelSqueezeExcitation("se0", nb_input_channels, 16,
                                                  act_type))
                elif se_type == "sSE":
                    self.body.add(_SpatialSqueezeExcitation("se0"))
                elif se_type == "csSE":
                    self.body.add(
                        _SpatialChannelSqueezeExcitation(
                            "se0", nb_input_channels, 1, act_type))
                else:
                    raise Exception(
                        'Unsupported Squeeze Excitation Module: Choose one of [None, "cSE", "sSE", "csSE"]'
                    )

            # add all layers to the stem
            self.body.add(
                Conv2D(channels=channels,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
Example 26
    def __init__(self,
                 name,
                 channels=1,
                 fc0=256,
                 bn_mom=0.9,
                 act_type="relu",
                 se_type=None):
        """
        Definition of the value head proposed by the alpha zero authors

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation in branch 0
        :param fc0: Number of units in Dense/Fully-Connected layer
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param se_type: SqueezeExcitation type, choose one of [None, "cSE", "sSE", "csSE"] for no squeeze excitation,
        channelwise, spatialwise, or channel-spatial squeeze excitation, respectively
        """

        super(_ValueHeadAlphaZero, self).__init__(prefix=name + "_")

        self.body = HybridSequential(prefix="")

        with self.name_scope():
            self.body.add(
                Conv2D(channels=channels, kernel_size=(1, 1), use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))

            if se_type:
                if se_type == "cSE":
                    # apply squeeze excitation
                    self.body.add(
                        _ChannelSqueezeExcitation("se0", channels, 16,
                                                  act_type))
                elif se_type == "sSE":
                    self.body.add(_SpatialSqueezeExcitation("se0"))
                elif se_type == "csSE":
                    self.body.add(
                        _SpatialChannelSqueezeExcitation(
                            "se0", channels, 1, act_type))
                else:
                    raise Exception(
                        'Unsupported Squeeze Excitation Module: Choose one of [None, "cSE", "sSE", "csSE"]'
                    )

            self.body.add(get_act(act_type))
            self.body.add(Flatten())
            self.body.add(Dense(units=fc0))
            self.body.add(get_act(act_type))
            self.body.add(Dense(units=1))
            self.body.add(get_act("tanh"))
Example 27
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu1 = LeakyReLU(alpha=0.2)

            conv2 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            conv3 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            norm3 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu3 = LeakyReLU(alpha=0.2)

            res_block = [
                conv1, norm1, relu1, conv2, norm2, relu2, conv3, norm3, relu3
            ]

            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)
Example 28
    def __init__(self, innerblock=None):
        super(outerpart, self).__init__()
        with self.name_scope():
            en_conv1 = Conv2D(channels=32,
                              kernel_size=4,
                              strides=2,
                              padding=1,
                              in_channels=3)
            en_relu1 = LeakyReLU(alpha=0.2)
            en_norm1 = BatchNorm(momentum=0.1,
                                 in_channels=32,
                                 prefix='en_norm1')

            de_relu1 = Activation(activation='relu')
            de_norm1 = BatchNorm(momentum=0.1,
                                 in_channels=3,
                                 prefix='de_norm1')
            de_conv1 = Conv2DTranspose(channels=3,
                                       kernel_size=4,
                                       strides=2,
                                       padding=1,
                                       in_channels=32)
            channel_trans = Conv2D(channels=1,
                                   in_channels=3,
                                   kernel_size=1,
                                   prefix='')
            encoder1 = [en_conv1, en_norm1, en_relu1]
            decoder1 = [de_conv1, de_norm1, de_relu1, channel_trans]

            self.encoder1 = HybridSequential()
            with self.encoder1.name_scope():
                for block in encoder1:
                    self.encoder1.add(block)
            self.innerblock = innerblock
            self.decoder1 = HybridSequential()
            with self.decoder1.name_scope():
                for block in decoder1:
                    self.decoder1.add(block)
Example 29
 def __init__(self, opts, num_filters, pool_type='avg'):
     super(TransitionBlock, self).__init__()
     self.pool_type = pool_type
     self.tblock = HybridSequential()
     self.tblock.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
     self.tblock.add(Activation(opts.activation))
     self.tblock.add(
         Conv2D(channels=int(num_filters * opts.reduction),
                kernel_size=(1, 1),
                strides=(1, 1),
                use_bias=opts.use_bias,
                padding=(0, 0)))
     if opts.drop_out > 0:
         self.tblock.add(Dropout(opts.drop_out))
Example 30
def build_discriminator(n_filters, n_channels, mx_ctx):
    netD = HybridSequential()
    with netD.name_scope():
        # Input is n_channels * 64 * 64
        netD.add(Conv2D(n_filters, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(n_filters * 2, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(BatchNorm())
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(n_filters * 4, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(BatchNorm())
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(n_filters * 8, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(BatchNorm())
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(1, 4, 1, 0, use_bias=False))

    netD.initialize(mx.init.Normal(0.02), ctx=mx_ctx)
    netD.hybridize()

    return netD
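A hedged usage sketch (assumes "import mxnet as mx" and the gluon.nn imports used above); note that n_channels only documents the expected input, since Gluon infers input channels on the first call:

netD = build_discriminator(n_filters=64, n_channels=3, mx_ctx=mx.cpu())
score = netD(mx.nd.random.uniform(shape=(8, 3, 64, 64)))  # -> (8, 1, 1, 1)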