def __init__(self,
                 inner_channels,
                 outer_channels,
                 inner_block=None,
                 innermost=False,
                 outermost=False,
                 use_dropout=False,
                 use_bias=False,
                 final_out=3):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outermost = outermost
            en_conv = Conv2D(channels=inner_channels,
                             kernel_size=4,
                             strides=2,
                             padding=1,
                             in_channels=outer_channels,
                             use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)
            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)

            if innermost:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + decoder
            elif outermost:
                de_conv = Conv2DTranspose(channels=final_out,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation(activation='tanh')]
                model = encoder + [inner_block] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_block] + decoder
            if use_dropout:
                model += [Dropout(rate=0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)
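For context, these skip units are meant to nest recursively: an innermost unit sits at the bottleneck, middle units wrap it, and an outermost unit wraps everything. A minimal sketch of that composition, assuming a pix2pix-style generator (the class name UnetGenerator and the depth/filter choices below are illustrative, not part of the snippet above):

from mxnet.gluon import HybridBlock

class UnetGenerator(HybridBlock):
    def __init__(self, in_channels, num_downs=8, ngf=64, use_dropout=True):
        super(UnetGenerator, self).__init__()
        # build from the bottleneck outwards
        unet = UnetSkipUnit(ngf * 8, ngf * 8, innermost=True)
        for _ in range(num_downs - 5):
            unet = UnetSkipUnit(ngf * 8, ngf * 8, unet, use_dropout=use_dropout)
        unet = UnetSkipUnit(ngf * 8, ngf * 4, unet)
        unet = UnetSkipUnit(ngf * 4, ngf * 2, unet)
        unet = UnetSkipUnit(ngf * 2, ngf, unet)
        # the outermost unit consumes the real input channels and emits final_out channels
        unet = UnetSkipUnit(ngf, in_channels, unet, outermost=True)
        with self.name_scope():
            self.model = unet

    def hybrid_forward(self, F, x):
        return self.model(x)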
Example #2
File: splat.py  Project: zhongtb/gluon-cv
 def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0),
              dilation=(1, 1), groups=1, radix=2, in_channels=None, r=2,
              norm_layer=BatchNorm, norm_kwargs=None, drop_ratio=0,
              *args, **kwargs):
     super(SplitAttentionConv, self).__init__()
     norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
     inter_channels = max(in_channels*radix//2//r, 32)
     self.radix = radix
     self.cardinality = groups
     self.conv = Conv2D(channels*radix, kernel_size, strides, padding, dilation,
                        groups=groups*radix, *args, in_channels=in_channels, **kwargs)
     self.use_bn = norm_layer is not None
     if self.use_bn:
         self.bn = norm_layer(in_channels=channels*radix, **norm_kwargs)
     self.relu = Activation('relu')
     self.fc1 = Conv2D(inter_channels, 1, in_channels=channels, groups=self.cardinality)
     if self.use_bn:
         self.bn1 = norm_layer(in_channels=inter_channels, **norm_kwargs)
     self.relu1 = Activation('relu')
     if drop_ratio > 0:
         self.drop = nn.Dropout(drop_ratio)
     else:
         self.drop = None
     self.fc2 = Conv2D(channels*radix, 1, in_channels=inter_channels, groups=self.cardinality)
     self.channels = channels
Example #3
    def __init__(self, opts):
        super(BasicBlock, self).__init__()
        self.bblock = HybridSequential()
        if opts.bottle_neck:
            if opts.norm_type == 'batch':
                self.bblock.add(NormLayer())
            elif opts.norm_type == 'group':
                self.bblock.add(GroupNorm())
            elif opts.norm_type == 'instance':
                self.bblock.add(InstanceNorm())
            if opts.activation in ['leaky']:
                self.bblock.add(LeakyReLU(alpha=opts.alpha))
            else:
                self.bblock.add(Activation(opts.activation))
            self.bblock.add(Conv3D(channels=int(opts.growth_rate * 4), kernel_size=(opts.zKernelSize, 1, 1),
                              strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 0, 0)))
            if opts.drop_out > 0:
                self.bblock.add(Dropout(opts.drop_out))
        if opts.norm_type == 'batch':
            self.bblock.add(NormLayer())
        elif opts.norm_type == 'group':
            self.bblock.add(GroupNorm(in_channels=int(opts.growth_rate * 4)))
        elif opts.norm_type == 'instance':
            self.bblock.add(InstanceNorm())

        if opts.activation in ['leaky']:
            self.bblock.add(LeakyReLU(opts.alpha))
        else:
            self.bblock.add(Activation(opts.activation))
        self.bblock.add(Conv3D(channels=int(opts.growth_rate), kernel_size=(opts.zKernelSize, 3, 3),
                          strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 1, 1)))
        if opts.drop_out > 0:
            self.bblock.add(Dropout(opts.drop_out))
Example #4
 def __init__(self, opts):
     super(BasicBlock, self).__init__()
     self.bblock = HybridSequential()
     if opts.bottle_neck:
         self.bblock.add(
             BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
         if not opts.trans_block:
             self.bblock.add(LeakyReLU(alpha=.2))
         else:
             self.bblock.add(Activation(opts.activation))
         self.bblock.add(
             Conv2D(channels=int(opts.growth_rate * 4),
                    kernel_size=(1, 1),
                    strides=(1, 1),
                    use_bias=opts.use_bias,
                    padding=(0, 0)))
         if opts.drop_out > 0:
             self.bblock.add(Dropout(opts.drop_out))
     self.bblock.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
     self.bblock.add(Activation(activation=opts.activation))
     self.bblock.add(
         Conv2D(channels=int(opts.growth_rate),
                kernel_size=(3, 3),
                strides=(1, 1),
                use_bias=opts.use_bias,
                padding=(1, 1)))
     if opts.drop_out > 0:
         self.bblock.add(Dropout(opts.drop_out))
Example #5
def build_generator(n_filters, n_channels, mx_ctx):
    netG = HybridSequential()
    with netG.name_scope():
        # Input is Z
        netG.add(Conv2DTranspose(n_filters * 8, kernel_size=4, strides=1, padding=0, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_filters * 4, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_filters * 2, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_filters, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_channels, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("tanh"))

    netG.initialize(mx.init.Normal(0.02), ctx=mx_ctx)
    netG.hybridize()
    return netG
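A quick, hypothetical usage of build_generator; the latent depth of 100 and the batch size are illustrative assumptions, not taken from the snippet:

import mxnet as mx

ctx = mx.cpu()
netG = build_generator(n_filters=64, n_channels=3, mx_ctx=ctx)
noise = mx.nd.random.normal(shape=(8, 100, 1, 1), ctx=ctx)  # "Input is Z"
fake = netG(noise)
print(fake.shape)  # (8, 3, 64, 64): spatial size grows 1 -> 4 -> 8 -> 16 -> 32 -> 64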
Example #6
    def __init__(self, block, layers, channels, **kwargs):
        super(ResNetV2, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():

            # To use pretrained weights, we have no choice but to use
            # self.features = nn.HybridSequential(prefix='') (the prefix value itself does not matter).
            self.features = HybridSequential(prefix='')
            self.features.add(BatchNorm(
                scale=False, center=False))  # one open question: why a BatchNorm at the very front?
            self.features.add(Conv2D(channels[0], 7, 2, 3, use_bias=False))
            self.features.add(BatchNorm())
            self.features.add(Activation('relu'))
            self.features.add(MaxPool2D(3, 2, 1))  # 4th

            in_channels = channels[0]
            # 5(c2),6(c3),7(c4),8
            for i, num_layer in enumerate(layers):
                stride = 1 if i == 0 else 2
                self.features.add(
                    self._make_layer(block,
                                     num_layer,
                                     channels[i + 1],
                                     stride,
                                     i + 1,
                                     in_channels=in_channels))
                in_channels = channels[i + 1]
            self.features.add(BatchNorm())
            self.features.add(Activation('relu'))  # 10(c5)
Example #7
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, latent=256, usetanh=False):
            super(Decoder, self).__init__()
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0
            nf_mult = 2 ** n_layers
            # use integer division so the channel counts stay ints
            self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=latent,
                                           use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
            #self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Activation(activation='relu'))
            for n in range(1, n_layers):
                nf_mult = nf_mult // 2
                self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                               padding=padding, in_channels=ndf * nf_mult,
                                               use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
                #self.model.add(LeakyReLU(alpha=0.2))
                if n == 2:
                    self.model.add(Dropout(rate=0.5))
                self.model.add(Activation(activation='relu'))
            self.model.add(Conv2DTranspose(channels=in_channels, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=ndf))

            #self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Activation(activation='tanh'))
Example #8
    def __init__(self, layers, filters):
        super(VGGAtrousBase, self).__init__()
        with self.name_scope():
            '''
            Because the pre-trained weights are imported from Caffe, the init_scale below is needed:
            the Caffe pre-trained model expects inputs in the 0 ~ 255 range.
            '''
            init_scale = mx.nd.array([0.229, 0.224, 0.225]).reshape(
                (1, 3, 1, 1)) * 255
            self.init_scale = self.params.get_constant('init_scale',
                                                       init_scale)

            # layers : [2, 2, 3, 3, 3], filters [64, 128, 256, 512, 512])
            self.stages = HybridSequential()
            for layer, filter in zip(layers, filters):
                stage = HybridSequential(prefix='')
                with stage.name_scope():
                    for _ in range(layer):
                        stage.add(
                            Conv2D(filter,
                                   kernel_size=3,
                                   padding=1,
                                   weight_initializer=mx.init.Xavier(
                                       rnd_type='gaussian',
                                       factor_type='out',
                                       magnitude=3),
                                   bias_initializer='zeros'))
                        stage.add(Activation('relu'))
                self.stages.add(stage)

            # fc6, fc7 converted to dilated convolution layers - pooling is done in hybrid_forward
            stage = HybridSequential(prefix='dilated_')
            with stage.name_scope():
                # conv6(fc6) - dilated
                stage.add(
                    Conv2D(1024,
                           kernel_size=3,
                           padding=6,
                           dilation=6,
                           weight_initializer=mx.init.Xavier(
                               rnd_type='gaussian',
                               factor_type='out',
                               magnitude=3),
                           bias_initializer='zeros'))
                stage.add(Activation('relu'))

                # conv7(fc7)
                stage.add(
                    Conv2D(1024,
                           kernel_size=1,
                           weight_initializer=mx.init.Xavier(
                               rnd_type='gaussian',
                               factor_type='out',
                               magnitude=3),
                           bias_initializer='zeros'))
                stage.add(Activation('relu'))

            self.stages.add(stage)
            self.norm4 = Normalize(n_channel=filters[3], initial=20, eps=1e-5)
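The init_scale constant is typically consumed in hybrid_forward: the 0-1 normalized input is rescaled back to the 0-255 range the Caffe weights expect. A simplified sketch of that step only (not the actual implementation; the GluonCV forward also interleaves pooling between stages and applies self.norm4):

    def hybrid_forward(self, F, x, init_scale):
        # undo the 0-1 normalization before running the Caffe-initialized stages
        x = F.broadcast_mul(x, init_scale)
        outputs = []
        for stage in self.stages:
            x = stage(x)
            outputs.append(x)
        return outputs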
Example #9
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, usetanh = False ):
        super(CEGeneratorP, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0 #int(np.ceil((kernel_size - 1) / 2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            nf_mult = 2
            nf_mult_prev = 1

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = 2 ** n
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult, use_global_stats=istest))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = 2 ** n_layers
            self.model.add(Conv2D(channels=4096, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            #self.model.add(BatchNorm(momentum=0.1, in_channels =128, use_global_stats=istest))
            if usetanh:
                self.model.add(Activation(activation='tanh'))
            else:
                self.model.add(LeakyReLU(alpha=0.2))

            # Decoder
            self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=4096,
                                           use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
            #self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Activation(activation='relu'))
            for n in range(1, n_layers):
                nf_mult = nf_mult // 2
                self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                               padding=padding, in_channels=ndf * nf_mult,
                                               use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
                #self.model.add(LeakyReLU(alpha=0.2))
                if n==2:
                      self.model.add(Dropout(rate=0.5))
                self.model.add(Activation(activation='relu'))
            self.model.add(Conv2DTranspose(channels=in_channels, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=ndf))

            #self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Activation(activation='tanh'))
Example #10
    def __init__(self, base=18,
                 deconv_channels=(256, 128, 64),
                 deconv_kernels=(4, 4, 4),
                 pretrained=True,
                 root=os.path.join(os.getcwd(), 'models'),
                 use_dcnv2=False,
                 ctx=mx.cpu()):

        mxnet_version = float(mx.__version__[0:3])
        if mxnet_version < 1.5:
            logging.error("please upgrade mxnet version above 1.5.x")
            raise EnvironmentError

        super(UpConvResNet, self).__init__()
        self._use_dcnv2 = use_dcnv2
        self._resnet = get_resnet(base, pretrained=pretrained, root=root, ctx=ctx)
        self._upconv = HybridSequential('')
        with self._upconv.name_scope():
            for channel, kernel in zip(deconv_channels, deconv_kernels):
                kernel, padding, output_padding = self._get_conv_argument(kernel)
                if self._use_dcnv2:
                    '''
                    in paper, we first change the channels of the three upsampling layers to
                    256, 128, 64, respectively, to save computation, we then add one 3 x 3 deformable convolutional layer
                    before each up-convolution layer with channel 256, 128, 64 
                    '''
                    assert hasattr(contrib.cnn, 'ModulatedDeformableConvolution'), \
                        "No ModulatedDeformableConvolution found in mxnet, consider upgrade to mxnet 1.6.0..."
                    self._upconv.add(contrib.cnn.ModulatedDeformableConvolution(channels=channel,
                                                                                kernel_size=3,
                                                                                strides=1,
                                                                                padding=1,
                                                                                use_bias=False,
                                                                                num_deformable_group=1))
                else:
                    self._upconv.add(Conv2D(channels=channel,
                                            kernel_size=3,
                                            strides=1,
                                            padding=1, use_bias=False))
                self._upconv.add(BatchNorm(momentum=0.9))
                self._upconv.add(Activation('relu'))
                self._upconv.add(Conv2DTranspose(channels=channel,
                                                 kernel_size=kernel,
                                                 strides=2,
                                                 padding=padding,
                                                 output_padding=output_padding,
                                                 use_bias=False,
                                                 weight_initializer=mx.init.Bilinear()))
                self._upconv.add(BatchNorm(momentum=0.9))
                self._upconv.add(Activation('relu'))

        self._upconv.initialize(ctx=ctx)
        logging.info(f"{self.__class__.__name__} weight init complete")
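The helper self._get_conv_argument is not shown in this snippet. A plausible implementation (an assumption, mirroring common CenterNet code) maps the transposed-convolution kernel size to the padding/output_padding pair that gives exact 2x upsampling at stride 2:

    def _get_conv_argument(self, kernel):
        """out = (in - 1) * 2 - 2 * padding + kernel + output_padding"""
        if kernel == 4:
            padding, output_padding = 1, 0
        elif kernel == 3:
            padding, output_padding = 1, 1
        elif kernel == 2:
            padding, output_padding = 0, 0
        else:
            raise ValueError("unsupported deconv kernel: {}".format(kernel))
        return kernel, padding, output_padding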
Example #11
 def __init__(self,
              network,
              add_filters,
              norm_layer=BatchNorm,
              norm_kwargs=None,
              use_bn=False,
              reduce_ratio=1.0,
              min_depth=128,
              **kwargs):
     super(ResNetV1bSSD, self).__init__()
     assert network.endswith('v1b')
     if norm_kwargs is None:
         norm_kwargs = {}
     res = get_model(network, **kwargs)
     weight_init = mx.init.Xavier(rnd_type='gaussian',
                                  factor_type='out',
                                  magnitude=2)
     with self.name_scope():
         self.stage1 = HybridSequential('stage1')
         for l in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2']:
             self.stage1.add(getattr(res, l))
         self.stage2 = HybridSequential('stage2')
         self.stage2.add(res.layer3)
         # set stride from (2, 2) -> (1, 1) in first conv of layer3
         self.stage2[0][0].conv1._kwargs['stride'] = (1, 1)
         # also the residual path
         self.stage2[0][0].downsample[0]._kwargs['stride'] = (1, 1)
         self.stage2.add(res.layer4)
         self.more_stages = HybridSequential('more_stages')
         for i, num_filter in enumerate(add_filters):
             stage = HybridSequential('more_stages_' + str(i))
             num_trans = max(min_depth,
                             int(round(num_filter * reduce_ratio)))
             stage.add(
                 Conv2D(channels=num_trans,
                        kernel_size=1,
                        use_bias=not use_bn,
                        weight_initializer=weight_init))
             if use_bn:
                 stage.add(norm_layer(**norm_kwargs))
             stage.add(Activation('relu'))
             padding = 0 if i == len(add_filters) - 1 else 1
             stage.add(
                 Conv2D(channels=num_filter,
                        kernel_size=3,
                        strides=2,
                        padding=padding,
                        use_bias=not use_bn,
                        weight_initializer=weight_init))
             if use_bn:
                 stage.add(norm_layer(**norm_kwargs))
             stage.add(Activation('relu'))
             self.more_stages.add(stage)
Example #12
    def __init__(self, in_channels, ndf=64, n_layers=3, use_sigmoid=False, use_bias=False, istest = False, isthreeway = False):
        super(LatentDiscriminator, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            self.model.add(gluon.nn.Dense(128))
            self.model.add(Activation(activation='relu'))
            self.model.add(gluon.nn.Dense(64))
            self.model.add(Activation(activation='relu'))
            self.model.add(gluon.nn.Dense(32))
            self.model.add(Activation(activation='relu'))
            self.model.add(gluon.nn.Dense(16))
            self.model.add(Activation(activation='sigmoid'))
Example #13
 def __init__(self,
              channels,
              kernel_size,
              strides=1,
              padding=0,
              dilation=1,
              groups=1,
              radix=2,
              *args,
              in_channels=None,
              r=2,
              norm_layer=BatchNorm,
              norm_kwargs=None,
              drop_ratio=0,
              **kwargs):
     super().__init__()
     norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
     inter_channels = max(in_channels * radix // 2 // r, 32)
     self.radix = radix
     self.cardinality = groups
     self.conv = Conv1D(channels * radix,
                        kernel_size,
                        strides,
                        padding,
                        dilation,
                        groups=groups * radix,
                        *args,
                        in_channels=in_channels,
                        **kwargs)
     if USE_BN:
         self.bn = norm_layer(in_channels=channels * radix, **norm_kwargs)
     self.relu = Activation('relu')
     self.fc1 = Conv1D(inter_channels,
                       1,
                       in_channels=channels,
                       groups=self.cardinality)
     if USE_BN:
         self.bn1 = norm_layer(in_channels=inter_channels, **norm_kwargs)
     self.relu1 = Activation('relu')
     if drop_ratio > 0:
         self.drop = nn.Dropout(drop_ratio)
     else:
         self.drop = None
     self.fc2 = Conv1D(channels * radix,
                       1,
                       in_channels=inter_channels,
                       groups=self.cardinality)
     self.channels = channels
     self.rsoftmax = rSoftMax(radix, groups)
Example #14
File: rl.py  Project: tsuberim/RL
    def __init__(self, n_dims=128, **kwargs):
        PersistentBlock.__init__(self, **kwargs)
        if n_dims < 16:
            raise ValueError('`n_dims` must be at least 16 (given: %d)' %
                             n_dims)

        self.encoder = Sequential()
        self.encoder.add(BatchNorm(), Conv2D(int(n_dims / 16), 6, (4, 3)),
                         Activation('relu'), Conv2D(int(n_dims / 8), 3),
                         Activation('relu'), Conv2D(int(n_dims / 2), 3),
                         BatchNorm(), MaxPool2D(), Activation('relu'),
                         Conv2D(int(n_dims),
                                3), MaxPool2D(), Activation('relu'),
                         Conv2D(int(n_dims), 3), MaxPool2D(),
                         Activation('relu'), Flatten())
Example #15
    def __init__(self,
                 base=18,
                 heads=OrderedDict(),
                 head_conv_channel=64,
                 pretrained=True,
                 root=os.path.join(os.getcwd(), 'models'),
                 use_dcnv2=False,
                 ctx=mx.cpu()):
        super(CenterNet, self).__init__()

        with self.name_scope():
            self._base_network = get_upconv_resnet(base=base,
                                                   pretrained=pretrained,
                                                   root=root,
                                                   use_dcnv2=use_dcnv2,
                                                   ctx=ctx)
            self._heads = HybridSequential('heads')
            for name, values in heads.items():
                head = HybridSequential(name)
                num_output = values['num_output']
                bias = values.get('bias', 0.0)
                head.add(
                    Conv2D(head_conv_channel,
                           kernel_size=(3, 3),
                           padding=(1, 1),
                           use_bias=True))
                head.add(Activation('relu'))
                head.add(
                    Conv2D(num_output,
                           kernel_size=(1, 1),
                           use_bias=True,
                           bias_initializer=mx.init.Constant(bias)))
                self._heads.add(head)
        self._heads.initialize(ctx=ctx)
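An illustrative (assumed) heads configuration for this constructor; the exact head names, output sizes and bias values depend on the surrounding training code, which is not part of this snippet:

from collections import OrderedDict

heads = OrderedDict([
    ('heatmap', {'num_output': 20, 'bias': -2.19}),  # per-class center heatmaps
    ('offset',  {'num_output': 2}),                  # sub-pixel center offsets
    ('wh',      {'num_output': 2}),                  # box width / height
])
net = CenterNet(base=18, heads=heads, pretrained=False)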
Example #16
File: dropfile.py  Project: xindd/GCN
    def __init__(self,
                 entrylist,
                 gene_to_index,
                 entry_to_gene,
                 activation='relu',
                 **kwargs):
        super().__init__(**kwargs)

        with self.name_scope():
            self.layer_list = nn.HybridSequential()
            for index, value in enumerate(entrylist):
                genelist = entry_to_gene[value].split(' ')
                # w = self.params.get(value, shape=(1, len(genelist)))
                self.layer_list.add(
                    FeaturesTransform_layer(index=get_dict_values(
                        gene_to_index, genelist),
                                            value=value,
                                            length=len(genelist)))
            self.entry_b = self.params.get('entry_b',
                                           shape=(len(entrylist), 1))

            if activation == 'identity':
                self.activation = lambda X: X
            else:
                self.activation = Activation(activation)
Example #17
def preNeuralNet(fs, T, ctx, template_block, margin, learning_rate=0.003):
    net = gluon.nn.Sequential()
    with net.name_scope(
    ):  # Used to disambiguate saving and loading net parameters
        net.add(
            MatchedFilteringLayer(
                mod=fs * T,
                fs=fs,
                template_H1=template_block[:, :1],  #.as_in_context(ctx),
                template_L1=template_block[:, -1:]  #.as_in_context(ctx) 
            ))
        net.add(CutHybridLayer(margin=margin))
        net.add(Conv2D(channels=16, kernel_size=(1, 3), activation='relu'))
        net.add(MaxPool2D(pool_size=(1, 4), strides=2))
        net.add(Conv2D(channels=32, kernel_size=(1, 3), activation='relu'))
        net.add(MaxPool2D(pool_size=(1, 4), strides=2))
        net.add(Flatten())
        net.add(Dense(32))
        net.add(Activation('relu'))
        net.add(Dense(2))

    net.initialize(mx.init.Xavier(magnitude=2.24),
                   ctx=ctx[-1],
                   force_reinit=True)  # Initialize parameters of all layers
    net.summary(nd.random.randn(1, 2, 2, 1, fs * T, ctx=ctx[-1]))
    net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx,
                   force_reinit=True)  # Initialize parameters of all layers
    # cross-entropy loss function
    # loss = gloss.SoftmaxCrossEntropyLoss()
    # The cross-entropy loss for binary classification.
    bloss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate})
    return net, bloss, trainer
Example #18
    def __init__(self, channels, bn_mom, act_type, unit_name):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        """
        super(ResidualBlock, self).__init__()
        self.act_type = act_type
        self.unit_name = unit_name

        self.body = HybridSequential()

        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv0" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn0" % self.unit_name))
        self.body.add(
            Activation(self.act_type,
                       prefix="%s_%s0" % (self.unit_name, self.act_type)))

        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv1" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn1" % self.unit_name))
Example #19
 def __init__(self, in_channels, n_layers=3, ndf=64, use_sigmoid=False, use_bias=False):
     super(Discriminator, self).__init__()
     # the following block configures a standard 2x-downsampling convolution
     kernel_size=4
     padding = int(np.ceil((kernel_size-1)/2))
     self.model = nn.HybridSequential()
     # first, a convolution maps the input to the first-layer feature map
     self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2, padding=padding, use_bias=use_bias, in_channels=in_channels))
     self.model.add(LeakyReLU(alpha=0.2))
     
     nf_mult = 1
     for n in range(1, n_layers):
         nf_mult_prev = nf_mult
         nf_mult = min(2**n, 8)
         self.model.add(
             Conv2D(channels=ndf*nf_mult, kernel_size=kernel_size, strides=2, padding=padding, use_bias=use_bias, in_channels=ndf*nf_mult_prev),
             BatchNorm(momentum=0.1, in_channels=ndf*nf_mult),
             LeakyReLU(alpha=0.2))
     
     # if there are few layers and the channel count has not reached 512, widen a bit more
     nf_mult_prev = nf_mult
     nf_mult = min(2**n_layers, 8)
     self.model.add(
         Conv2D(channels=ndf*nf_mult, kernel_size=kernel_size, strides=1, padding=padding, use_bias=use_bias, in_channels=ndf*nf_mult_prev),
         BatchNorm(momentum=0.1, in_channels=ndf*nf_mult),
         LeakyReLU(alpha=0.2))
     # output: why is the output channel count set to 1?
     self.model.add(Conv2D(channels=1, kernel_size=kernel_size, strides=1, padding=padding, use_bias=True, in_channels=ndf*nf_mult))
     if use_sigmoid:
         self.model.add(Activation('sigmoid'))
Example #20
def lateral_conv(channels, kernel_size, strides, padding):
    lateral = HybridSequential()
    with lateral.name_scope():
        lateral.add(Conv2D(channels, kernel_size, strides, padding))
        lateral.add(BatchNorm(momentum=0.9, epsilon=1e-5))
        lateral.add(Activation('relu'))
    return lateral
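Example usage with illustrative shapes: a 1x1 lateral connection mapping a 512-channel feature map to 256 channels, as in an FPN-style neck (the shapes and channel counts are assumptions):

import mxnet as mx

lat = lateral_conv(channels=256, kernel_size=1, strides=1, padding=0)
lat.initialize()
out = lat(mx.nd.zeros((1, 512, 32, 32)))
print(out.shape)  # (1, 256, 32, 32)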
Example #21
    def __init__(self, opts):
        super(DenseMultipathNet, self).__init__()
        opts.units = opts.units[:opts.num_stage]
        assert (len(opts.units) == opts.num_stage)

        num_filters = opts.init_channels
        num_filters_list = []
        for stage in range(opts.num_stage):
            num_filters += opts.units[stage] * opts.growth_rate
            num_filters = int(floor(num_filters * opts.reduction))
            num_filters_list.append(num_filters)

        self.net = HybridSequential()
        with self.net.name_scope():
            self.blocks = EncoderDecoderUnit(opts, num_filters_list[opts.num_stage-1], opts.num_stage-1, innermost=True)
            for stage in range(opts.num_stage-2, -1, -1):
                self.blocks = EncoderDecoderUnit(opts, num_filters_list[stage], stage, inner_block=self.blocks)
            self.net.add(FirstBlock(opts))
            self.net.add(self.blocks)
            self.net.add(ResDBlock(opts, num_filters=16))
            if opts.norm_type == 'batch':
                self.net.add(NormLayer())
            elif opts.norm_type == 'group':
                self.net.add(GroupNorm())
            elif opts.norm_type == 'instance':
                self.net.add(InstanceNorm())

            if opts.activation in ['leaky']:
                self.net.add(LeakyReLU(opts.alpha))
            else:
                self.net.add(Activation(opts.activation))
            self.net.add(Conv3D(kernel_size=(1, 1, 1), channels=2, use_bias=opts.use_bias))
            self.net.add(Softmax())
Example #22
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides=(1, 1),
                 padding=(0, 0),
                 dilation=(1, 1),
                 groups=1,
                 use_bias=False):
        super(SplitAttentionConv, self).__init__()
        radix = 2
        reduction = 2

        assert (in_channels == out_channels)

        self.radix = radix
        self.cardinality = groups
        self.out_channels = out_channels

        self.conv = Conv2D(
            out_channels * radix,
            kernel_size,
            strides,
            padding,
            dilation,
            groups=(groups * radix),
            in_channels=in_channels,
            use_bias=use_bias)
        self.bn = BatchNorm(in_channels=out_channels * radix)
        self.relu = Activation('relu')

        self.sa = SABlock(
            out_channels=out_channels,
            groups=groups,
            radix=radix)
Example #23
    def __init__(self, in_channels, ndf=64, n_layers=3, use_sigmoid=False, use_bias=False, istest = False, isthreeway = False):
        super(Discriminator, self).__init__()
        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0 #int(np.ceil((kernel_size - 1) / 2))
            self.model.add(Conv2D(channels=32, kernel_size=5, strides=2,
                                  padding=2, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=32))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                                  padding=2, in_channels=128))
            self.model.add(LeakyReLU(alpha=0.2))

            self.model.add(gluon.nn.Dense(1))

            if isthreeway:
                self.model.add(gluon.nn.Dense(3))
            # elif use_sigmoid:
            self.model.add(Activation(activation='sigmoid'))
Example #24
def get_activation(activation):
    if activation is None:
        return Identity()
    elif type(activation) is str:
        return Activation(activation)
    else:
        return activation
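get_activation accepts None, a string, or an already-constructed block; a quick illustration of the three branches (assumes Identity, Activation and LeakyReLU are imported as in the snippets above):

act_none = get_activation(None)             # Identity(), a pass-through
act_relu = get_activation('relu')           # Activation('relu')
act_leaky = get_activation(LeakyReLU(0.1))  # returned unchanged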
Example #25
    def __init__(self, count: int, depth: int, frac = 100) -> None:
        super(Network, self).__init__()

        self._count = count
        self._depth = depth

        with self.name_scope():           
            self.add(Dense(int(1475/frac)))

            self.add(LeakyReLU(alpha=0.2))

            layer = Identity(512, 512)
            layer = Skip(int(2949/frac), int(5898/frac), layer)

            layer.block.add(Dropout(0.5))
            # 48 x 48 x 64  =  147456
            # 24 x 24 x 512 =  294912
            # 48 x 48 x 512 = 1179648

            layer = Skip(int(589824/frac), int(1179648/frac), layer)
            layer = Skip(int(147456/frac), int(294912/frac), layer)
            layer = Skip(int(36864/frac), int(73728/frac), layer)

            self.add(layer)
            self.add(Dense(int(27648)))
        
            self.add(Activation("sigmoid"))
Example #26
    def __init__(self, count: int, depth: int) -> None:
        super(Network, self).__init__()

        self._count = count
        self._depth = depth

        with self.name_scope():
            self.add(Conv2D(64, 4, 2, 1, in_channels=depth))
            self.add(LeakyReLU(alpha=0.2))

            layer = Identity(512, 512)
            layer = Skip(512, 512, layer)

            for _ in range(0):
                layer = Skip(512, 512, layer)

                layer.block.add(Dropout(0.5))

            layer = Skip(256, 256, layer)
            layer = Skip(128, 128, layer)
            layer = Skip(64, 64, layer)

            self.add(layer)
            self.add(Conv2DTranspose(count, 4, 2, 1, in_channels=128))
            self.add(Activation("sigmoid"))

        for param in self.collect_params().values():
            param.initialize()
            if "bias" in param.name:
                param.set_data(zeros(param.data().shape))
            elif "gamma" in param.name:
                param.set_data(random_normal(1, 0.02, param.data().shape))
            elif "weight" in param.name:
                param.set_data(random_normal(0, 0.02, param.data().shape))
Example #27
File: P2PGAN.py  Project: wshaow/GAN
    def __init__(self, in_channels, ndf=64, n_layers=3, use_sigmoid=False, use_bias=False):
        super(Discriminator, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 4
            padding = int(np.ceil((kernel_size - 1)/2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = min(2 ** n, 8)
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n_layers, 8)
            self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=1, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult))
            if use_sigmoid:
                self.model.add(Activation(activation='sigmoid'))
Example #28
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, latent=256, usetanh=False):
            super(Encoder, self).__init__()
            usetanh = True
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0  #int(np.ceil((kernel_size - 1) / 2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            nf_mult = 2
            nf_mult_prev = 1

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = 2 ** n
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult, use_global_stats=istest))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = 2 ** n_layers
            self.model.add(Conv2D(channels=latent, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels =latent, use_global_stats=istest))
            if usetanh:
                self.model.add(Activation(activation='tanh'))
            else:
                self.model.add(LeakyReLU(alpha=0.2))
Example #29
 def __init__(self, layers, filters, extras):
     super(VGGAtrousExtractor, self).__init__(layers, filters)
     '''
     extra_spec = {
     300: [((256, 1, 1, 0), (512, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 1, 0)),
           ((128, 1, 1, 0), (256, 3, 1, 0))],
 
     512: [((256, 1, 1, 0), (512, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 4, 1, 1))],
     '''
     # out_height = floor((height+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
     with self.name_scope():
         self.extras = HybridSequential()
         for i, config in enumerate(extras):
             extra = HybridSequential(prefix='extra%d_' % (i))
             with extra.name_scope():
                 for channels, kernel, strides, padding in config:
                     extra.add(
                         Conv2D(channels=channels,
                                kernel_size=kernel,
                                strides=strides,
                                padding=padding,
                                weight_initializer=mx.init.Xavier(
                                    rnd_type='gaussian',
                                    factor_type='out',
                                    magnitude=3),
                                bias_initializer='zeros'))
                     extra.add(Activation('relu'))
             self.extras.add(extra)
Example #30
 def __init__(self, **kwargs):
     super(GeneratorV2, self).__init__(**kwargs)
     with self.name_scope():
         self.add(
             # input (batch, channel, 1, 1)
             gluon.nn.Conv2DTranspose(512,
                                      kernel_size=4,
                                      strides=1,
                                      padding=0,
                                      use_bias=False),
             gluon.nn.BatchNorm(),
             gluon.nn.Activation('relu'),
             # output (batch, 512, 4, 4)
             gluon.nn.Conv2DTranspose(256,
                                      kernel_size=4,
                                      strides=2,
                                      padding=1,
                                      use_bias=False),
             gluon.nn.BatchNorm(),
             gluon.nn.Activation('relu'),
             # output (batch, 256, 8, 8)
             gluon.nn.Conv2DTranspose(128,
                                      kernel_size=4,
                                      strides=2,
                                      padding=1,
                                      use_bias=False),
             gluon.nn.BatchNorm(),
             gluon.nn.Activation('relu'),
             # output (batch, 128, 16, 16)
             gluon.nn.Conv2DTranspose(128,
                                      kernel_size=4,
                                      strides=2,
                                      padding=1,
                                      use_bias=False),
             gluon.nn.BatchNorm(),
             gluon.nn.Activation('relu'),
             # output (batch, 128, 32, 32)
             gluon.nn.Conv2DTranspose(64,
                                      kernel_size=4,
                                      strides=2,
                                      padding=1,
                                      use_bias=False),
             gluon.nn.BatchNorm(),
             gluon.nn.Activation('relu'),
             # output (batch, 64, 64, 64)
             InceptionBlock(64, 32),
             gluon.nn.Conv2DTranspose(32,
                                      kernel_size=4,
                                      strides=2,
                                      padding=1,
                                      use_bias=False),
             gluon.nn.BatchNorm(),
             gluon.nn.Activation('relu'),
             InceptionBlock(32, 16),
             gluon.nn.Conv2DTranspose(3,
                                      kernel_size=4,
                                      strides=2,
                                      padding=1,
                                      use_bias=False),
             Activation('tanh'))
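Hypothetical usage of GeneratorV2: it maps a (batch, latent, 1, 1) noise tensor to a tanh image. The latent depth of 100 is an assumption, and the final spatial size quoted below (256 x 256) holds only if the project's InceptionBlock preserves spatial dimensions:

import mxnet as mx

netG = GeneratorV2()
netG.initialize()
z = mx.nd.random.normal(shape=(4, 100, 1, 1))
img = netG(z)
print(img.shape)  # expected (4, 3, 256, 256), values in [-1, 1]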