Example #1
    def __init__(self, layers, filters):
        super(VGGAtrousBase, self).__init__()
        with self.name_scope():
            '''
            # Because the pre-trained weights are imported from caffe, the init_scale below is said to be required
            -> caffe's pre-trained model expects inputs on a 0 ~ 255 scale
            '''
            init_scale = mx.nd.array([0.229, 0.224, 0.225]).reshape(
                (1, 3, 1, 1)) * 255
            self.init_scale = self.params.get_constant('init_scale',
                                                       init_scale)

            # layers: [2, 2, 3, 3, 3], filters: [64, 128, 256, 512, 512]
            self.stages = HybridSequential()
            for layer, filter in zip(layers, filters):
                stage = HybridSequential(prefix='')
                with stage.name_scope():
                    for _ in range(layer):
                        stage.add(
                            Conv2D(filter,
                                   kernel_size=3,
                                   padding=1,
                                   weight_initializer=mx.init.Xavier(
                                       rnd_type='gaussian',
                                       factor_type='out',
                                       magnitude=3),
                                   bias_initializer='zeros'))
                        stage.add(Activation('relu'))
                self.stages.add(stage)

            # fc6, fc7 converted to dilated convolution layers - pooling is done in hybrid_forward
            stage = HybridSequential(prefix='dilated_')
            with stage.name_scope():
                # conv6(fc6) - dilated
                stage.add(
                    Conv2D(1024,
                           kernel_size=3,
                           padding=6,
                           dilation=6,
                           weight_initializer=mx.init.Xavier(
                               rnd_type='gaussian',
                               factor_type='out',
                               magnitude=3),
                           bias_initializer='zeros'))
                stage.add(Activation('relu'))

                # conv7(fc7)
                stage.add(
                    Conv2D(1024,
                           kernel_size=1,
                           weight_initializer=mx.init.Xavier(
                               rnd_type='gaussian',
                               factor_type='out',
                               magnitude=3),
                           bias_initializer='zeros'))
                stage.add(Activation('relu'))

            self.stages.add(stage)
            self.norm4 = Normalize(n_channel=filters[3], initial=20, eps=1e-5)
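For context, a minimal standalone sketch (not part of the snippet above, using only the values it defines) of what init_scale does: it rescales a 0-1 normalized input back onto the 0-255 per-channel statistics the caffe weights expect, typically via a broadcast multiply in hybrid_forward.

import mxnet as mx

# Hypothetical check of the per-channel rescaling the constant performs.
init_scale = mx.nd.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1)) * 255
x = mx.nd.random.uniform(shape=(1, 3, 300, 300))  # input normalized to [0, 1]
x_rescaled = mx.nd.broadcast_mul(x, init_scale)   # back on the 0-255 scale
print(x_rescaled.max())                           # at most 255 * 0.229, about 58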
Example #2
class Res_Block(HybridBlock):
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            relu1 = LeakyReLU(alpha=0.2)
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            conv2 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            res_block = [conv1, norm1, relu1, conv2, norm2, relu2]
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)

    def hybrid_forward(self, F, x):
        residual = x
        x = self.res(x)
        x = x + residual
        return x
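A quick usage sketch, assuming the Gluon imports the class relies on (HybridBlock, HybridSequential, Conv2D, BatchNorm, LeakyReLU from mxnet.gluon.nn): the kernel-3, stride-1, padding-1 convolutions preserve spatial size, so the skip addition is always shape-compatible.

import mxnet as mx

block = Res_Block(outer_channels=64)
block.initialize()
y = block(mx.nd.random.uniform(shape=(1, 64, 32, 32)))
print(y.shape)  # (1, 64, 32, 32): shape is preserved end to end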
Example #3
 def __init__(self, layers, filters, extras):
     super(VGGAtrousExtractor, self).__init__(layers, filters)
     '''
     extra_spec = {
     300: [((256, 1, 1, 0), (512, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 1, 0)),
           ((128, 1, 1, 0), (256, 3, 1, 0))],
 
     512: [((256, 1, 1, 0), (512, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 4, 1, 1))]}
     '''
     # out_height = floor((height+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
     with self.name_scope():
         self.extras = HybridSequential()
         for i, config in enumerate(extras):
             extra = HybridSequential(prefix='extra%d_' % (i))
             with extra.name_scope():
                 for channels, kernel, strides, padding in config:
                     extra.add(
                         Conv2D(channels=channels,
                                kernel_size=kernel,
                                strides=strides,
                                padding=padding,
                                weight_initializer=mx.init.Xavier(
                                    rnd_type='gaussian',
                                    factor_type='out',
                                    magnitude=3),
                                bias_initializer='zeros'))
                     extra.add(Activation('relu'))
             self.extras.add(extra)
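A worked instance of the out_height formula quoted in the comment above, applied to the first 300-input extra stage ((256, 1, 1, 0) followed by (512, 3, 2, 1)); the starting size of 19 is a hypothetical feature-map height chosen for illustration.

from math import floor

def out_size(h, kernel, stride, padding, dilation=1):
    return floor((h + 2 * padding - dilation * (kernel - 1) - 1) / stride) + 1

h = out_size(19, kernel=1, stride=1, padding=0)  # 1x1 conv: 19 -> 19
h = out_size(h, kernel=3, stride=2, padding=1)   # 3x3 stride-2 conv: 19 -> 10
print(h)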
Example #4
def build_generator(n_filters, n_channels, mx_ctx):
    netG = HybridSequential()
    with netG.name_scope():
        # Input is Z
        netG.add(Conv2DTranspose(n_filters * 8, kernel_size=4, strides=1, padding=0, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_filters * 4, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_filters * 2, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_filters, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("relu"))

        netG.add(Conv2DTranspose(n_channels, kernel_size=4, strides=2, padding=1, use_bias=False))
        netG.add(BatchNorm())
        netG.add(Activation("tanh"))

    netG.initialize(mx.init.Normal(0.02), ctx=mx_ctx)
    netG.hybridize()
    return netG
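A hypothetical usage sketch: from a (N, latent, 1, 1) noise tensor, the stride-1 transposed conv produces a 4x4 map and the four stride-2 stages grow it to 64x64 (the latent size of 100 is an assumption, not part of the snippet).

import mxnet as mx

netG = build_generator(n_filters=64, n_channels=3, mx_ctx=mx.cpu())
z = mx.nd.random.normal(shape=(2, 100, 1, 1))
print(netG(z).shape)  # (2, 3, 64, 64)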
Example #5
def lateral_conv(channels, kernel_size, strides, padding):
    lateral = HybridSequential()
    with lateral.name_scope():
        lateral.add(Conv2D(channels, kernel_size, strides, padding))
        lateral.add(BatchNorm(momentum=0.9, epsilon=1e-5))
        lateral.add(Activation('relu'))
    return lateral
class outerpart(HybridBlock):
    def __init__(self, innerblock=None):
        super(outerpart, self).__init__()
        with self.name_scope():
            en_conv1 = Conv2D(channels=32,
                              kernel_size=4,
                              strides=2,
                              padding=1,
                              in_channels=3)
            en_relu1 = LeakyReLU(alpha=0.2)
            en_norm1 = BatchNorm(momentum=0.1,
                                 in_channels=32,
                                 prefix='en_norm1')

            de_relu1 = Activation(activation='relu')
            de_norm1 = BatchNorm(momentum=0.1,
                                 in_channels=3,
                                 prefix='de_norm1')
            de_conv1 = Conv2DTranspose(channels=3,
                                       kernel_size=4,
                                       strides=2,
                                       padding=1,
                                       in_channels=32)
            channel_trans = Conv2D(channels=1,
                                   in_channels=3,
                                   kernel_size=1,
                                   prefix='')
            encoder1 = [en_conv1, en_norm1, en_relu1]
            decoder1 = [de_conv1, de_norm1, de_relu1, channel_trans]

            self.encoder1 = HybridSequential()
            with self.encoder1.name_scope():
                for block in encoder1:
                    self.encoder1.add(block)
            self.innerblock = innerblock
            self.decoder1 = HybridSequential()
            with self.decoder1.name_scope():
                for block in decoder1:
                    self.decoder1.add(block)

    def hybrid_forward(self, F, x):
        x1 = self.encoder1(x)
        x2, p_att, c_att = self.innerblock(x1)
        x3 = self.decoder1(x2)

        return x3, p_att, c_att
Example #7
File: models.py Project: xindd/GCN
def train_model(dataset):
    print(u'loading ppi net data...')
    ppi_net = dataset.get_ppi_net()
    print(u'loading relation between protein and entry ...')
    protein_entry_features = dataset.protein_entry_features()
    print(u'loading entry net...')
    entry_net = dataset.get_entry_net()
    print(u'loading relation between entry and pathway...')
    entry_pathway_features = dataset.entry_pathway_features()
    print(u'loading pathway net...')
    pathway_net = dataset.get_pathway_net()
    print(u'build net models...')
    genelist = dataset.genelist_order
    net = HybridSequential()
    with net.name_scope():
        ppi_in_units = len(genelist)
        # net.add(nn.BatchNorm())
        ppi_hidden_layer = [(2, 'relu')]  # Format: (units in layer, activation function)
        ppi_features, ppi_out_units = features(ppi_net['A'], 1,
                                               ppi_hidden_layer)
        net.add(ppi_features)
        net.add(nn.BatchNorm())
        ppi_to_entry_features = FeaturesTransform(
            protein_entry_features.entrylist,
            protein_entry_features.gene_to_index,
            protein_entry_features.entry_to_gene)
        net.add(ppi_to_entry_features)
        entry_hidden_layer = [(2, 'relu')]
        entry_features, entry_out_units = features(
            entry_net['A'], 1,
            entry_hidden_layer)  #ppi_out_units, entry_hidden_layer)
        net.add(entry_features)
        net.add(nn.BatchNorm())
        entry_to_pathway_features = FeaturesTransform(
            entry_pathway_features.pathwaylist,
            entry_pathway_features.entry_to_index,
            entry_pathway_features.pathway_to_entry)
        net.add(entry_to_pathway_features)
        # pathway
        pathway_hidden_layer = [(2, 'relu')]
        pathway_features, pathway_out_units = features(
            pathway_net['A'], 1,
            pathway_hidden_layer)  #entry_out_units, pathway_hidden_layer)
        net.add(pathway_features)
        net.add(nn.BatchNorm())
        #classifier = LogisticRegressor(pathway_out_units, len(entry_pathway_features.pathwaylist), 33)
        #net.add(classifier)
        net.add(nn.Dense(100, activation='relu'))
        net.add(nn.BatchNorm())
        net.add(nn.Dense(33))
        # net.add(nn.BatchNorm())
        # net.add(nn.Activation('sigmoid'))
        # classifier = LogisticRegressor(entry_out_units, len(protein_entry_features.entrylist), 2)

    # net.hybridize()

    return net, [ppi_features, entry_features, pathway_features]
Example #8
def conv_block(channels, num_convs=2, use_bias=False, use_global_stats=False, **kwargs):
    """Define U-Net convolution block"""
    out = HybridSequential(prefix="")
    with out.name_scope():
        for _ in range(num_convs):
            out.add(Conv3D(channels=channels, kernel_size=3, padding=1, use_bias=use_bias))
            out.add(Activation('relu'))
            out.add(BatchNorm(use_global_stats=use_global_stats)) #BN after relu seems to be the more recommended option. 
    return out
def ConvBatchAct(channels=1, kernel=1, stride=1, pad=0, num_group=1, active=True):
    net = HybridSequential()
    with net.name_scope():
        net.add(Conv2D(channels, kernel, stride, pad, groups=num_group, use_bias=False))
        net.add(BatchNorm(momentum=0.9))
        if active:
            net.add(Activation('relu'))
            # net.add(Swish())
    return net
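One detail worth noting in ConvBatchAct: setting num_group equal to the channel count makes the convolution depthwise, which is how the MBConv block in Example #27 below uses it. A hypothetical check:

import mxnet as mx

dw = ConvBatchAct(channels=32, kernel=3, stride=1, pad=1, num_group=32)
dw.initialize()
print(dw(mx.nd.random.uniform(shape=(1, 32, 16, 16))).shape)  # (1, 32, 16, 16)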
Example #10
File: models.py Project: xindd/GCN
def features(A, in_units, hidden_layer):
    features = HybridSequential()
    with features.name_scope():
        for i, (layer_size, activation_func) in enumerate(hidden_layer):
            layer = GraphConvolution(A,
                                     in_units=in_units,
                                     out_units=layer_size,
                                     activation=activation_func)
            features.add(layer)
            in_units = layer_size

    return features, in_units
Example #11
class net2(HybridBlock):
    def __init__(self):
        super(net2, self).__init__()
        self.net = HybridSequential()
        with self.net.name_scope():
            self.net.add(encoder(3, 16))
            self.net.add(encoder(16, 32))
        self.att = CA_M2(32)
        self.net1 = HybridSequential()
        with self.net1.name_scope():
            self.net1.add(encoder(32, 64))
            self.net1.add(decoder(64, 32))
            self.net1.add(decoder(32, 16))
            self.net1.add(decoder(16, 1))

    def hybrid_forward(self, F, x):
        y1 = self.net(x)
        y2 = self.att(y1)
        y3 = self.net1(y2)
        return y3
Example #12
class DenseMultipathNet(HybridBlock):
    """Return a whole network"""
    def __init__(self, opts):
        super(DenseMultipathNet, self).__init__()
        opts.units = opts.units[:opts.num_stage]
        assert (len(opts.units) == opts.num_stage)

        num_filters = opts.init_channels
        num_filters_list = []
        for stage in range(opts.num_stage):
            num_filters += opts.units[stage] * opts.growth_rate
            num_filters = int(floor(num_filters * opts.reduction))
            num_filters_list.append(num_filters)

        self.net = HybridSequential()
        with self.net.name_scope():
            self.blocks = EncoderDecoderUnit(opts,
                                             num_filters_list[opts.num_stage - 1],
                                             opts.num_stage - 1,
                                             innermost=True)
            for stage in range(opts.num_stage - 2, -1, -1):
                self.blocks = EncoderDecoderUnit(opts,
                                                 num_filters_list[stage],
                                                 stage,
                                                 inner_block=self.blocks)
            self.net.add(FirstBlock(opts))
            self.net.add(self.blocks)
            self.net.add(ResDBlock(opts, num_filters=16))
            if opts.norm_type == 'batch':
                self.net.add(
                    BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
            elif opts.norm_type == 'instance':
                self.net.add(InstanceNorm())
            if opts.activation in ['leaky']:
                self.net.add(LeakyReLU(opts.alpha))
            else:
                self.net.add(Activation(opts.activation))
            self.net.add(
                Conv3D(kernel_size=(1, 1, 1),
                       channels=2,
                       use_bias=opts.use_bias))
            if opts.norm_type == 'batch':
                self.net.add(
                    BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
            elif opts.norm_type == 'instance':
                self.net.add(InstanceNorm())
            self.net.add(Softmax())

    def hybrid_forward(self, F, x, *args, **kwargs):
        """Forward"""
        return self.net(x)
Example #13
class net1(HybridBlock):
    def __init__(self):
        super(net1, self).__init__()
        self.net = HybridSequential()
        with self.net.name_scope():
            self.net.add(encoder(3, 16))
            self.net.add(encoder(16, 32))
            self.net.add(encoder(32, 64))
            self.net.add(decoder(64, 32))
            self.net.add(decoder(32, 16))
            self.net.add(decoder(16, 1))

    def hybrid_forward(self, F, x):
        return self.net(x)
Example #14
File: P2PGAN.py Project: wshaow/GAN
class UnetSkipUnit(HybridBlock):
    def __init__(self, inner_channels, outer_channels, inner_block=None, innermost=False, outermost=False,
                 use_dropout=False, use_bias=False):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outermost = outermost
            en_conv = Conv2D(channels=inner_channels, kernel_size=4, strides=2, padding=1,
                             in_channels=outer_channels, use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)
            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)

            if innermost:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels, use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + decoder
            elif outermost:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels * 2)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation(activation='tanh')]
                model = encoder + [inner_block] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels * 2, use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_block] + decoder
            if use_dropout:
                model += [Dropout(rate=0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)

    def hybrid_forward(self, F, x):  # a HybridBlock's forward pass is defined as hybrid_forward
        """
        :param F: F is either mx.nd or mx.sym here
        :param x: input data
        :return:
        """
        if self.outermost:
            return self.model(x)
        else:
            return F.concat(self.model(x), x, dim=1)   # concatenate the input with the computed features, following the U-Net skip structure
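A hypothetical construction sketch: units nest from the innermost level outward, and since every non-outermost level returns concat(model(x), x), each decoder sees inner_channels * 2 input channels, exactly as the constructor assumes.

import mxnet as mx

unet = UnetSkipUnit(256, 128, innermost=True)                  # deepest level
unet = UnetSkipUnit(128, 64, inner_block=unet)                 # middle level
unet = UnetSkipUnit(64, 3, inner_block=unet, outermost=True)   # full model
unet.initialize()
print(unet(mx.nd.random.uniform(shape=(1, 3, 64, 64))).shape)  # (1, 3, 64, 64)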
Example #15
class decoder(HybridBlock):
    def __init__(self, in_channel, out_channel):
        super(decoder, self).__init__()
        with self.name_scope():
            de_conv = Conv2DTranspose(channels=out_channel, kernel_size=4, strides=2, padding=1,
                                      in_channels=in_channel)
            norm = BatchNorm(momentum=0.1, in_channels=out_channel)
            relu = LeakyReLU(alpha=0.2)
        decode = [de_conv, norm, relu]
        self.decoder = HybridSequential()
        with self.decoder.name_scope():
            for block in decode:
                self.decoder.add(block)

    def hybrid_forward(self, F, x):
        return self.decoder(x)
class ResDBlock(HybridBlock):
    """Residual decoding block"""
    def __init__(self, opts, num_filters, group=1):
        super(ResDBlock, self).__init__()
        if opts.num_fpg != -1:
            group = int(num_filters / opts.num_fpg)
        self.body = HybridSequential()
        with self.body.name_scope():
            self.body.add(conv_factory(opts, num_filters, kernel_size=1))
            self.body.add(conv_factory(opts, num_filters, kernel_size=3, group=group))
            self.body.add(conv_factory(opts, num_filters, kernel_size=1))

    def hybrid_forward(self, F, x, *args, **kwargs):
        """Forward"""
        return F.concat(self.body(x), x)
Example #17
def build_model(A, X):
    model = HybridSequential()

    with model.name_scope():
        features, out_units = build_features(A, X)
        model.add(features)
        logger.info("GCN Summary: \n{}".format(model))

        classifier = LogisticRegressor(out_units)
        model.add(classifier)
        logger.info("GCN + LR Summary: \n{}".format(model))

    model.hybridize()
    model.initialize(Uniform(1))

    return model, features
def build_features(A, X):
    hidden_layer_specs = [(4, 'tanh'), (2, 'tanh')]  # Format: (units in layer, activation function)
    in_units = X.shape[1]

    features = HybridSequential()
    with features.name_scope():
        for i, (layer_size, activation_func) in enumerate(hidden_layer_specs):
            layer = SpectralRule(A,
                                 in_units=in_units,
                                 out_units=layer_size,
                                 activation=activation_func)
            features.add(layer)

            in_units = layer_size
    return features, in_units
def build_model(A, X):
    model = HybridSequential()
    hidden_layer_specs = [(4, 'tanh'), (2, 'tanh')]
    in_units = X.shape[1]

    with model.name_scope():
        features, out_units = build_features(A, X)
        model.add(features)

        classifier = LogisticRegressor(out_units)
        model.add(classifier)

    model.hybridize()
    model.initialize(Uniform(1))

    return model, features
Example #20
class UnetSkipUnit(HybridBlock):

    def __init__(self, inner_channels, outer_channels, inner_blocks=None, inner_most=False, outer_most=False,
                 use_dropout=False, use_bias=False):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outer_most = outer_most
            en_conv = Conv2D(channels=inner_channels, in_channels=outer_channels, kernel_size=4, strides=2, padding=1,
                             use_bias=use_bias)
            en_relu = LeakyReLU(0.2)
            en_norm = BatchNorm(momentum=.1, in_channels=inner_channels)
            de_relu = Activation('relu')
            de_norm = BatchNorm(momentum=.1, in_channels=outer_channels)

            if inner_most:
                de_conv = Conv2DTranspose(channels=outer_channels, in_channels=inner_channels, kernel_size=4, strides=2,
                                          padding=1, use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv]
                model = encoder + decoder
            elif outer_most:
                de_conv = Conv2DTranspose(channels=outer_channels, in_channels=inner_channels * 2, kernel_size=4,
                                          strides=2, padding=1, use_bias=use_bias)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation('tanh')]
                model = encoder + [inner_blocks] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels, in_channels=inner_channels * 2, kernel_size=4,
                                          strides=2, padding=1, use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_blocks] + decoder

            if use_dropout:
                model += [Dropout(0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)

    def hybrid_forward(self, F, x, *args, **kwargs):
        if self.outer_most:
            return self.model(x)
        else:
            return F.concat(self.model(x), x, dim=1)
Example #21
 def build_multi_branches(self):
     net = HybridSequential()
     with net.name_scope():
         self.blocks = EncoderDecoderUnit(
             opts,
             self.num_filters_list[opts.num_stage - 1],
             opts.num_stage - 1,
             innermost=True)
         for stage in range(opts.num_stage - 2, -1, -1):
             self.blocks = EncoderDecoderUnit(opts,
                                              self.num_filters_list[stage],
                                              stage,
                                              inner_block=self.blocks)
         net.add(FirstBlock(opts))
         net.add(self.blocks)
         net.add(ResDBlock(opts, num_filters=16))
     return net
class UnetSkipUnit(HybridBlock):
    def __init__(self, inner_channels, outer_channels, inner_block=None, innermost=False, outermost=False,
                 use_dropout=False, use_bias=False):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outermost = outermost
            en_conv = Conv2D(channels=inner_channels, kernel_size=4, strides=2, padding=1,
                             in_channels=outer_channels, use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)
            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)

            if innermost:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels, use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + decoder
            elif outermost:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels * 2)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation(activation='tanh')]
                model = encoder + [inner_block] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels * 2, use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_block] + decoder
            if use_dropout:
                model += [Dropout(rate=0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)

    def hybrid_forward(self, F, x):
        if self.outermost:
            return self.model(x)
        else:
            return F.concat(self.model(x), x, dim=1)
 def _make_layer(self,
                 block,
                 layers,
                 channels,
                 stride,
                 stage_index,
                 in_channels=0):
     layer = HybridSequential(prefix='stage%d_' % stage_index)
     with layer.name_scope():
         layer.add(
             block(channels,
                   stride,
                   channels != in_channels,
                   in_channels=in_channels,
                   prefix=''))
         for _ in range(layers - 1):
             layer.add(
                 block(channels, 1, False, in_channels=channels, prefix=''))
     return layer
Example #24
def get_model(vocab_size,
              embedding_size,
              hidden_size,
              dropout_rate,
              classes=3):
    net = HybridSequential()

    with net.name_scope():
        net.add(Embedding(vocab_size, embedding_size))
        net.add(Dropout(dropout_rate))
        net.add(
            LSTM(hidden_size=hidden_size // 2,
                 num_layers=1,
                 layout='NTC',
                 bidirectional=True,
                 dropout=dropout_rate))
        net.add(Dense(units=classes, flatten=False))

    return net
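A hypothetical usage sketch: token ids in (batch, time) layout pass through the Embedding to (N, T, C), which matches the LSTM's 'NTC' layout, and Dense with flatten=False emits per-time-step class scores.

import mxnet as mx

net = get_model(vocab_size=10000, embedding_size=128,
                hidden_size=256, dropout_rate=0.3)
net.initialize()
tokens = mx.nd.random.randint(0, 10000, shape=(4, 20)).astype('float32')
print(net(tokens).shape)  # (4, 20, 3): one 3-way score per token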
class Res_Block(HybridBlock):
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)    
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu1 = LeakyReLU(alpha=0.2)
            
            conv2 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            conv3 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)
            norm3 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu3 = LeakyReLU(alpha=0.2)


            res_block = [conv1, norm1, relu1, conv2, norm2, relu2, conv3, norm3, relu3]

            self.se = nn.HybridSequential(prefix='')
            self.se.add(nn.Dense(outer_channels // 16, use_bias=False))
            self.se.add(nn.Activation('relu'))
            self.se.add(nn.Dense(outer_channels, use_bias=False))
            self.se.add(nn.Activation('sigmoid'))
            
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)

    def hybrid_forward(self, F, x):
        residual = x
        x = self.res(x)
        # Squeeze-and-excitation gate: global average pooling followed by two
        # Dense layers yields per-channel weights, which are returned to the
        # caller rather than applied here.
        w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
        w = self.se(w).reshape(shape=(0, 0, 1, 1))
        x = x + residual
        return x, w
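A hypothetical usage sketch for this SE variant (the broadcast application of the gate is an assumption; the snippet itself only returns w):

import mxnet as mx

block = Res_Block(outer_channels=64)   # the SE variant defined above
block.initialize()
y, w = block(mx.nd.random.uniform(shape=(1, 64, 32, 32)))
y = mx.nd.broadcast_mul(y, w)          # apply the channel attention gate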
Example #26
class XceptionModule(HybridBlock):
    def __init__(self,
                 channels,
                 in_channels,
                 num_dev=1,
                 pre_relu=True,
                 down=True,
                 **kwargs):
        super(XceptionModule, self).__init__(**kwargs)
        with self.name_scope():
            self.body = HybridSequential(prefix='body_')
            if pre_relu:
                self.body.add(nn.Activation('relu'))
            self.body.add(_make_separable_conv3(channels, in_channels))
            self.body.add(SyncBatchNorm(num_devices=num_dev))
            self.body.add(nn.Activation('relu'))
            self.body.add(_make_separable_conv3(channels, channels))
            self.body.add(SyncBatchNorm(num_devices=num_dev))
            if down:
                self.body.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
                self.downsample = HybridSequential(prefix='downsample_')
                with self.downsample.name_scope():
                    self.downsample.add(
                        nn.Conv2D(channels,
                                  kernel_size=1,
                                  strides=2,
                                  use_bias=False))
                    self.downsample.add(SyncBatchNorm(num_devices=num_dev))
            else:
                self.body.add(nn.Activation('relu'))
                self.body.add(_make_separable_conv3(channels, channels))
                self.body.add(SyncBatchNorm(num_devices=num_dev))
                self.downsample = None

    def hybrid_forward(self, F, x):
        if self.downsample:
            residual = self.downsample(x)
        else:
            residual = x
        x = self.body(x)
        return x + residual
Example #27
class MBConv(HybridBlock):

    def __init__(self, in_channels, channels, t, kernel, stride, **kwargs):
        super(MBConv, self).__init__(**kwargs)
        self.use_shortcut = stride == 1 and in_channels == channels
        self.net = HybridSequential()

        with self.net.name_scope():
            self.net.add(ConvBatchAct(in_channels * t))
            self.net.add(ConvBatchAct(in_channels * t,
                                      kernel=kernel,
                                      stride=stride,
                                      pad=int((kernel - 1) / 2),
                                      num_group=in_channels * t))
            self.net.add(ConvBatchAct(channels, active=True))

    def hybrid_forward(self, F, x):
        out = self.net(x)
        if self.use_shortcut:
            out = F.elemwise_add(out, x)
        return out
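A hypothetical usage sketch, assuming the ConvBatchAct helper from Example #8: with stride 1 and matching in/out channels the identity shortcut is taken, the MobileNetV2-style inverted-residual pattern.

import mxnet as mx

block = MBConv(in_channels=32, channels=32, t=6, kernel=3, stride=1)
block.initialize()
print(block(mx.nd.random.uniform(shape=(1, 32, 56, 56))).shape)  # (1, 32, 56, 56)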
Example #28
class net3(HybridBlock):
    def __init__(self):
        super(net3, self).__init__()
        with self.name_scope():
            encoder1 = encoder(3, 16)
            encoder2 = encoder(16, 32)
            encoder3 = encoder(32, 64)
            decoder1 = decoder(64, 32)
            decoder2 = decoder(32, 16)
            decoder3 = decoder(16, 1)
            att2 = CA_M2(32)
            att3 = CA_M2(64)
            att4 = CA_M2(32)
        blocks = [encoder1, encoder2, att2, encoder3, att3, decoder1, att4, decoder2, decoder3]
        self.net1 = HybridSequential()
        with self.net1.name_scope():
            for block in blocks:
                self.net1.add(block)

    def hybrid_forward(self, F, x):
        return self.net1(x)
Example #29
 def build_notch(self):
     """Summarize multiple branches"""
     net = HybridSequential()
     with net.name_scope():
         if opts.norm_type == 'batch':
             net.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
         elif opts.norm_type == 'instance':
             net.add(InstanceNorm())
         if opts.activation in ['leaky']:
             net.add(LeakyReLU(opts.alpha))
         else:
             net.add(Activation(opts.activation))
         net.add(
             Conv3D(kernel_size=(1, 1, 1),
                    channels=2,
                    use_bias=opts.use_bias))
         if opts.norm_type == 'batch':
             net.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
         elif opts.norm_type == 'instance':
             net.add(InstanceNorm())
         net.add(Softmax())
     return net
Example #30
    def deep_conv_lstm(is_saved=False):
        """
        共享模型deep_conv_lstm
        """
        net_triplet = HybridSequential(prefix='net_')

        kernel_size = 1
        pool_size = 2
        f_act = 'relu'
        dropout_rate = 0.5

        with net_triplet.name_scope():
            net_triplet.add(Conv1D(channels=256, kernel_size=kernel_size, activation=f_act))
            net_triplet.add(BatchNorm())
            net_triplet.add(MaxPool1D(pool_size=pool_size))
            # net_triplet.add(Dropout(rate=dropout_rate))

            net_triplet.add(Conv1D(channels=128, kernel_size=kernel_size, activation=f_act))
            net_triplet.add(BatchNorm())
            net_triplet.add(MaxPool1D(pool_size=pool_size))
            # net_triplet.add(Dropout(rate=dropout_rate))

            net_triplet.add(Conv1D(channels=64, kernel_size=kernel_size, activation=f_act))
            net_triplet.add(BatchNorm())
            net_triplet.add(MaxPool1D(pool_size=pool_size))
            # net_triplet.add(Dropout(rate=dropout_rate))

            net_triplet.add(Dense(units=128, activation=f_act))
            # net_triplet.add(Dropout(rate=dropout_rate))

            net_triplet.add(Dense(units=128, activation='sigmoid'))

        if is_saved:
            print('[INFO] saving the network JSON graph')
            sym_json = net_triplet(mx.sym.var('data')).tojson()
            json_file = os.path.join(ROOT_DIR, 'experiments', 'sym.json')
            write_line(json_file, sym_json)
        return net_triplet
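A hypothetical usage sketch (assuming the method is callable as shown; it takes no self): Conv1D expects (batch, channels, width), the three pool_size-2 stages shrink the time axis, and the Dense head flattens to a 128-dim embedding.

import mxnet as mx

net = deep_conv_lstm()
net.initialize()
print(net(mx.nd.random.uniform(shape=(8, 6, 128))).shape)  # (8, 128)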
Example #31
def build_discriminator(n_filters, n_channels, mx_ctx):
    netD = HybridSequential()
    with netD.name_scope():
        # Input is n_channels * 64 * 64
        netD.add(Conv2D(n_filters, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(n_filters * 2, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(BatchNorm())
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(n_filters * 4, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(BatchNorm())
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(n_filters * 8, kernel_size=4, strides=2, padding=1, use_bias=False))
        netD.add(BatchNorm())
        netD.add(LeakyReLU(0.2))

        netD.add(Conv2D(1, 4, 1, 0, use_bias=False))

    netD.initialize(mx.init.Normal(0.02), ctx=mx_ctx)
    netD.hybridize()
    return netD
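A hypothetical usage sketch mirroring build_generator above: four stride-2 stages reduce a 64x64 input to 4x4, and the final valid 4x4 conv leaves one logit per image.

import mxnet as mx

netD = build_discriminator(n_filters=64, n_channels=3, mx_ctx=mx.cpu())
print(netD(mx.nd.random.uniform(shape=(2, 3, 64, 64))).shape)  # (2, 1, 1, 1)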