Example #1
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 reduction=16,
                 **kwargs):
        super(PreSEAttBlock, self).__init__(**kwargs)
        mid_channels = out_channels // reduction

        with self.name_scope():
            self.bn = nn.BatchNorm(in_channels=in_channels,
                                   use_global_stats=bn_use_global_stats)
            self.relu = nn.Activation("relu")
            self.conv1 = conv1x1(in_channels=in_channels,
                                 out_channels=mid_channels,
                                 use_bias=True)
            self.conv2 = conv1x1(in_channels=mid_channels,
                                 out_channels=out_channels,
                                 use_bias=True)
            self.sigmoid = nn.Activation("sigmoid")
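A minimal self-contained sketch of the squeeze-and-excitation pattern this block sets up, assuming conv1x1 is a thin wrapper around a 1x1 nn.Conv2D and that the (unshown) forward pass follows the usual SE recipe:

from mxnet import nd
from mxnet.gluon import nn

class SimpleSEBlock(nn.HybridBlock):
    """Hypothetical stand-in: squeeze (global pool), excite (two 1x1 convs),
    then rescale the input channels."""
    def __init__(self, channels, reduction=16, **kwargs):
        super(SimpleSEBlock, self).__init__(**kwargs)
        mid_channels = channels // reduction
        with self.name_scope():
            self.conv1 = nn.Conv2D(mid_channels, kernel_size=1, use_bias=True)
            self.conv2 = nn.Conv2D(channels, kernel_size=1, use_bias=True)

    def hybrid_forward(self, F, x):
        w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)  # squeeze to 1x1
        w = F.sigmoid(self.conv2(F.relu(self.conv1(w))))      # excitation
        return F.broadcast_mul(x, w)                          # channel rescale

blk = SimpleSEBlock(channels=64)
blk.initialize()
print(blk(nd.random.uniform(shape=(1, 64, 32, 32))).shape)  # (1, 64, 32, 32)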
Example #2
    def __init__(self, num_category, **kwargs):
        super(Net1, self).__init__(**kwargs)
        with self.name_scope():
            # layers created in name_scope will inherit name space
            # from parent layer.
            self.bn = nn.BatchNorm()
            self.dropout = nn.Dropout(0.3)
            self.fc1 = nn.Dense(4096, activation="relu")
            self.fc2 = nn.Dense(num_category)
            self.image_lstm = gluon.rnn.LSTM(hidden_size=1024, num_layers=5)
            '''
            self.lstm_cell = [gluon.rnn.LSTMCell(hidden_size=100) for i in range(5)]
            self.h = [nd.random.uniform(shape=(15, 100)) for i in range(5)]
            self.c = [nd.random.uniform(shape=(15, 100)) for i in range(5)]
            '''

            self.question_lstm = gluon.rnn.LSTM(hidden_size=100, num_layers=12)
            self.image_fc = nn.Dense(1024, activation="relu")
            self.question_fc = nn.Dense(1024, activation="relu")
            self.ctx = gb.try_gpu()
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding,
                 bn_use_global_stats,
                 **kwargs):
        super(DarkConv, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                padding=padding,
                use_bias=False,
                in_channels=in_channels)
            self.bn = nn.BatchNorm(
                in_channels=out_channels,
                use_global_stats=bn_use_global_stats)
            # self.bn = nn.BatchNorm(in_channels=out_channels, momentum=0.01)
            self.activ = nn.LeakyReLU(alpha=0.1)
Example #4
    def __init__(self, class_num, ctx):
        super(FCNx8, self).__init__()
        self.desc = "fcnx8"
        with self.name_scope():
            self.encode = EncodeNet_gluoncv(ctx)
            self.decode = nn.Sequential(prefix="decode")
            self.decode.add(
                nn.Conv2D(channels=512, kernel_size=3, padding=1, strides=1),
                nn.BatchNorm(),
                nn.Activation("relu"),
                nn.Dropout(0.1),
                nn.Conv2D(channels=class_num, kernel_size=1, padding=0, strides=1)
            )
            for layer in self.decode:
                if isinstance(layer, nn.Conv2D):
                    layer.initialize(init=mx.init.Xavier(), ctx=ctx)
                else:
                    layer.initialize(ctx=ctx)
            self.decode.collect_params().setattr('lr_mult', 10)
        self.upscale = 8
Example #5
def _add_conv(out,
              channels=1,
              kernel=1,
              stride=1,
              pad=0,
              num_group=1,
              active=True,
              batchnorm=True):
    out.add(SamePadding(kernel, stride, dilation=(1, 1)))
    out.add(
        nn.Conv2D(channels,
                  kernel,
                  stride,
                  pad,
                  groups=num_group,
                  use_bias=False))
    if batchnorm:
        out.add(nn.BatchNorm(scale=True, momentum=0.99, epsilon=1e-3))
    if active:
        out.add(nn.Swish())
Example #6
    def __init__(self,
                 kernel_size,
                 channels_out,
                 channels_in=0,
                 strides=1,
                 with_bn=True,
                 **kwargs):
        super(convolution, self).__init__(**kwargs)
        # padding that keeps the spatial resolution unchanged
        paddings = (kernel_size - 1) // 2
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels_out,
                kernel_size,
                strides,
                paddings,
                in_channels=channels_in,  # 0 lets Gluon infer the input shape
                use_bias=not with_bn)
            self.bn = nn.BatchNorm(
                in_channels=channels_out) if with_bn else nn.Sequential()
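The with_bn fallback above relies on the fact that an empty nn.Sequential acts as an identity mapping, so a forward pass can call self.bn(x) unconditionally. A quick check:

from mxnet import nd
from mxnet.gluon import nn

identity = nn.Sequential()  # no layers added
identity.initialize()       # nothing to initialize, but harmless
x = nd.arange(6).reshape((2, 3))
print((identity(x) == x).asnumpy().all())  # True: empty Sequential is a no-op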
Example #7
    def __init__(self, num_classes, **kwargs):
        super(ResNet, self).__init__(**kwargs)
        with self.name_scope():
            b1 = nn.Conv2D(16, kernel_size=3, strides=1, padding=1)
            b2 = nn.Sequential()
            for _ in range(8):
                b2.add(Residual(16))
            b3 = nn.Sequential()
            b3.add(Residual(32, same_shape=False))
            for _ in range(7):
                b3.add(Residual(32))
            b4 = nn.Sequential()
            b4.add(Residual(64, same_shape=False))
            for _ in range(7):
                b4.add(Residual(64))
            b5 = nn.Sequential()
            b5.add(nn.BatchNorm(), nn.Activation(activation="relu"),
                   nn.AvgPool2D(pool_size=8), nn.Dense(num_classes))
            self.net = nn.Sequential()
            self.net.add(b1, b2, b3, b4, b5)
Example #8
    def __init__(self, in_channels, out_channels, kernel_size, strides,
                 padding, use_bias, use_bn, bn_use_global_stats, **kwargs):
        super(VGGConv, self).__init__(**kwargs)
        self.use_bn = use_bn

        with self.name_scope():
            self.conv = nn.Conv2D(channels=out_channels,
                                  kernel_size=kernel_size,
                                  strides=strides,
                                  padding=padding,
                                  use_bias=use_bias,
                                  weight_initializer=Xavier(
                                      rnd_type='gaussian',
                                      factor_type='out',
                                      magnitude=2),
                                  in_channels=in_channels)
            if self.use_bn:
                self.bn = nn.BatchNorm(in_channels=out_channels,
                                       use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
Example #9
def resnet18(num_classes):
    net = nn.HybridSequential()
    net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
            nn.BatchNorm(), nn.Activation('relu'))

    def resnet_block(num_channels, num_residuals, first_block=False):
        blk = nn.HybridSequential()
        for i in range(num_residuals):
            if i == 0 and not first_block:
                blk.add(Residual(num_channels, use_1x1conv=True, strides=2))
            else:
                blk.add(Residual(num_channels))
        return blk

    net.add(resnet_block(64, 2, first_block=True),
            resnet_block(128, 2),
            resnet_block(256, 2),
            resnet_block(512, 2))
    net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
    return net
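A possible smoke test, assuming a standard Residual block definition (such as the one from d2l) is in scope; the small 3x3 stem and absence of max pooling suit inputs like CIFAR-10:

from mxnet import nd

net = resnet18(num_classes=10)
net.initialize()
net.hybridize()  # compile the HybridSequential graph
print(net(nd.random.uniform(shape=(1, 3, 32, 32))).shape)  # (1, 10)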
Example #10
def get_spatial_temporal_conv(in_filters, out_filter, stride, use_bias=False):
    blk = nn.Sequential()

    # Size the intermediate channels so that the factorized (1x3x3 + 3x1x1)
    # pair has roughly the same parameter count as a full 3x3x3 convolution.
    i = 3 * in_filters * out_filter * 3 * 3
    i /= in_filters * 3 * 3 + 3 * out_filter
    middle_filters = int(i)
    #print("Number of middle filters: {0}".format(middle_filters))
    blk.add(
        nn.Conv3D(channels=middle_filters,
                  kernel_size=(1, 3, 3),
                  strides=(1, stride[0], stride[1]),
                  padding=(0, 1, 1),
                  use_bias=use_bias), nn.BatchNorm(),
        nn.Activation(activation='relu'),
        nn.Conv3D(channels=out_filter,
                  kernel_size=(3, 1, 1),
                  strides=(stride[0], 1, 1),
                  padding=(1, 0, 0),
                  use_bias=use_bias))
    return blk
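The channel arithmetic sizes the intermediate tensor so the factorized spatial (1x3x3) plus temporal (3x1x1) pair costs about as many parameters as one full 3x3x3 convolution, as in R(2+1)D-style decompositions. A worked instance of the formula above:

# For in_filters=64, out_filter=128:
in_filters, out_filter = 64, 128
i = 3 * in_filters * out_filter * 3 * 3   # 221184
i /= in_filters * 3 * 3 + 3 * out_filter  # / 960
print(int(i))                             # 230 middle filters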
Example #11
    def __init__(self, block, layers, channels, classes=10, **kwargs):
        super(CIFARResNetV1, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.Conv2D(channels[0], 3, 1, 1, use_bias=False))
            self.features.add(nn.BatchNorm())

            for i, num_layer in enumerate(layers):
                stride = 1 if i == 0 else 2
                self.features.add(
                    self._make_layer(block,
                                     num_layer,
                                     channels[i + 1],
                                     stride,
                                     i + 1,
                                     in_channels=channels[i]))
            self.features.add(nn.GlobalAvgPool2D())

            self.output = nn.Dense(classes, in_units=channels[-1])
Example #12
    def __init__(self,
                 channels,
                 stride,
                 downsample=False,
                 in_channels=0,
                 init=True,
                 **kwargs):
        super(BasicBlockV2, self).__init__(**kwargs)
        self.channels = channels
        self.stride = stride
        self.in_channels = in_channels

        self.bn = nn.BatchNorm()
        self.body = nn.HybridSequential(prefix='')
        self.downsample = None
        if downsample:
            self.downsample = nn.HybridSequential(prefix='')

        if init:
            self._init()
Example #13
    def __init__(self, num_dilation, **kwargs):
        super(MultiLayerDilation, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(channels=512, kernel_size=3, dilation=num_dilation, padding=num_dilation)
        self.conv2 = nn.Conv2D(channels=512, kernel_size=3, dilation=num_dilation, padding=num_dilation)
        self.conv3 = nn.Conv2D(channels=512, kernel_size=3, dilation=num_dilation, padding=num_dilation)
        self.conv4 = nn.Conv2D(channels=256, kernel_size=3, dilation=num_dilation, padding=num_dilation)
        self.conv5 = nn.Conv2D(channels=128, kernel_size=3, dilation=num_dilation, padding=num_dilation)
        self.conv6 = nn.Conv2D(channels=64, kernel_size=3, dilation=num_dilation, padding=num_dilation)

        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()
        self.bn3 = nn.BatchNorm()
        self.bn4 = nn.BatchNorm()
        self.bn5 = nn.BatchNorm()
        self.bn6 = nn.BatchNorm()
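Setting padding equal to the dilation rate keeps the spatial size unchanged for a 3x3 kernel, since the effective kernel extent is 2*dilation + 1. A quick shape check:

from mxnet import nd
from mxnet.gluon import nn

for d in (1, 2, 4):
    conv = nn.Conv2D(channels=8, kernel_size=3, dilation=d, padding=d)
    conv.initialize()
    print(conv(nd.ones((1, 3, 16, 16))).shape)  # always (1, 8, 16, 16)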
Example #14
def conv(channels,
         k_size=4,
         stride=2,
         pad=1,
         bn=True,
         drop_out=True,
         p=0.2,
         ReLU=True,
         sequential=True):

    layers = []

    if ReLU:
        layers += [
            nn.Conv2D(channels=channels,
                      strides=stride,
                      kernel_size=k_size,
                      padding=pad,
                      activation='relu',
                      use_bias=False)
        ]
    else:
        layers += [
            nn.Conv2D(channels=channels,
                      strides=stride,
                      kernel_size=k_size,
                      padding=pad,
                      use_bias=False)
        ]
    if bn:
        layers += [nn.BatchNorm()]
    if drop_out:
        layers += [nn.Dropout(p)]
    if sequential:
        out = nn.HybridSequential()
        for layer in layers:
            out.add(layer)
        return out

    else:
        return layers
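A possible use of the builder: each call produces a stride-2, 4x4-kernel block, so stacking two of them quarters the spatial resolution (a sketch, assuming the function above is in scope):

from mxnet import nd
from mxnet.gluon import nn

net = nn.HybridSequential()
net.add(conv(64), conv(128))  # 64x64 -> 32x32 -> 16x16
net.initialize()
print(net(nd.random.uniform(shape=(1, 3, 64, 64))).shape)  # (1, 128, 16, 16)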
Example #15
    def __init__(self, k, scale,
                 activation='relu',
                 normalization='batch'):
        super(Discriminator, self).__init__()
        self.alpha = 0
        self.scale = scale
        self.activation = activation
        self.normalization = normalization
        self.network = dict()

        name = 'fromrgb_growth'
        self.network[name] = nn.HybridSequential(prefix=name)
        with self.network[name].name_scope():
            self.network[name].add(nn.Conv2D(nf(scale), 3, 1, 1, use_bias=True))
            self.network[name].add(nn.LeakyReLU(0.2))

        name = 'fromrgb_main'
        self.network[name] = nn.HybridSequential(prefix=name)
        with self.network[name].name_scope():
            self.network[name].add(nn.Conv2D(nf(scale-1), 3, 1, 1, use_bias=True))
            self.network[name].add(nn.LeakyReLU(0.2))

        for i in range(scale, 1, -1):
            name = f'growth_{i}'
            self.network[name] = nn.HybridSequential(prefix=name)
            with self.network[name].name_scope():
                self.network[name].add(nn.Conv2D(nf(i-1), 3, 1, 1, use_bias=False))
                if 'batch' in normalization: self.network[name].add(nn.BatchNorm())
                self.network[name].add(nn.LeakyReLU(0.2))
                self.network[name].add(nn.MaxPool2D())

        self.network['downscale'] = nn.HybridSequential()
        self.network['downscale'].add(nn.MaxPool2D())

        self.network['result'] = nn.HybridSequential()
        self.network['result'].add(nn.Conv2D(1000, 1, 1, 0, use_bias=True))
        self.network['result'].add(nn.Conv2D(1, 1, 1, 0, use_bias=True))
Example #16
    def __init__(self, growthRate, depth, reduction, nClasses, bottleneck):
        super(DenseNet, self).__init__()

        nDenseBlocks = (depth - 4) // 3
        if bottleneck:
            nDenseBlocks //= 2

        nChannels = 2 * growthRate
        with self.name_scope():
            self.conv1 = nn.Conv2D(nChannels,
                                   kernel_size=3,
                                   padding=1,
                                   use_bias=False,
                                   weight_initializer=init.Normal(
                                       math.sqrt(2. / nChannels)))
            self.dense1 = self._make_dense(growthRate, nDenseBlocks,
                                           bottleneck)

        nChannels += nDenseBlocks * growthRate
        nOutChannels = int(math.floor(nChannels * reduction))
        with self.name_scope():
            self.trans1 = Transition(nOutChannels)

        nChannels = nOutChannels
        with self.name_scope():
            self.dense2 = self._make_dense(growthRate, nDenseBlocks,
                                           bottleneck)
        nChannels += nDenseBlocks * growthRate
        nOutChannels = int(math.floor(nChannels * reduction))
        with self.name_scope():
            self.trans2 = Transition(nOutChannels)

        nChannels = nOutChannels
        with self.name_scope():
            self.dense3 = self._make_dense(growthRate, nDenseBlocks,
                                           bottleneck)
        nChannels += nDenseBlocks * growthRate

        with self.name_scope():
            self.bn1 = nn.BatchNorm()
            self.fc = nn.Dense(nClasses)
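The channel bookkeeping between the with-blocks follows the usual DenseNet rule: each dense block adds nDenseBlocks * growthRate channels, and each transition compresses by the reduction factor. A worked instance with growthRate=12, depth=40, reduction=0.5 and no bottleneck:

growthRate, depth, reduction = 12, 40, 0.5
nDenseBlocks = (depth - 4) // 3         # 12 layers per dense block
nChannels = 2 * growthRate              # 24 channels after conv1
nChannels += nDenseBlocks * growthRate  # 168 channels after dense1
print(int(nChannels * reduction))       # 84 channels enter dense2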
Example #17
    def __init__(self, **kwargs):
        super(colorize_net, self).__init__(**kwargs)
        with self.name_scope():
            self.bn0 = nn.BatchNorm()
            self.conv0 = nn.Conv2D(256, 1, activation='relu')
            self.bn1 = nn.BatchNorm()
            self.conv1 = nn.Conv2D(128, 3, padding=1, activation='relu')
            self.bn2 = nn.BatchNorm()
            self.conv2 = nn.Conv2D(64, 3, padding=1, activation='relu')
            self.bn3 = nn.BatchNorm()
            self.conv3 = nn.Conv2D(64, 3, padding=1, activation='relu')
            self.bn4 = nn.BatchNorm()
            self.conv4 = nn.Conv2D(32, 3, padding=1, activation='relu')
            self.bn5 = nn.BatchNorm()
            self.conv5 = nn.Conv2D(2, 3, padding=1, activation='sigmoid')
Example #18
    def _make_features(self, layers, filters, batch_norm, step):
        featurizer = nn.HybridSequential(prefix='')

        count = 0
        for i, num in enumerate(layers):
            for _ in range(num):

                if count not in step_spec[step]:
                    conv_layer = nn.QConv2D(filters[i],
                                            kernel_size=3,
                                            padding=1,
                                            weight_initializer=Xavier(
                                                rnd_type='gaussian',
                                                factor_type='out',
                                                magnitude=2),
                                            bias_initializer='zeros',
                                            bits=1,
                                            apply_scaling=True)
                    featurizer.add(conv_layer)
                    featurizer.add(nn.Dropout(rate=0.25))
                    featurizer.add(nn.Activation('relu'))
                else:
                    conv_layer = nn.Conv2D(filters[i],
                                           kernel_size=3,
                                           padding=1,
                                           weight_initializer=Xavier(
                                               rnd_type='gaussian',
                                               factor_type='out',
                                               magnitude=2),
                                           bias_initializer='zeros')
                    featurizer.add(conv_layer)
                    featurizer.add(nn.Dropout(rate=0.25))
                    featurizer.add(nn.Activation('relu'))

                count += 1

                if batch_norm:
                    featurizer.add(nn.BatchNorm())

            featurizer.add(nn.MaxPool2D(strides=2))
        return featurizer
Example #19
def down_sample_blk(num_filters):
    '''
    nn.MaxPool2D(2) halves the height and width of the input.

    Pass the output from down_sample_blk to cls_predictor,
    the 3x3 conv in cls_predictor actually covers a 10x10 area in
    the input of down_sample_blk.

    E.g.
    x x x                required input for a 3x3 conv in cls_predictor
    y y y y y y          before MaxPool2D(2)
    z z z z z z z z      required input for a 3x3 conv in down_sample_blk
    0 1 2 3 4 5 6 7 8 9  required input for a 3x3 conv in down_sample_blk
    '''
    blk = nn.HybridSequential()
    for _ in range(2):
        blk.add(nn.Conv2D(channels=num_filters, kernel_size=3, padding=1),
                nn.BatchNorm(in_channels=num_filters), nn.Activation('relu'))
    blk.add(nn.MaxPool2D(2))
    blk.hybridize()
    return blk
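A sanity check of the halving behaviour described in the docstring:

from mxnet import nd

blk = down_sample_blk(10)
blk.initialize()
print(blk(nd.zeros((2, 3, 20, 20))).shape)  # (2, 10, 10, 10)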
Example #20
def conv_act_layer(in_channels,
                   prefix,
                   num,
                   kernel_size,
                   padding,
                   stride=1,
                   use_bn=False):
    '''
        prefix = 'conv'+ str(layer num)+'_'
    '''
    net = nn.HybridSequential()
    net.add(
        nn.Conv2D(channels=in_channels,
                  kernel_size=kernel_size,
                  strides=stride,
                  padding=padding,
                  prefix='{}{}_'.format(prefix, num)))
    if use_bn:
        net.add(nn.BatchNorm(prefix='{}{}_'.format(prefix, num)))
    net.add(nn.Activation('relu', prefix='{}relu_{}_'.format(prefix, num)))
    return net
Example #21
    def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
        super(ResNetV1, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            if thumbnail:
                self.features.add(_conv3x3x3(channels[0], 1, 3))
            else:
                self.features.add(nn.Conv3D(channels[0], 3, 1, 1, use_bias=False,
                                            in_channels=1))  # in_channels changed from 3 to 1 for single-channel input
                self.features.add(nn.BatchNorm())
                self.features.add(nn.Activation('relu'))
                self.features.add(nn.MaxPool3D(3, 2, 1))

            for i, num_layer in enumerate(layers):
                stride = 1 if i == 0 else 2
                self.features.add(self._make_layer(block, num_layer, channels[i+1],
                                                   stride, i+1, in_channels=channels[i]))
            self.features.add(nn.GlobalAvgPool3D())

            self.output = nn.Dense(classes, in_units=channels[-1])
Example #22
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides=1,
                 padding=0,
                 groups=1,
                 bn_use_global_stats=False,
                 **kwargs):
        super(ConvBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = nn.Conv2D(channels=out_channels,
                                  kernel_size=kernel_size,
                                  strides=strides,
                                  padding=padding,
                                  groups=groups,
                                  use_bias=False,
                                  in_channels=in_channels)
            self.bn = nn.BatchNorm(in_channels=out_channels,
                                   use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation('relu')
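A minimal smoke test, assuming ConvBlock is an nn.HybridBlock whose (unshown) forward applies conv, bn, and activ in order. Note that bn_use_global_stats=True makes BatchNorm use its stored running statistics even in training mode, which is typical when fine-tuning with frozen normalization:

from mxnet import nd

blk = ConvBlock(in_channels=3, out_channels=16, kernel_size=3, padding=1)
blk.initialize()
print(blk(nd.random.uniform(shape=(1, 3, 32, 32))).shape)  # (1, 16, 32, 32)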
Example #23
def resnet18(num_classes):
    """A slightly modified ResNet-18 model"""
    def resnet_block(num_channels, num_residuals, first_block=False):
        blk = nn.Sequential()
        for i in range(num_residuals):
            if i == 0 and not first_block:
                blk.add(d2l.Residual(num_channels, use_1x1conv=True,
                                     strides=2))
            else:
                blk.add(d2l.Residual(num_channels))
        return blk

    net = nn.Sequential()
    # This model uses a smaller convolution kernel, stride, and padding and
    # removes the maximum pooling layer
    net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1), nn.BatchNorm(),
            nn.Activation('relu'))
    net.add(resnet_block(64, 2, first_block=True), resnet_block(128, 2),
            resnet_block(256, 2), resnet_block(512, 2))
    net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
    return net
Example #24
    def __init__(self, channels, **kwargs):
        super(FeatureFusionMoudle, self).__init__(**kwargs)
        self.block = nn.HybridSequential()
        with self.block.name_scope():
            self.block.add(
                nn.Conv2D(in_channels=channels,
                          channels=channels,
                          kernel_size=3,
                          strides=1,
                          padding=1))
            self.block.add(nn.BatchNorm())
            self.block.add(nn.Activation('relu'))
        self.pool = nn.GlobalAvgPool2D()
        self.conv1 = nn.Conv2D(in_channels=channels,
                               channels=channels,
                               kernel_size=1,
                               strides=1)
        self.conv2 = nn.Conv2D(in_channels=channels,
                               channels=channels,
                               kernel_size=1,
                               strides=1)
Example #25
    def __init__(self):
        super(SRGenerator, self).__init__()
        self.conv1 = nn.Conv2D(64,
                               kernel_size=3,
                               strides=1,
                               padding=1,
                               activation='relu')
        self.res_block = nn.HybridSequential()
        with self.name_scope():
            for _ in range(16):
                self.res_block.add(ResnetBlock())

            self.res_block.add(
                nn.Conv2D(64,
                          kernel_size=3,
                          strides=1,
                          padding=1,
                          use_bias=False), nn.BatchNorm())
        self.subpix_block1 = SubpixelBlock()
        self.subpix_block2 = SubpixelBlock()
        self.conv4 = nn.Conv2D(3, kernel_size=1, strides=1, activation='tanh')
Example #26
    def __init__(self, opt):
        super(RNNText, self).__init__()
        with self.name_scope():
            if opt.model == 'lstm':
                self.rnn = rnn.LSTM(opt.num_hidden,
                                    opt.num_layers,
                                    bidirectional=opt.bidirectional,
                                    dropout=opt.drop)
            elif opt.model == 'gru':
                self.rnn = rnn.GRU(opt.num_hidden,
                                   opt.num_layers,
                                   bidirectional=opt.bidirectional,
                                   dropout=opt.drop)
            elif opt.model == 'rnn':
                self.rnn = rnn.RNN(opt.num_hidden,
                                   opt.num_layers,
                                   dropout=opt.drop)
            else:
                raise NotImplementedError
            self.fc = nn.Dense(opt.num_hidden * 2)
            self.bn = nn.BatchNorm()
Example #27
    def __init__(self, channels=64, r=4):
        super(ResGlobLocaforGlobLocaChaFuse, self).__init__()
        inter_channels = int(channels // r)

        with self.name_scope():

            self.local_att = nn.HybridSequential(prefix='local_att')
            self.local_att.add(nn.Conv2D(inter_channels, kernel_size=1, strides=1, padding=0))
            self.local_att.add(nn.BatchNorm())
            self.local_att.add(nn.Activation('relu'))
            self.local_att.add(nn.Conv2D(channels, kernel_size=1, strides=1, padding=0))
            self.local_att.add(nn.BatchNorm())

            self.global_att = nn.HybridSequential(prefix='global_att')
            self.global_att.add(nn.GlobalAvgPool2D())
            self.global_att.add(nn.Conv2D(inter_channels, kernel_size=1, strides=1, padding=0))
            self.global_att.add(nn.BatchNorm())
            self.global_att.add(nn.Activation('relu'))
            self.global_att.add(nn.Conv2D(channels, kernel_size=1, strides=1, padding=0))
            self.global_att.add(nn.BatchNorm())

            self.local_att2 = nn.HybridSequential(prefix='local_att2')
            self.local_att2.add(nn.Conv2D(inter_channels, kernel_size=1, strides=1, padding=0))
            self.local_att2.add(nn.BatchNorm())
            self.local_att2.add(nn.Activation('relu'))
            self.local_att2.add(nn.Conv2D(channels, kernel_size=1, strides=1, padding=0))
            self.local_att2.add(nn.BatchNorm())

            self.global_att2 = nn.HybridSequential(prefix='global_att2')
            self.global_att2.add(nn.GlobalAvgPool2D())
            self.global_att2.add(nn.Conv2D(inter_channels, kernel_size=1, strides=1, padding=0))
            self.global_att2.add(nn.BatchNorm())
            self.global_att2.add(nn.Activation('relu'))
            self.global_att2.add(nn.Conv2D(channels, kernel_size=1, strides=1, padding=0))
            self.global_att2.add(nn.BatchNorm())

            self.sig1 = nn.Activation('sigmoid')
            self.sig2 = nn.Activation('sigmoid')
Example #28
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 activate=True,
                 **kwargs):
        super(DartsConv, self).__init__(**kwargs)
        self.activate = activate

        with self.name_scope():
            if self.activate:
                self.activ = nn.Activation("relu")
            self.conv = nn.Conv2D(channels=out_channels,
                                  kernel_size=kernel_size,
                                  strides=strides,
                                  padding=padding,
                                  use_bias=False,
                                  in_channels=in_channels)
            self.bn = nn.BatchNorm(in_channels=out_channels)
Example #29
    def change_scale(self, ctx):

        self.scale += 1
        self.network['fromrgb_main'] = self.network['fromrgb_growth']

        name = 'fromrgb_growth'
        self.network[name] = nn.HybridSequential(prefix=name)
        with self.network[name].name_scope():
            self.network[name].add(nn.Conv2D(nf(self.scale), 3, 1, 1, use_bias=True))
            self.network[name].add(nn.LeakyReLU(0.2))

        name = f'growth_{self.scale}'
        self.network[name] = nn.HybridSequential()
        with self.network[name].name_scope():
            self.network[name].add(nn.Conv2D(nf(self.scale-1), 3, 1, 1, use_bias=False))
            if 'batch' in self.normalization: self.network[name].add(nn.BatchNorm())
            self.network[name].add(nn.LeakyReLU(0.2))
            self.network[name].add(nn.MaxPool2D())

        self.network['fromrgb_growth'].collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
        self.network[f'growth_{self.scale}'].collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
Example #30
def _add_conv(out,
              channels=1,
              kernel=1,
              stride=1,
              pad=0,
              num_group=1,
              active=True,
              lite=False):
    out.add(
        nn.Conv2D(channels,
                  kernel,
                  stride,
                  pad,
                  groups=num_group,
                  use_bias=False))
    out.add(nn.BatchNorm(scale=True, momentum=0.99, epsilon=1e-3))
    if active:
        if lite:
            out.add(ReLU6())
        else:
            out.add(nn.Swish())
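ReLU6 here is not a built-in Gluon layer; a minimal sketch of the custom block this code presumably relies on (the bounded ReLU used by MobileNet-style "lite" variants):

from mxnet.gluon import nn

class ReLU6(nn.HybridBlock):
    """min(max(x, 0), 6) -- clips activations to the range [0, 6]."""
    def hybrid_forward(self, F, x):
        return F.clip(x, 0, 6)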