Example #1
import numpy as np
from mxnet.gluon import nn

def decoder(nz, outSize, channel=3, nf=128):
    init_dim = 8
    layers = int(np.log2(outSize) - 3)

    decoder_seq = nn.HybridSequential(prefix='decoder')
    decoder_seq.add(
        nn.Dense(nf * init_dim ** 2, in_units=nz),
        # reshape the projected latent into an (nf, 8, 8) feature map
        nn.HybridLambda(lambda F, x: F.reshape(x, shape=(-1, nf, init_dim, init_dim))),
        nn.Conv2D(nf, kernel_size=3, strides=1, padding=1),  # 3x3 "same" conv
        nn.ELU(),
        nn.Conv2D(nf, kernel_size=3, strides=1, padding=1),
        nn.ELU(),
    )
    current_dim = init_dim
    for i in range(layers):
        current_dim *= 2
        decoder_seq.add(
            nn.HybridLambda(lambda F, x: F.UpSampling(x, scale=2, sample_type='nearest')),
            nn.Conv2D(nf, kernel_size=3, strides=1, padding=1),
            nn.ELU(),
            nn.Conv2D(nf, kernel_size=3, strides=1, padding=1),
            nn.ELU(),
        )
    decoder_seq.add(
        nn.Conv2D(channel, kernel_size=3, strides=1, padding=1),
        nn.ELU(),
    )
    return decoder_seq
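A minimal usage sketch for the decoder above (the latent and output sizes are illustrative assumptions, not from the original):

import mxnet as mx

dec = decoder(nz=64, outSize=64)
dec.initialize(mx.init.Xavier())
dec.hybridize()
img = dec(mx.nd.random.normal(shape=(4, 64)))
print(img.shape)  # (4, 3, 64, 64)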
Example #2
import mxnet as mx
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal


def test_lambda():
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'), nn.LeakyReLU(0.1))

    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'), nn.HybridLambda(op3))

    op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'), nn.Lambda(op4))

    input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-3, atol=1e-3)
    assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3, atol=1e-3)
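The three networks are numerically equivalent: nn.Lambda wraps an imperative NDArray function (or the name of an mx.nd operator), while nn.HybridLambda wraps a function of (F, x, ...) so the block can be hybridized, with F standing for either mx.nd or mx.sym. Passing a string such as 'tanh' simply looks the operator up in the corresponding namespace.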
Example #3
import numpy as np
from mxnet.gluon import nn

def encoder(nz, inSize, channel=3, nf=128):
    init_dim = 8
    layers = int(np.log2(inSize) - 2)
    encoder_seq = nn.HybridSequential(prefix='encoder')
    encoder_seq.add(
        nn.Conv2D(channel, kernel_size=3, strides=1, padding=1),  # 3x3 "same" conv
        nn.ELU(),
    )
    current_dim = inSize
    for i in range(1, layers):
        encoder_seq.add(
            nn.Conv2D(i * nf, kernel_size=3, strides=1, padding=1),
            nn.ELU(),
            # nn.Conv2D(i * nf, kernel_size=3, strides=3, padding=current_dim),
            # nn.ELU(),
            nn.Conv2D(i * nf, kernel_size=3, strides=2, padding=1),  # downsample by 2
            nn.ELU(),
        )
        current_dim //= 2
    encoder_seq.add(
        nn.Conv2D(layers * nf, kernel_size=3, strides=1, padding=1),
        nn.ELU(),
        nn.Conv2D(layers * nf, kernel_size=3, strides=1, padding=1),
        nn.ELU(),
    )
    encoder_seq.add(
        nn.HybridLambda(lambda F, x: F.reshape(x, shape=(-1, layers * nf * init_dim ** 2))),
        nn.Dense(nz)
    )
    return encoder_seq
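A matching shape check for the encoder (same illustrative sizes as the decoder sketch above):

import mxnet as mx

enc = encoder(nz=64, inSize=64)
enc.initialize(mx.init.Xavier())
code = enc(mx.nd.random.normal(shape=(4, 3, 64, 64)))
print(code.shape)  # (4, 64)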
Example #4
    def __init__(self,
                 channels,
                 kernel,
                 stride,
                 bias=True,
                 ifinf=False,
                 *args,
                 **kwargs):
        super(ConvLayer, self).__init__(*args, **kwargs)
        with self.name_scope():
            if ifinf:
                # zero padding handled inside the convolution itself
                self.pad = None
                self.conv = nn.Conv2D(channels,
                                      kernel,
                                      strides=stride,
                                      padding=kernel // 2,
                                      use_bias=bias)
            else:
                # Conv2D only supports zero padding, so reflection padding
                # is applied separately via F.pad wrapped in a HybridLambda
                self.pad = nn.HybridLambda(lambda F, x: F.pad(
                    x,
                    mode='reflect',
                    pad_width=(0, 0, 0, 0, kernel // 2, kernel // 2,
                               kernel // 2, kernel // 2)))
                self.conv = nn.Conv2D(channels,
                                      kernel,
                                      strides=stride,
                                      padding=0,
                                      use_bias=bias)
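The snippet above only shows __init__; a minimal hybrid_forward consistent with those fields would be (a sketch, not the original code):

    def hybrid_forward(self, F, x):
        # reflect-pad first unless padding is handled inside the conv
        if self.pad is not None:
            x = self.pad(x)
        return self.conv(x)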
Example #5
    def make_ppca_model(self):
        dtype = get_default_dtype()
        m = Model()
        m.w = Variable(shape=(self.K, self.D),
                       initial_value=mx.nd.array(np.random.randn(self.K, self.D)))
        # wrap the F.dot operator so MXFusion can use it as a Gluon function
        dot = nn.HybridLambda(function='dot')
        m.dot = mf.functions.MXFusionGluonFunction(dot, num_outputs=1,
                                                   broadcastable=False)
        cov = mx.nd.broadcast_to(
            mx.nd.expand_dims(mx.nd.array(np.eye(self.K, self.K), dtype=dtype), 0),
            shape=(self.N, self.K, self.K))
        m.z = mf.distributions.MultivariateNormal.define_variable(
            mean=mx.nd.zeros(shape=(self.N, self.K), dtype=dtype),
            covariance=cov, shape=(self.N, self.K))
        sigma_2 = Variable(shape=(1,), transformation=PositiveTransformation())
        m.x = mf.distributions.Normal.define_variable(
            mean=m.dot(m.z, m.w), variance=sigma_2, shape=(self.N, self.D))
        return m
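nn.HybridLambda also accepts an operator name: 'dot' is looked up as F.dot (mx.nd.dot or mx.sym.dot) when the block runs. A quick illustration outside the model:

import mxnet as mx
from mxnet.gluon import nn

dot = nn.HybridLambda('dot')
a, b = mx.nd.ones((2, 3)), mx.nd.ones((3, 4))
print(dot(a, b).shape)  # (2, 4)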
Example #6
def _concat_conv(channels, kernel=1, stride=1, pad=0, name=None):
    block = nn.HybridSequential(prefix=name)
    with block.name_scope():
        block.add(nn.HybridLambda(lambda F, x: F.concat(*x, dim=1)))
        block.add(
            ConvBundle(channels,
                       kernel=kernel,
                       stride=stride,
                       pad=pad,
                       bias=True,
                       prefix='concat_conv_'))
    return block
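Here the HybridLambda receives a list of feature maps and merges them along the channel axis before the convolution (ConvBundle is a helper from the same repository). The concat step in isolation:

import mxnet as mx

x1 = mx.nd.ones((1, 8, 4, 4))
x2 = mx.nd.ones((1, 16, 4, 4))
print(mx.nd.concat(x1, x2, dim=1).shape)  # (1, 24, 4, 4)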
Example #7
def _upsample_conv(channels, kernel=1, stride=1, pad=0, name=None):
    block = nn.HybridSequential(prefix=name)
    with block.name_scope():
        block.add(
            nn.HybridLambda(
                lambda F, x: F.UpSampling(x, scale=2, sample_type='nearest'),
                prefix='upsample'))
        block.add(
            ConvBundle(channels,
                       kernel=kernel,
                       stride=stride,
                       pad=pad,
                       bias=True,
                       prefix='upsample_conv_'))
    return block
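F.UpSampling with sample_type='nearest' is a parameter-free operator, so wrapping it in a HybridLambda is the usual way to get an upsampling "layer" in Gluon:

import mxnet as mx

x = mx.nd.arange(4).reshape((1, 1, 2, 2))
y = mx.nd.UpSampling(x, scale=2, sample_type='nearest')
print(y.shape)  # (1, 1, 4, 4)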
Example #8
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 norm_type='batch',
                 use_dropout=False,
                 netG_arch='resnet_9blocks',
                 padding_type='reflect'):
        super(CycleGAN_G, self).__init__()

        norm_layer = get_norm_layer(norm_type=norm_type)
        use_bias = norm_layer == nn.InstanceNorm

        if netG_arch == 'resnet_9blocks':
            n_blocks = 9
        elif netG_arch == 'resnet_6blocks':
            n_blocks = 6
        else:
            raise ValueError('Unknown netG_arch.')

        self.block_c7s1_64 = nn.Sequential()
        block_c7s1_64 = [
            nn.ReflectionPad2D(3),
            nn.Conv2D(channels=ngf,
                      in_channels=input_nc,
                      kernel_size=7,
                      strides=1,
                      padding=0,
                      use_bias=use_bias),
            norm_layer(in_channels=ngf),
            nn.LeakyReLU(0)  # slope 0, i.e. plain ReLU
        ]
        self.block_c7s1_64.add(*block_c7s1_64)

        self.block_dk = nn.Sequential()
        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2**i
            block_dk = [
                nn.Conv2D(in_channels=ngf * mult,
                          channels=ngf * mult * 2,
                          kernel_size=3,
                          strides=2,
                          padding=1,
                          use_bias=use_bias),
                norm_layer(in_channels=ngf * mult * 2),
                nn.LeakyReLU(0)
            ]
            self.block_dk.add(*block_dk)

        self.block_Rk = nn.Sequential()
        mult = 2**n_downsampling
        for i in range(n_blocks):  # add ResNet blocks
            block_Rk = [
                ResidualBlock(ngf * mult,
                              padding_type=padding_type,
                              norm_layer=norm_layer,
                              use_dropout=use_dropout,
                              use_bias=use_bias)
            ]
            self.block_Rk.add(*block_Rk)

        self.block_uk = nn.Sequential()
        n_upsampling = 2
        for i in range(n_upsampling):  # add upsampling layers
            mult = 2**(n_upsampling - i)
            block_uk = [
                nn.Conv2DTranspose(in_channels=ngf * mult,
                                   channels=ngf * mult // 2,
                                   kernel_size=3,
                                   strides=2,
                                   padding=1,
                                   output_padding=1,
                                   use_bias=use_bias),
                norm_layer(in_channels=ngf * mult // 2),
                nn.LeakyReLU(0)
            ]
            self.block_uk.add(*block_uk)

        self.block_c7s1_3 = nn.Sequential()
        block_c7s1_3 = [
            nn.ReflectionPad2D(3),
            nn.Conv2D(in_channels=ngf,
                      channels=output_nc,
                      kernel_size=7,
                      padding=0),
            nn.HybridLambda('tanh')
        ]

        self.block_c7s1_3.add(*block_c7s1_3)
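The generator's forward pass is not part of the snippet; a plausible wiring of the blocks defined above (a sketch under that assumption, not the original code):

    def forward(self, x):
        x = self.block_c7s1_64(x)    # c7s1-64 stem
        x = self.block_dk(x)         # two stride-2 downsampling convs
        x = self.block_Rk(x)         # 6 or 9 residual blocks
        x = self.block_uk(x)         # two transposed-conv upsampling stages
        return self.block_c7s1_3(x)  # c7s1-3 head ending in tanh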
Example #9
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_blocks=6,
                 img_size=256,
                 light=False):
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []
        DownBlock += [
            nn.ReflectionPad2D(3),
            nn.Conv2D(ngf,
                      kernel_size=7,
                      strides=1,
                      padding=0,
                      use_bias=False,
                      in_channels=input_nc),
            InstanceNorm2D(),
            nn.Activation('relu')
        ]

        # Down-Sampling
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [
                nn.ReflectionPad2D(1),
                nn.Conv2D(ngf * mult * 2,
                          kernel_size=3,
                          strides=2,
                          padding=0,
                          use_bias=False,
                          in_channels=ngf * mult),
                InstanceNorm2D(),
                nn.Activation('relu')
            ]

        # Down-Sampling Bottleneck
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        # Class Activation Map
        self.gap_fc = nn.Dense(1, use_bias=False)
        self.gmp_fc = nn.Dense(1, use_bias=False)
        self.conv1x1 = nn.Conv2D(ngf * mult,
                                 kernel_size=1,
                                 strides=1,
                                 use_bias=True,
                                 in_channels=ngf * mult * 2)
        self.relu = nn.Activation('relu')

        # Gamma, Beta block
        FC = [
            nn.Dense(ngf * mult, use_bias=False),
            nn.Activation('relu'),
            nn.Dense(ngf * mult, use_bias=False),
            nn.Activation('relu')
        ]
        self.gamma = nn.Dense(ngf * mult, use_bias=False)
        self.beta = nn.Dense(ngf * mult, use_bias=False)

        # Up-Sampling Bottleneck
        self.UpBlock1s = nn.HybridSequential()
        for i in range(n_blocks):
            self.UpBlock1s.add(ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [
                nn.HybridLambda(lambda F, x: F.UpSampling(
                    x, scale=2, sample_type='nearest')),
                nn.ReflectionPad2D(1),
                nn.Conv2D(int(ngf * mult / 2),
                          kernel_size=3,
                          strides=1,
                          padding=0,
                          use_bias=False,
                          in_channels=ngf * mult),
                ILN(int(ngf * mult / 2)),
                nn.Activation('relu')
            ]

        UpBlock2 += [
            nn.ReflectionPad2D(3),
            nn.Conv2D(output_nc,
                      kernel_size=7,
                      strides=1,
                      padding=0,
                      use_bias=False,
                      in_channels=ngf),
            nn.Activation('tanh')
        ]

        self.DownBlock = nn.HybridSequential()
        self.DownBlock.add(*DownBlock)
        self.FC = nn.HybridSequential()
        self.FC.add(*FC)
        self.UpBlock2 = nn.HybridSequential()
        self.UpBlock2.add(*UpBlock2)
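As in the earlier examples, nearest-neighbor upsampling is expressed as a HybridLambda around F.UpSampling, since Gluon has no dedicated upsampling layer. A construction smoke test, assuming the helper blocks (InstanceNorm2D, ResnetBlock, ResnetAdaILNBlock, ILN) from the same repository are importable; the forward pass, which in UGATIT likely also returns CAM logits and a heatmap, is omitted from the snippet:

import mxnet as mx

net = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=6)
net.initialize(mx.init.Xavier())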
Example #10
    def __init__(self,
                 levels,
                 channels=128,
                 weighted_add=True,
                 expand_channels=False,
                 **kwargs):
        super(BiFPNUnit, self).__init__(**kwargs)
        act_cfg = dict(type='Swish')
        with self.name_scope():
            # top-down branch
            self.top_down_conv = nn.HybridSequential(prefix='td_')
            self.top_down_upsampler = nn.HybridSequential(
                prefix='td.upsampler_')
            for i, level in enumerate(levels[::-1]):
                with self.top_down_conv.name_scope():
                    block = nn.HybridSequential(prefix=f'[{level}]_')
                    with block.name_scope():
                        if i > 0:
                            block.add(
                                FusionAdd(2,
                                          weighted=weighted_add,
                                          prefix='fusion2_'))
                        block.add(build_activation(act_cfg))
                        block.add(
                            _dw_conv_block(int(channels *
                                               2**(len(levels) - i - 1))
                                           if expand_channels else channels,
                                           kernel=3,
                                           stride=1,
                                           pad=1,
                                           name='conv_'))
                    self.top_down_conv.add(block)
                if i < len(levels) - 1:
                    with self.top_down_upsampler.name_scope():
                        if expand_channels:
                            up = _upsample_conv(
                                channels * 2**(len(levels) - i - 2),
                                name=f'upto.{channels * 2 ** (len(levels) - i - 2)}_')
                        else:
                            up = nn.HybridLambda(lambda F, x: F.UpSampling(
                                x, scale=2, sample_type='nearest'),
                                                 prefix=f'upsample[{i}]')
                        self.top_down_upsampler.add(up)

            # bottom-up branch
            self.bottom_up_conv = nn.HybridSequential(prefix='bu_')
            self.bottom_up_downsampler = nn.HybridSequential(
                prefix='bu.downsampler_')
            for i, level in enumerate(levels):
                with self.bottom_up_conv.name_scope():
                    block = nn.HybridSequential(prefix=f'[{level}]_')
                    with block.name_scope():
                        if i == 0:
                            block.add(
                                FusionAdd(2,
                                          weighted=weighted_add,
                                          prefix='fusion2_'))
                        else:
                            block.add(
                                FusionAdd(3,
                                          weighted=weighted_add,
                                          prefix='fusion3_'))
                        block.add(build_activation(act_cfg))
                        block.add(
                            _dw_conv_block(
                                channels *
                                2**i if expand_channels else channels,
                                kernel=3,
                                stride=1,
                                pad=1,
                                name='conv_'))
                    self.bottom_up_conv.add(block)
                if i < len(levels) - 1:
                    with self.bottom_up_downsampler.name_scope():
                        if expand_channels:
                            down = ConvBundle(
                                channels * 2**(len(levels) - i - 2),
                                kernel=1,
                                stride=2,
                                prefix=f'downto.{channels * 2 ** (len(levels) - i - 2)}_')
                        else:
                            down = nn.MaxPool2D(pool_size=3,
                                                strides=2,
                                                padding=1)
                        self.bottom_up_downsampler.add(down)
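This mirrors the BiFPN unit from EfficientDet: a top-down pass that upsamples and fuses adjacent levels, then a bottom-up pass that downsamples and fuses again, with each fusion followed by an activation and a depthwise-separable conv. A construction sketch, assuming the helpers (FusionAdd, _dw_conv_block, build_activation, ConvBundle) from the same repository are available:

# five pyramid levels P3-P7 at 128 channels each
unit = BiFPNUnit(levels=[3, 4, 5, 6, 7], channels=128)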
Example #11
prenet.collect_params().initialize(mx.init.Xavier(magnitude=2.24),
                                   ctx=model_ctx)

trees = []
for _ in range(10):
    _, (data, label) = next(enumerate(train_data))
    tree = Tree(lambda: nn.Dense(10))
    extend_tree(prenet(data)[0:2], tree)
    trees.append(tree)

# run every tree on the same input and concatenate their (batch, 10) outputs
forest = contrib.nn.HybridConcurrent()
with forest.name_scope():
    for tree in trees:
        forest.add(tree)

# split the concatenated outputs back into (batch, 10, n_trees)
reshape = nn.HybridLambda(lambda F, x: F.reshape(x, shape=(0, -4, 10, -1)))

net = gluon.nn.HybridSequential()
with net.name_scope():
    net.add(prenet)
    net.add(forest)
    net.add(reshape)

net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=model_ctx)
error = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})
acc = mx.metric.Accuracy()

for _, (x, y) in enumerate(train_data):
    data = nd.array(x).as_in_context(model_ctx)
    label = nd.array(y).as_in_context(model_ctx)
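    # --- plausible continuation; the original snippet is truncated here ---
    # (a sketch: assumes `from mxnet import autograd` and averages the
    # per-tree class scores over the last axis before the softmax loss)
    with autograd.record():
        output = net(data).mean(axis=2)   # (batch, 10)
        loss = error(output, label)
    loss.backward()
    trainer.step(data.shape[0])
    acc.update(preds=output, labels=label)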