Code Example #1
def cin_resnet_block(input_shape, z_dim, padding, norm_layer, use_dropout,
                     use_bias, block_name, **kwargs):
    # residual-style block: conv -> conditional IN -> ReLU -> (dropout) ->
    # conv -> IN; the channel count of the (NCHW) input is preserved
    num_filters = input_shape[1]

    block = nn.Sequential(input_shape=input_shape, layer_name=block_name)
    block.append(
        nn.Conv2DLayer(input_shape,
                       num_filters,
                       3,
                       border_mode=padding,
                       no_bias=not use_bias,
                       activation=None,
                       layer_name=block_name + '/conv1'))
    # normalize against the running output shape so the block also works
    # when the padding mode changes the spatial size
    block.append(norm_layer(block.output_shape, z_dim,
                            layer_name=block_name + '/CIN'))
    block.append(
        nn.ActivationLayer(block.output_shape, 'relu',
                           layer_name=block_name + '/relu1'))
    if use_dropout:
        block.append(
            nn.DropoutLayer(block.output_shape,
                            .5,
                            layer_name=block_name + '/dropout'))

    block.append(
        nn.Conv2DLayer(block.output_shape,
                       num_filters,
                       3,
                       border_mode=padding,
                       no_bias=not use_bias,
                       activation=None,
                       layer_name=block_name + '/conv2'))
    block.append(nn.InstanceNormLayer(block.output_shape, block_name + '/IN'))
    return block
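
A minimal usage sketch (not from the original project): the feature-map shape, z_dim, and the ConditionalInstanceNorm name below are illustrative assumptions; any normalizer accepting (input_shape, z_dim, layer_name=...) as called above will do.

# hypothetical call; shapes and the CIN layer name are assumptions
shape = (None, 256, 64, 64)                       # NCHW feature map
block = cin_resnet_block(shape, z_dim=8, padding='half',
                         norm_layer=ConditionalInstanceNorm,  # assumed name
                         use_dropout=False, use_bias=True,
                         block_name='res1')
print(block.output_shape)                         # channels preserved: 256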
Code Example #2
    def _make_layer(self, block, shape, planes, blocks, stride=1, name=''):
        downsample = None
        if stride != 1 or shape[1] != planes * block.upscale_factor:
            # signal the block that the shortcut needs a projection to match
            # the main branch's stride and width
            downsample = True

        layers = [
            block(shape,
                  planes,
                  stride,
                  downsample=downsample,
                  activation=self.activation,
                  layer_name=name + '_0',
                  block=self.custom_block,
                  **self.kwargs)
        ]

        for i in range(1, blocks):
            layers.append(
                block(layers[-1].output_shape,
                      planes,
                      activation=self.activation,
                      layer_name=name + '_%d' % i,
                      block=self.custom_block,
                      **self.kwargs))
        return nn.Sequential(layers, layer_name=name)
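
For context, a sketch of how _make_layer is typically driven from a ResNet-style constructor; the block class and the stage sizes are illustrative assumptions.

# hypothetical call site inside the network's __init__
stage1 = self._make_layer(BasicBlock, (None, 64, 56, 56),
                          planes=64, blocks=3, stride=1, name='layer1')
stage2 = self._make_layer(BasicBlock, stage1.output_shape,
                          planes=128, blocks=4, stride=2, name='layer2')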
Code Example #3
File: DeepLTE.py  Project: justanhduc/DeepLTE
    def __init__(self, config_file, **kwargs):
        super(DeepLTE, self).__init__(config_file, **kwargs)
        self.num_frames = self.config['model']['num_frames']
        self.order = self.config['model']['order']
        self.nodes = self.config['model']['nodes']
        self.targets = self.config['model']['targets']
        self.interps = self.config['model']['interps']
        self.alpha = self.config['model']['alpha']
        self.dropout = self.config['model']['dropout']
        self.perceptual_cost = self.config['model']['perceptual_cost']
        self.vgg_weight_file = self.config['model']['vgg_weight_file']
        self.input_tensor_shape = (None,) + self.input_shape[1:]  # free batch dimension

        # ResNet-34 feature encoder with leaky-ReLU activations
        enc = nn.model_zoo.resnet34(self.input_tensor_shape, 64, 'lrelu', False, False, name='encoder', alpha=self.alpha)
        self.model.append(enc)

        # decoder: four resize-then-conv upsampling stages ending in a tanh output
        subnet = 'decoder'
        dec = nn.Sequential(input_shape=enc.output_shape, layer_name='decoder')
        dec.append(nn.ResizingLayer(dec.input_shape, 2, layer_name=subnet + '_up1'))
        dec.append(nn.StackingConv(dec.output_shape, 3, 256, 5, batch_norm=False, layer_name=subnet + '_block5',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))

        dec.append(nn.ResizingLayer(dec.output_shape, 2, layer_name=subnet + '_up2'))
        dec.append(nn.StackingConv(dec.output_shape, 5, 128, 5, batch_norm=False, layer_name=subnet + '_block6',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))

        dec.append(nn.ResizingLayer(dec.output_shape, 2, layer_name=subnet + '_up3'))
        dec.append(nn.StackingConv(dec.output_shape, 6, 128, 5, batch_norm=False, layer_name=subnet + '_block7',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))
        dec.append(nn.ConvolutionalLayer(dec.output_shape, 128, 5, activation='linear', layer_name=subnet+'_conv7'))
        if self.dropout:
            dec.append(nn.DropoutLayer(dec.output_shape, drop_prob=.5, layer_name=subnet + '_dropout7'))
        dec.append(nn.ActivationLayer(dec.output_shape, 'lrelu', subnet+'_act7', alpha=self.alpha))

        dec.append(nn.ResizingLayer(dec.output_shape, 2, layer_name=subnet + '_up4'))
        dec.append(nn.StackingConv(dec.output_shape, 8, 64, 5, batch_norm=False, layer_name=subnet + '_block8',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))
        dec.append(nn.ConvolutionalLayer(dec.output_shape, 64, 5, activation='linear', layer_name=subnet + '_conv8'))
        if self.dropout:
            dec.append(nn.DropoutLayer(dec.output_shape, drop_prob=.5, layer_name=subnet + '_dropout8'))
        dec.append(nn.ActivationLayer(dec.output_shape, 'lrelu', subnet + '_act8', alpha=self.alpha))

        dec.append(nn.ConvolutionalLayer(dec.output_shape, 3, 5, activation='tanh', no_bias=False,
                                         layer_name=subnet + '_output'))

        self.model.append(dec)
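
The constructor reads only the keys under config['model'], so a config sketch needs at least the following; every value here is a placeholder, not the project's actual setting, and the list structure of nodes/targets/interps is an assumption.

# hypothetical config fragment matching the keys read above
config = {
    'model': {
        'num_frames': 5,
        'order': 2,
        'nodes': [0, 2, 4],          # assumed to be frame indices
        'targets': [1, 3],
        'interps': [.5, .5],
        'alpha': .1,                 # leaky-ReLU slope, as used above
        'dropout': True,
        'perceptual_cost': False,
        'vgg_weight_file': 'path/to/vgg_weights.npz',
    }
}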
Code Example #4
    def _make_layer(self, block, shape, planes, blocks, stride=1, name=''):
        layers = [
            block(shape,
                  planes,
                  stride,
                  activation=self.activation,
                  layer_name=name + '_0',
                  block=self.main_branch,
                  downsample=self.res_branch,
                  **self.kwargs)
        ]

        for i in range(1, blocks):
            layers.append(
                block(layers[-1].output_shape,
                      planes,
                      activation=self.activation,
                      layer_name=name + '_%d' % i,
                      block=self.main_branch,
                      downsample=self.res_branch,
                      **self.kwargs))
        return nn.Sequential(layers, layer_name=name)
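
This variant differs from Code Example #2 only in how the shortcut is chosen: instead of computing a downsample flag from the stride and width, it always forwards the preconfigured self.res_branch (and self.main_branch) callables to the block. Invocation follows the same pattern as the sketch after Code Example #2.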
Code Example #5
    def __init__(self,
                 input_shape,
                 n_latent,
                 num_filters,
                 norm_layer,
                 deterministic=False,
                 use_bias=False,
                 name='Latent Encoder'):
        super(LatentEncoder, self).__init__(input_shape=input_shape,
                                            layer_name=name)
        self.deterministic = deterministic
        # four stride-2 convs halve the spatial size step by step; a final
        # 4x4 'valid' conv then shrinks the map before the 1x1 mu/logvar heads
        self.enc = nn.Sequential(input_shape=input_shape,
                                 layer_name=name + '/enc')
        self.enc.append(
            nn.Conv2DLayer(self.enc.output_shape,
                           num_filters,
                           3,
                           stride=2,
                           no_bias=False,
                           activation='relu',
                           layer_name=name + '/conv1'))

        self.enc.append(
            nn.Conv2DLayer(self.enc.output_shape,
                           2 * num_filters,
                           3,
                           stride=2,
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv2'))
        self.enc.append(norm_layer(self.enc.output_shape, name + '/norm2'))
        self.enc.append(
            nn.ActivationLayer(self.enc.output_shape, 'relu', name + '/act2'))

        self.enc.append(
            nn.Conv2DLayer(self.enc.output_shape,
                           4 * num_filters,
                           3,
                           stride=2,
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv3'))
        self.enc.append(norm_layer(self.enc.output_shape, name + '/norm3'))
        self.enc.append(
            nn.ActivationLayer(self.enc.output_shape, 'relu', name + '/act3'))

        self.enc.append(
            nn.Conv2DLayer(self.enc.output_shape,
                           8 * num_filters,
                           3,
                           stride=2,
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv4'))
        self.enc.append(norm_layer(self.enc.output_shape, name + '/norm4'))
        self.enc.append(
            nn.ActivationLayer(self.enc.output_shape, 'relu', name + '/act4'))

        self.enc.append(
            nn.Conv2DLayer(self.enc.output_shape,
                           8 * num_filters,
                           4,
                           stride=1,
                           no_bias=not use_bias,
                           activation=None,
                           border_mode='valid',
                           layer_name=name + '/conv5'))
        self.enc.append(norm_layer(self.enc.output_shape, name + '/norm5'))
        self.enc.append(
            nn.ActivationLayer(self.enc.output_shape, 'relu', name + '/act5'))

        self.enc_mu = nn.Conv2DLayer(self.enc.output_shape,
                                     n_latent,
                                     1,
                                     no_bias=False,
                                     activation=None,
                                     layer_name=name + '/mu')
        self.extend((self.enc, self.enc_mu))

        if not deterministic:
            # variational case: a second 1x1 head predicts the log-variance
            self.enc_logvar = nn.Conv2DLayer(self.enc.output_shape,
                                             n_latent,
                                             1,
                                             no_bias=False,
                                             activation=None,
                                             layer_name=name + '/logvar')
            self.append(self.enc_logvar)
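
A usage sketch, assuming a 64x64 RGB input and the nn.InstanceNormLayer normalizer that Code Example #1 also uses; all numbers are illustrative.

# hypothetical usage; input size and hyper-parameters are assumptions
enc = LatentEncoder((None, 3, 64, 64), n_latent=8, num_filters=64,
                    norm_layer=nn.InstanceNormLayer)
# 64 -> 32 -> 16 -> 8 -> 4 through the stride-2 convs; the 4x4 'valid'
# conv then leaves a 1x1 map for the mu (and logvar) heads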
Code Example #6
    def __init__(self, input_shape, output_size):
        super(LSGAN, self).__init__(input_shape=input_shape,
                                    layer_name='LSGAN')
        # intermediate spatial sizes after each halving of output_size
        s2, s4, s8, s16 = output_size // 2, output_size // 4, output_size // 8, output_size // 16

        # DCGAN-style generator: FC -> reshape -> transposed convs with BN+ReLU
        subnet = 'generator'
        self.gen = nn.Sequential(input_shape=input_shape, layer_name=subnet)
        self.gen.append(
            nn.FullyConnectedLayer(self.gen.output_shape,
                                   256 * s16 * s16,
                                   activation='linear',
                                   layer_name=subnet + '/fc1',
                                   init=nn.Normal(.02)))
        self.gen.append(
            nn.ReshapingLayer(self.gen.output_shape, (-1, 256, s16, s16),
                              subnet + '/reshape'))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn1',
                              activation='relu',
                              epsilon=1.1e-5))

        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            256,
                                            3, (s8, s8),
                                            activation='linear',
                                            layer_name=subnet + '/deconv1',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn2',
                              activation='relu',
                              epsilon=1.1e-5))
        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            256,
                                            3, (s8, s8),
                                            stride=(1, 1),
                                            activation='linear',
                                            layer_name=subnet + '/deconv2',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn3',
                              activation='relu',
                              epsilon=1.1e-5))

        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            256,
                                            3, (s4, s4),
                                            activation='linear',
                                            layer_name=subnet + '/deconv3',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn4',
                              activation='relu',
                              epsilon=1.1e-5))
        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            256,
                                            3, (s4, s4),
                                            stride=(1, 1),
                                            activation='linear',
                                            layer_name=subnet + '/deconv4',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn5',
                              activation='relu',
                              epsilon=1.1e-5))

        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            128,
                                            3, (s2, s2),
                                            activation='linear',
                                            layer_name=subnet + '/deconv5',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn6',
                              activation='relu',
                              epsilon=1.1e-5))

        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            64,
                                            3, (output_size, output_size),
                                            activation='linear',
                                            layer_name=subnet + '/deconv6',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn7',
                              activation='relu',
                              epsilon=1.1e-5))

        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            3,
                                            3, (output_size, output_size),
                                            stride=(1, 1),
                                            activation='tanh',
                                            layer_name=subnet + '/output',
                                            init=nn.Normal(.02)))
        self.append(self.gen)

        # discriminator: strided convs with leaky ReLUs and a linear score head
        # (the least-squares GAN loss works on raw scores, so no sigmoid)
        subnet = 'discriminator'
        self.dis = nn.Sequential(input_shape=self.gen.output_shape,
                                 layer_name=subnet)
        self.dis.append(
            nn.ConvolutionalLayer(self.dis.output_shape,
                                  64,
                                  5,
                                  stride=2,
                                  activation='lrelu',
                                  no_bias=False,
                                  layer_name=subnet + '/first_conv',
                                  init=nn.TruncatedNormal(.02),
                                  alpha=.2))
        self.dis.append(
            nn.ConvNormAct(self.dis.output_shape,
                           64 * 2,
                           5,
                           stride=2,
                           activation='lrelu',
                           no_bias=False,
                           layer_name=subnet + '/conv1',
                           init=nn.TruncatedNormal(.02),
                           epsilon=1.1e-5,
                           alpha=.2))
        self.dis.append(
            nn.ConvNormAct(self.dis.output_shape,
                           64 * 4,
                           5,
                           stride=2,
                           activation='lrelu',
                           no_bias=False,
                           layer_name=subnet + '/conv2',
                           init=nn.TruncatedNormal(.02),
                           epsilon=1.1e-5,
                           alpha=.2))
        self.dis.append(
            nn.ConvNormAct(self.dis.output_shape,
                           64 * 8,
                           5,
                           stride=2,
                           activation='lrelu',
                           no_bias=False,
                           layer_name=subnet + '/conv3',
                           init=nn.TruncatedNormal(.02),
                           epsilon=1.1e-5,
                           alpha=.2))
        self.dis.append(
            nn.FullyConnectedLayer(self.dis.output_shape,
                                   1,
                                   layer_name=subnet + '/output',
                                   activation='linear'))
        self.append(self.dis)
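
Instantiation sketch: a flat latent vector works as input because the generator opens with a fully connected layer. The latent size of 100 and the 64-pixel output are assumptions; output_size should be divisible by 16 so the s16 reshape lines up.

# hypothetical usage; latent size and resolution are assumptions
gan = LSGAN((None, 100), output_size=64)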
Code Example #7
    def __init__(self, encoder, layer, name='Decoder'):
        super(Decoder,
              self).__init__(input_shape=encoder[layer - 1].output_shape,
                             layer_name=name)
        self.enc = encoder
        self.layer = layer
        # mirror of the encoder: reflection-padded ('ref') 3x3 convs with
        # nearest-neighbour upsampling between stages
        dec = nn.Sequential(input_shape=encoder.output_shape, layer_name=name)
        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           512,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv1_1'))
        dec.append(
            nn.UpsamplingLayer(dec.output_shape,
                               2,
                               method='nearest',
                               layer_name=name + '/up1'))

        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           512,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv2_1'))
        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           512,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv2_2'))
        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           512,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv2_3'))
        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           512,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv2_4'))
        dec.append(
            nn.UpsamplingLayer(dec.output_shape,
                               2,
                               method='nearest',
                               layer_name=name + '/up2'))

        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           256,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv3_1'))
        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           256,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv3_2'))
        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           256,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv3_3'))
        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           256,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv3_4'))
        dec.append(
            nn.UpsamplingLayer(dec.output_shape,
                               2,
                               method='nearest',
                               layer_name=name + '/up3'))

        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           128,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv4_1'))
        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           128,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv4_2'))
        dec.append(
            nn.UpsamplingLayer(dec.output_shape,
                               2,
                               method='nearest',
                               layer_name=name + '/up4'))

        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           64,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv5_1'))
        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           64,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           layer_name=name + '/conv5_2'))
        dec.append(
            nn.Conv2DLayer(dec.output_shape,
                           3,
                           3,
                           init=nn.GlorotUniform(),
                           border_mode='ref',
                           no_bias=False,
                           activation='tanh',
                           layer_name=name + '/output'))
        # keep only the decoder tail that mirrors the first `layer` encoder stages
        self.append(dec[len(encoder) - layer:])
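
A sketch of the intended call, assuming encoder is an indexable nn.Sequential (e.g. a VGG-style feature stack) whose members expose output_shape; the depth value is illustrative.

# hypothetical usage; vgg_encoder and the depth are assumptions
dec = Decoder(vgg_encoder, layer=3, name='Decoder3')
# only the decoder tail matching the first 3 encoder stages is kept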