# Assumed imports for these snippets: `nn` is the Theano-based layer library
# used throughout (Sequential, Conv2DLayer, model_zoo, ...); the exact module
# name is an assumption. `partial` is required by the norm-layer defaults below.
from functools import partial

import neuralnet as nn


def cin_resnet_block(input_shape, z_dim, padding, norm_layer, use_dropout,
                     use_bias, block_name, **kwargs):
    num_filters = input_shape[1]

    block = nn.Sequential(input_shape=input_shape, layer_name=block_name)
    block.append(
        nn.Conv2DLayer(input_shape,
                       num_filters,
                       3,
                       border_mode=padding,
                       no_bias=not use_bias,
                       activation=None,
                       layer_name=block_name + '/conv1'))
    block.append(norm_layer(block.output_shape, z_dim,
                            layer_name=block_name + '/CIN'))
    block.append(
        nn.ActivationLayer(block.output_shape,
                           layer_name=block_name + '/relu1'))
    if use_dropout:
        block.append(
            nn.DropoutLayer(block.output_shape,
                            .5,
                            layer_name=block_name + '/dropout'))

    block.append(
        nn.Conv2DLayer(block.output_shape,
                       num_filters,
                       3,
                       border_mode=padding,
                       no_bias=not use_bias,
                       activation=None,
                       layer_name=block_name + '/conv2'))
    block.append(nn.InstanceNormLayer(block.output_shape, block_name + '/IN'))
    return block
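
# What the conditional norm layer above presumably computes: conditional
# instance normalization (Dumoulin et al., 2017). A numpy sketch under that
# assumption, not the library's implementation; `gamma` and `beta` would be
# produced from the z_dim-dimensional code by the layer's own parameters.
import numpy as np

def conditional_instance_norm(x, gamma, beta, eps=1e-5):
    # x: (N, C, H, W); gamma, beta: (N, C) derived from the conditioning code
    mu = x.mean(axis=(2, 3), keepdims=True)
    var = x.var(axis=(2, 3), keepdims=True)
    return gamma[..., None, None] * (x - mu) / np.sqrt(var + eps) \
        + beta[..., None, None]
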
    def __init__(self, config_file, **kwargs):
        super(DeepLTE, self).__init__(config_file, **kwargs)
        self.num_frames = self.config['model']['num_frames']
        self.order = self.config['model']['order']
        self.nodes = self.config['model']['nodes']
        self.targets = self.config['model']['targets']
        self.interps = self.config['model']['interps']
        self.alpha = self.config['model']['alpha']
        self.dropout = self.config['model']['dropout']
        self.perceptual_cost = self.config['model']['perceptual_cost']
        self.vgg_weight_file = self.config['model']['vgg_weight_file']
        self.input_tensor_shape = (None,) + self.input_shape[1:]

        enc = nn.model_zoo.resnet34(self.input_tensor_shape, 64, 'lrelu', False,
                                    False, name='encoder', alpha=self.alpha)
        self.model.append(enc)

        subnet = 'decoder'
        dec = nn.Sequential(input_shape=enc.output_shape, layer_name=subnet)
        dec.append(nn.ResizingLayer(dec.input_shape, 2, layer_name=subnet + '_up1'))
        dec.append(nn.StackingConv(dec.output_shape, 3, 256, 5, batch_norm=False, layer_name=subnet + '_block5',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))

        dec.append(nn.ResizingLayer(dec.output_shape, 2, layer_name=subnet + '_up2'))
        dec.append(nn.StackingConv(dec.output_shape, 5, 128, 5, batch_norm=False, layer_name=subnet + '_block6',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))

        dec.append(nn.ResizingLayer(dec.output_shape, 2, layer_name=subnet + '_up3'))
        dec.append(nn.StackingConv(dec.output_shape, 6, 128, 5, batch_norm=False, layer_name=subnet + '_block7',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))
        dec.append(nn.ConvolutionalLayer(dec.output_shape, 128, 5, activation='linear', layer_name=subnet + '_conv7'))
        if self.dropout:
            dec.append(nn.DropoutLayer(dec.output_shape, drop_prob=.5, layer_name=subnet + '_dropout7'))
        dec.append(nn.ActivationLayer(dec.output_shape, 'lrelu', subnet + '_act7', alpha=self.alpha))

        dec.append(nn.ResizingLayer(dec.output_shape, 2, layer_name=subnet + '_up4'))
        dec.append(nn.StackingConv(dec.output_shape, 8, 64, 5, batch_norm=False, layer_name=subnet + '_block8',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))
        dec.append(nn.ConvolutionalLayer(dec.output_shape, 64, 5, activation='linear', layer_name=subnet + '_conv8'))
        if self.dropout:
            dec.append(nn.DropoutLayer(dec.output_shape, drop_prob=.5, layer_name=subnet + '_dropout8'))
        dec.append(nn.ActivationLayer(dec.output_shape, 'lrelu', subnet + '_act8', alpha=self.alpha))

        dec.append(nn.ConvolutionalLayer(dec.output_shape, 3, 5, activation='tanh', no_bias=False,
                                         layer_name=subnet + '_output'))

        self.model.append(dec)
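
# The constructor above reads these keys from the 'model' section of the
# parsed config; a minimal sketch of that structure (all values are
# illustrative placeholders, not taken from the source):
example_config = {
    'model': {
        'num_frames': 5,
        'order': 1,
        'nodes': [0, 4],
        'targets': [2],
        'interps': [1, 3],
        'alpha': .2,
        'dropout': True,
        'perceptual_cost': False,
        'vgg_weight_file': 'path/to/vgg19_weights.npz',
    }
}
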
    def __init__(self,
                 input_shape,
                 n_nodes,
                 use_sigmoid=False,
                 name='Latent Discriminator'):
        super(DiscriminatorLatent, self).__init__(input_shape=input_shape,
                                                  layer_name=name)
        self.append(
            nn.FCLayer(self.output_shape,
                       n_nodes,
                       activation=None,
                       layer_name=name + '/fc1'))
        self.append(
            nn.BatchNormLayer(self.output_shape,
                              name + '/bn1',
                              activation='lrelu',
                              alpha=.2))

        self.append(
            nn.FCLayer(self.output_shape,
                       n_nodes,
                       activation=None,
                       layer_name=name + '/fc2'))
        self.append(
            nn.BatchNormLayer(self.output_shape,
                              name + '/bn2',
                              activation='lrelu',
                              alpha=.2))

        self.append(
            nn.FCLayer(self.output_shape,
                       n_nodes,
                       activation=None,
                       layer_name=name + '/fc3'))
        self.append(
            nn.BatchNormLayer(self.output_shape,
                              name + '/bn3',
                              activation='lrelu',
                              alpha=.2))

        self.append(
            nn.FCLayer(self.output_shape,
                       1,
                       activation=None,
                       layer_name=name + '/output'))

        if use_sigmoid:
            self.append(
                nn.ActivationLayer(self.output_shape, 'sigmoid',
                                   name + '/act'))
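
# Hypothetical instantiation of the latent discriminator above; the input
# shape and width are illustrative assumptions.
disc_z = DiscriminatorLatent(input_shape=(None, 128),
                             n_nodes=512,
                             use_sigmoid=True)
print(disc_z.output_shape)  # expected: (None, 1)
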
    def __init__(self,
                 input_shape,
                 n_latent,
                 num_filters,
                 norm_layer,
                 deterministic=False,
                 use_bias=False,
                 name='Latent Encoder'):
        super(LatentEncoder, self).__init__(input_shape=input_shape,
                                            layer_name=name)
        self.deterministic = deterministic
        self.enc = nn.Sequential(input_shape=input_shape,
                                 layer_name=name + '/enc')
        self.enc.append(
            nn.Conv2DLayer(self.enc.output_shape,
                           num_filters,
                           3,
                           stride=2,
                           no_bias=False,
                           activation='relu',
                           layer_name=name + '/conv1'))

        self.enc.append(
            nn.Conv2DLayer(self.enc.output_shape,
                           2 * num_filters,
                           3,
                           stride=2,
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv2'))
        self.enc.append(norm_layer(self.enc.output_shape, name + '/norm2'))
        self.enc.append(
            nn.ActivationLayer(self.enc.output_shape, 'relu', name + '/act2'))

        self.enc.append(
            nn.Conv2DLayer(self.enc.output_shape,
                           4 * num_filters,
                           3,
                           stride=2,
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv3'))
        self.enc.append(norm_layer(self.enc.output_shape, name + '/norm3'))
        self.enc.append(
            nn.ActivationLayer(self.enc.output_shape, 'relu', name + '/act3'))

        self.enc.append(
            nn.Conv2DLayer(self.enc.output_shape,
                           8 * num_filters,
                           3,
                           stride=2,
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv4'))
        self.enc.append(norm_layer(self.enc.output_shape, name + '/norm4'))
        self.enc.append(
            nn.ActivationLayer(self.enc.output_shape, 'relu', name + '/act4'))

        self.enc.append(
            nn.Conv2DLayer(self.enc.output_shape,
                           8 * num_filters,
                           4,
                           stride=1,
                           no_bias=not use_bias,
                           activation=None,
                           border_mode='valid',
                           layer_name=name + '/conv5'))
        self.enc.append(norm_layer(self.enc.output_shape, name + '/norm5'))
        self.enc.append(
            nn.ActivationLayer(self.enc.output_shape, 'relu', name + '/act5'))

        self.enc_mu = nn.Conv2DLayer(self.enc.output_shape,
                                     n_latent,
                                     1,
                                     no_bias=False,
                                     activation=None,
                                     layer_name=name + '/mu')
        self.extend((self.enc, self.enc_mu))

        if not deterministic:
            self.enc_logvar = nn.Conv2DLayer(self.enc.output_shape,
                                             n_latent,
                                             1,
                                             no_bias=False,
                                             activation=None,
                                             layer_name=name + '/logvar')
            self.append(self.enc_logvar)
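
# When `deterministic` is False, `enc_mu` and `enc_logvar` parameterize a
# diagonal Gaussian over the latent code. The standard reparameterization,
# sketched in numpy for reference (sampling itself happens elsewhere):
import numpy as np

def sample_latent(mu, logvar, rng=np.random):
    eps = rng.standard_normal(mu.shape)
    return mu + np.exp(.5 * logvar) * eps
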
    def __init__(self,
                 input_shape,
                 num_filters=64,
                 norm_layer=partial(nn.BatchNormLayer, activation=None),
                 use_sigmoid=False,
                 use_bias=True,
                 name='Discriminator Edges'):
        super(DiscriminatorEdges, self).__init__(input_shape=input_shape,
                                                 layer_name=name)
        self.append(
            nn.Conv2DLayer(self.output_shape,
                           num_filters,
                           3,
                           stride=2,
                           border_mode='half',
                           no_bias=not use_bias,
                           activation='lrelu',
                           alpha=.2,
                           layer_name=name + '/conv1'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           2 * num_filters,
                           3,
                           stride=2,
                           border_mode='half',
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv2'))
        self.append(norm_layer(self.output_shape, layer_name=name + '/bn2'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               activation='lrelu',
                               alpha=.2,
                               layer_name=name + '/act2'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           4 * num_filters,
                           3,
                           border_mode='half',
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv3'))
        self.append(norm_layer(self.output_shape, layer_name=name + '/bn3'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               activation='lrelu',
                               alpha=.2,
                               layer_name=name + '/act3'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           4 * num_filters,
                           3,
                           border_mode='half',
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv4'))
        self.append(norm_layer(self.output_shape, layer_name=name + '/bn4'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               activation='lrelu',
                               alpha=.2,
                               layer_name=name + '/act4'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           1,
                           4,
                           border_mode='valid',
                           layer_name=name + '/output',
                           activation=None))

        if use_sigmoid:
            self.append(
                nn.ActivationLayer(self.output_shape, 'sigmoid',
                                   name + '/sigmoid'))
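
# Shape arithmetic for the patch-level output above, assuming 'half' padding
# preserves H and W, stride 2 halves even sizes, and the final 4x4 'valid'
# convolution shrinks each side by 3:
def disc_edges_output_hw(h, w):
    h, w = h // 2, w // 2   # conv1, stride 2
    h, w = h // 2, w // 2   # conv2, stride 2
    # conv3 and conv4 are stride 1 with 'half' padding: H, W unchanged
    return h - 3, w - 3     # 4x4 valid conv: H - 4 + 1

# e.g. a 64x64 input yields a 13x13 map of real/fake scores
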
    def __init__(self,
                 input_shape,
                 n_latent,
                 num_filters=64,
                 norm_layer=nn.ConditionalInstanceNorm2DLayer,
                 use_sigmoid=False,
                 use_bias=True,
                 name='CIN Discriminator'):
        super(CINDiscriminator, self).__init__(input_shape=input_shape,
                                               layer_name=name)
        self.append(
            nn.Conv2DLayer(self.output_shape,
                           num_filters,
                           4,
                           stride=2,
                           border_mode=1,
                           no_bias=not use_bias,
                           activation='lrelu',
                           alpha=.2,
                           layer_name=name + '/conv1'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           2 * num_filters,
                           4,
                           stride=2,
                           border_mode=1,
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv2'))
        self.append(
            norm_layer(self.output_shape, n_latent, layer_name=name + '/bn2'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               activation='lrelu',
                               alpha=.2,
                               layer_name=name + '/act2'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           4 * num_filters,
                           4,
                           border_mode=1,
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv3'))
        self.append(
            norm_layer(self.output_shape, n_latent, layer_name=name + '/bn3'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               activation='lrelu',
                               alpha=.2,
                               layer_name=name + '/act3'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           5 * num_filters,
                           4,
                           border_mode=1,
                           no_bias=not use_bias,
                           activation=None,
                           layer_name=name + '/conv4'))
        self.append(
            norm_layer(self.output_shape, n_latent, layer_name=name + '/bn4'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               activation='lrelu',
                               alpha=.2,
                               layer_name=name + '/act4'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           1,
                           4,
                           border_mode=1,
                           layer_name=name + '/output',
                           activation=None))

        if use_sigmoid:
            self.append(
                nn.ActivationLayer(self.output_shape, 'sigmoid',
                                   name + '/sigmoid'))
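
# Hypothetical construction; unlike DiscriminatorEdges, every norm layer here
# is conditioned on an n_latent-dimensional code (values are illustrative):
cin_disc = CINDiscriminator(input_shape=(None, 3, 64, 64), n_latent=8)
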
    def __init__(self,
                 input_shape,
                 num_filters,
                 output_dim,
                 norm_layer=partial(nn.InstanceNormLayer, activation=None),
                 use_dropout=False,
                 padding='ref',
                 name='Resnet Generator'):
        super(ResnetGen, self).__init__(input_shape=input_shape,
                                        layer_name=name)
        self.append(
            nn.Conv2DLayer(input_shape,
                           num_filters,
                           7,
                           border_mode='ref',
                           activation=None,
                           no_bias=False,
                           layer_name=name + '/conv1'))
        self.append(norm_layer(self.output_shape, layer_name=name + '/cin1'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               'relu',
                               layer_name=name + '/relu1'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           num_filters * 2,
                           3,
                           border_mode='half',
                           activation=None,
                           no_bias=False,
                           layer_name=name + '/conv2'))
        self.append(norm_layer(self.output_shape, layer_name=name + '/cin2'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               'relu',
                               layer_name=name + '/relu2'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           num_filters * 4,
                           3,
                           stride=2,
                           border_mode='half',
                           activation=None,
                           no_bias=False,
                           layer_name=name + '/conv3'))
        self.append(norm_layer(self.output_shape, layer_name=name + '/cin3'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               'relu',
                               layer_name=name + '/relu3'))

        for i in range(3):
            self.append(
                ResnetBlock(self.output_shape, padding, norm_layer,
                            use_dropout, True,
                            name + '/ResBlock %d' % (i + 1)))

        self.append(
            nn.TransposedConvolutionalLayer(self.output_shape,
                                            2 * num_filters,
                                            3,
                                            stride=(2, 2),
                                            padding='half',
                                            activation=None,
                                            layer_name=name + '/deconv'))
        self.append(norm_layer(self.output_shape, layer_name=name + '/cin4'))
        self.append(
            nn.ActivationLayer(self.output_shape, 'relu', name + '/relu4'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           num_filters,
                           3,
                           border_mode='half',
                           activation=None,
                           no_bias=False,
                           layer_name=name + '/conv5'))
        self.append(norm_layer(self.output_shape, layer_name=name + '/cin5'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               'relu',
                               layer_name=name + '/relu5'))

        self.append(
            nn.Conv2DLayer(self.output_shape,
                           output_dim,
                           7,
                           # reflection padding, matching conv1, keeps H and W
                           # through the 7x7 head (assumed; CycleGAN-style)
                           border_mode='ref',
                           activation=None,
                           layer_name=name + '/output'))
        self.append(
            nn.ActivationLayer(self.output_shape,
                               'tanh',
                               layer_name=name + '/output_act'))
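
# Hypothetical construction: a CycleGAN-style image-to-image generator whose
# tanh head keeps outputs in [-1, 1] (shape and width are assumptions):
gen = ResnetGen(input_shape=(None, 3, 128, 128), num_filters=64, output_dim=3)
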
    def __init__(self,
                 input_shape,
                 num_classes=10,
                 name='vgg19 mean interp padding'):
        super(VGG19MeanInterpPadding, self).__init__(input_shape=input_shape,
                                                     layer_name=name)
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         64,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv1'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn1'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         64,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv2'))
        self.append(
            nn.BatchNormLayer(self.output_shape,
                              name + '/bn2',
                              activation=None))
        self.append(
            nn.MaxPoolingLayer(self.output_shape, (2, 2),
                               layer_name=name + '/maxpool0'))
        self.append(
            nn.ActivationLayer(self.output_shape, 'relu', name + '/relu2'))

        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         128,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv3'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn3'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         128,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv4'))
        self.append(
            nn.BatchNormLayer(self.output_shape,
                              name + '/bn4',
                              activation=None))
        self.append(
            nn.MaxPoolingLayer(self.output_shape, (2, 2),
                               layer_name=name + '/maxpool1'))
        self.append(
            nn.ActivationLayer(self.output_shape, 'relu', name + '/relu4'))

        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         256,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv5'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn5'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         256,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv6'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn6'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         256,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv7'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn7'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         256,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv7_1'))
        self.append(
            nn.BatchNormLayer(self.output_shape,
                              name + '/bn7_1',
                              activation=None))
        self.append(
            nn.MaxPoolingLayer(self.output_shape, (2, 2),
                               layer_name=name + '/maxpool2'))
        self.append(
            nn.ActivationLayer(self.output_shape, 'relu', name + '/relu8'))

        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         512,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv8'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn8'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         512,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv9'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn9'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         512,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv10'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn10'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         512,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv10_1'))
        self.append(
            nn.BatchNormLayer(self.output_shape,
                              name + '/bn10_1',
                              activation=None))
        self.append(
            nn.MaxPoolingLayer(self.output_shape, (2, 2),
                               layer_name=name + '/maxpool3'))
        self.append(
            nn.ActivationLayer(self.output_shape, 'relu', name + '/relu11'))

        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         512,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv11'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn11'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         512,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv12'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn12'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         512,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv13'))
        self.append(nn.BatchNormLayer(self.output_shape, name + '/bn13'))
        self.append(
            Conv2DMeanInterpPaddingLayer(self.output_shape,
                                         512,
                                         3,
                                         activation=None,
                                         layer_name=name + '/conv13_1'))
        self.append(
            nn.BatchNormLayer(self.output_shape,
                              name + '/bn13_1',
                              activation=None))
        self.append(
            nn.MaxPoolingLayer(self.output_shape, (2, 2),
                               layer_name=name + '/maxpool4'))
        self.append(
            nn.ActivationLayer(self.output_shape, 'relu', name + '/relu14'))

        self.append(
            nn.SoftmaxLayer(self.output_shape, num_classes, name + '/softmax'))
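
# Hypothetical construction: with a CIFAR-style 32x32 input, the five 2x2
# max-pools reduce the feature map to 1x1 before the softmax head.
net = VGG19MeanInterpPadding(input_shape=(None, 3, 32, 32), num_classes=10)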