Example #1
    def __init__(self, config_file, **kwargs):
        super(DeepLTE, self).__init__(config_file, **kwargs)
        self.num_frames = self.config['model']['num_frames']
        self.order = self.config['model']['order']
        self.nodes = self.config['model']['nodes']
        self.targets = self.config['model']['targets']
        self.interps = self.config['model']['interps']
        self.alpha = self.config['model']['alpha']
        self.dropout = self.config['model']['dropout']
        self.perceptual_cost = self.config['model']['perceptual_cost']
        self.vgg_weight_file = self.config['model']['vgg_weight_file']
        self.input_tensor_shape = (None,) + self.input_shape[1:]

        # Encoder: a ResNet-34 backbone from the model zoo with 64 base
        # filters and leaky-ReLU activations.
        enc = nn.model_zoo.resnet34(self.input_tensor_shape, 64, 'lrelu', False, False, name='encoder', alpha=self.alpha)
        self.model.append(enc)

        # Decoder: four 2x upsampling stages, each followed by stacked 5x5
        # convolutions, ending in a 3-channel tanh output.
        subnet = 'decoder'
        dec = nn.Sequential(input_shape=enc.output_shape, layer_name='decoder')
        dec.append(nn.ResizingLayer(dec.input_shape, 2, layer_name=subnet + '_up1'))
        dec.append(nn.StackingConv(dec.output_shape, 3, 256, 5, batch_norm=False, layer_name=subnet + '_block5',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))

        dec.append(nn.ResizingLayer(dec.output_shape, 2, layer_name=subnet + '_up2'))
        dec.append(nn.StackingConv(dec.output_shape, 5, 128, 5, batch_norm=False, layer_name=subnet + '_block6',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))

        dec.append(nn.ResizingLayer(dec.output_shape, 2, layer_name=subnet + '_up3'))
        dec.append(nn.StackingConv(dec.output_shape, 6, 128, 5, batch_norm=False, layer_name=subnet + '_block7',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))
        dec.append(nn.ConvolutionalLayer(dec.output_shape, 128, 5, activation='linear', layer_name=subnet + '_conv7'))
        if self.dropout:
            dec.append(nn.DropoutLayer(dec.output_shape, drop_prob=.5, layer_name=subnet + '_dropout7'))
        dec.append(nn.ActivationLayer(dec.output_shape, 'lrelu', subnet + '_act7', alpha=self.alpha))

        dec.append(nn.ResizingLayer(dec.output_shape, 2, layer_name=subnet + '_up4'))
        dec.append(nn.StackingConv(dec.output_shape, 8, 64, 5, batch_norm=False, layer_name=subnet + '_block8',
                                   He_init='normal', stride=(1, 1), activation='lrelu', alpha=self.alpha))
        dec.append(nn.ConvolutionalLayer(dec.output_shape, 64, 5, activation='linear', layer_name=subnet + '_conv8'))
        if self.dropout:
            dec.append(nn.DropoutLayer(dec.output_shape, drop_prob=.5, layer_name=subnet + '_dropout8'))
        dec.append(nn.ActivationLayer(dec.output_shape, 'lrelu', subnet + '_act8', alpha=self.alpha))

        dec.append(nn.ConvolutionalLayer(dec.output_shape, 3, 5, activation='tanh', no_bias=False,
                                         layer_name=subnet + '_output'))

        self.model.append(dec)
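
A minimal usage sketch. Everything here is an assumption, not part of the snippet: the config filename, the keys it must hold, and the base class accepting input_shape as a keyword argument; nn refers to the same layer library used above.

# Hypothetical config file; it must define the config['model'] keys read
# in the constructor (num_frames, order, nodes, targets, interps, alpha,
# dropout, perceptual_cost, vgg_weight_file).
net = DeepLTE('deeplte.yaml', input_shape=(1, 3, 256, 256))
# net.model now holds the ResNet-34 encoder followed by the 4-stage decoder.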
Example #2
    def __init__(self,
                 input_shape,
                 fc=True,
                 num_classes=1000,
                 first_output=16,
                 growth_rate=12,
                 num_blocks=3,
                 depth=40,
                 dropout=False,
                 name='DenseNet'):
        super(DenseNet, self).__init__(input_shape=input_shape,
                                       layer_name=name)
        self.append(
            nn.ConvolutionalLayer(self.input_shape,
                                  first_output,
                                  3,
                                  activation='linear',
                                  layer_name=name + 'pre_conv'))
        # Depth budget per dense block; each block gets n - 1 conv layers.
        n = (depth - 1) // num_blocks
        for b in range(num_blocks):
            self.append(
                nn.DenseBlock(self.output_shape,
                              num_conv_layer=n - 1,
                              growth_rate=growth_rate,
                              dropout=dropout,
                              layer_name=name + 'dense_block_%d' % b))
            if b < num_blocks - 1:
                # Transition (down-sampling) stage between consecutive dense
                # blocks, realized here via DenseBlock's transit mode.
                self.append(
                    nn.DenseBlock(self.output_shape,
                                  True,
                                  None,
                                  None,
                                  dropout,
                                  layer_name=name + 'dense_block_transit_%d' % b))

        self.append(
            nn.BatchNormLayer(self.output_shape, layer_name=name + 'post_bn'))
        if fc:
            # Pool over the current feature maps, not the raw input shape.
            self.append(
                nn.GlobalAveragePoolingLayer(self.output_shape,
                                             name + '_glbavgpooling'))
            self.append(
                nn.SoftmaxLayer(self.output_shape, num_classes,
                                name + '_softmax'))
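
A quick instantiation sketch for a CIFAR-sized input; the NCHW shape and class count are assumptions, not part of the snippet.

# depth=40, num_blocks=3 gives n = 13, i.e. 12 conv layers per dense block.
net = DenseNet(input_shape=(None, 3, 32, 32), num_classes=10,
               first_output=16, growth_rate=12, num_blocks=3, depth=40)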
Example #3
    def __init__(self,
                 input_shape,
                 ch=512,
                 wscale=0.02,
                 output_dim=1,
                 layer_name='SNDCGANDis'):
        super(SNDCGANDiscriminator, self).__init__(input_shape=input_shape,
                                                   layer_name=layer_name)
        # Alternating 3x3 stride-1 and 4x4 stride-2 (padded) convolutions
        # grow the channel count from ch // 8 up to ch while each stride-2
        # layer halves the resolution; a linear layer emits the final score.
        self.append(
            nn.ConvolutionalLayer(self.output_shape,
                                  ch // 8,
                                  3,
                                  nn.Normal(wscale),
                                  False,
                                  stride=1,
                                  activation='lrelu',
                                  layer_name=layer_name + '/conv1',
                                  alpha=.2))
        self.append(
            nn.ConvolutionalLayer(self.output_shape,
                                  ch // 4,
                                  4,
                                  nn.Normal(wscale),
                                  False,
                                  stride=2,
                                  border_mode=1,
                                  activation='lrelu',
                                  layer_name=layer_name + '/conv2',
                                  alpha=.2))

        self.append(
            nn.ConvolutionalLayer(self.output_shape,
                                  ch // 4,
                                  3,
                                  nn.Normal(wscale),
                                  False,
                                  stride=1,
                                  activation='lrelu',
                                  layer_name=layer_name + '/conv3',
                                  alpha=.2))
        self.append(
            nn.ConvolutionalLayer(self.output_shape,
                                  ch // 2,
                                  4,
                                  nn.Normal(wscale),
                                  False,
                                  stride=2,
                                  border_mode=1,
                                  activation='lrelu',
                                  layer_name=layer_name + '/conv4',
                                  alpha=.2))

        self.append(
            nn.ConvolutionalLayer(self.output_shape,
                                  ch // 2,
                                  3,
                                  nn.Normal(wscale),
                                  False,
                                  stride=1,
                                  activation='lrelu',
                                  layer_name=layer_name + '/conv5',
                                  alpha=.2))
        self.append(
            nn.ConvolutionalLayer(self.output_shape,
                                  ch,
                                  4,
                                  nn.Normal(wscale),
                                  False,
                                  stride=2,
                                  border_mode=1,
                                  activation='lrelu',
                                  layer_name=layer_name + '/conv6',
                                  alpha=.2))
        self.append(
            nn.ConvolutionalLayer(self.output_shape,
                                  ch,
                                  3,
                                  nn.Normal(wscale),
                                  False,
                                  stride=1,
                                  activation='lrelu',
                                  layer_name=layer_name + '/conv7',
                                  alpha=.2))

        self.append(
            nn.FullyConnectedLayer(self.output_shape,
                                   output_dim,
                                   nn.Normal(wscale),
                                   activation='linear',
                                   layer_name=layer_name + '/output'))
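
A usage sketch; the 64x64 input resolution is an assumption.

# The three 4x4 stride-2 convolutions reduce 64x64 to 8x8 before the
# fully connected output head.
dis = SNDCGANDiscriminator(input_shape=(None, 3, 64, 64), ch=512, output_dim=1)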
Example #4
    def __init__(self, input_shape, output_size):
        super(LSGAN, self).__init__(input_shape=input_shape,
                                    layer_name='LSGAN')
        # Intermediate spatial sizes for the generator's upsampling path.
        s2, s4, s8, s16 = output_size // 2, output_size // 4, output_size // 8, output_size // 16

        # Generator: project and reshape the latent input to 256 x s16 x s16,
        # then upsample through deconvolution stages to a tanh RGB image.
        subnet = 'generator'
        self.gen = nn.Sequential(input_shape=input_shape, layer_name=subnet)
        self.gen.append(
            nn.FullyConnectedLayer(self.gen.output_shape,
                                   256 * s16 * s16,
                                   activation='linear',
                                   layer_name=subnet + '/fc1',
                                   init=nn.Normal(.02)))
        self.gen.append(
            nn.ReshapingLayer(self.gen.output_shape, (-1, 256, s16, s16),
                              subnet + '/reshape'))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn1',
                              activation='relu',
                              epsilon=1.1e-5))

        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            256,
                                            3, (s8, s8),
                                            activation='linear',
                                            layer_name=subnet + '/deconv1',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn2',
                              activation='relu',
                              epsilon=1.1e-5))
        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            256,
                                            3, (s8, s8),
                                            stride=(1, 1),
                                            activation='linear',
                                            layer_name=subnet + '/deconv2',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn3',
                              activation='relu',
                              epsilon=1.1e-5))

        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            256,
                                            3, (s4, s4),
                                            activation='linear',
                                            layer_name=subnet + '/deconv3',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn4',
                              activation='relu',
                              epsilon=1.1e-5))
        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            256,
                                            3, (s4, s4),
                                            stride=(1, 1),
                                            activation='linear',
                                            layer_name=subnet + '/deconv4',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn5',
                              activation='relu',
                              epsilon=1.1e-5))

        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            128,
                                            3, (s2, s2),
                                            activation='linear',
                                            layer_name=subnet + '/deconv5',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn6',
                              activation='relu',
                              epsilon=1.1e-5))

        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            64,
                                            3, (output_size, output_size),
                                            activation='linear',
                                            layer_name=subnet + '/deconv6',
                                            init=nn.Normal(.02)))
        self.gen.append(
            nn.BatchNormLayer(self.gen.output_shape,
                              subnet + '/bn7',
                              activation='relu',
                              epsilon=1.1e-5))

        self.gen.append(
            nn.TransposedConvolutionalLayer(self.gen.output_shape,
                                            3,
                                            3, (output_size, output_size),
                                            stride=(1, 1),
                                            activation='tanh',
                                            layer_name=subnet + '/output',
                                            init=nn.Normal(.02)))
        self.append(self.gen)

        # Discriminator: four stride-2 convolutions (64 to 512 channels)
        # followed by a linear score.
        subnet = 'discriminator'
        self.dis = nn.Sequential(input_shape=self.gen.output_shape,
                                 layer_name=subnet)
        self.dis.append(
            nn.ConvolutionalLayer(self.dis.output_shape,
                                  64,
                                  5,
                                  stride=2,
                                  activation='lrelu',
                                  no_bias=False,
                                  layer_name=subnet + '/first_conv',
                                  init=nn.TruncatedNormal(.02),
                                  alpha=.2))
        self.dis.append(
            nn.ConvNormAct(self.dis.output_shape,
                           64 * 2,
                           5,
                           stride=2,
                           activation='lrelu',
                           no_bias=False,
                           layer_name=subnet + '/conv1',
                           init=nn.TruncatedNormal(.02),
                           epsilon=1.1e-5,
                           alpha=.2))
        self.dis.append(
            nn.ConvNormAct(self.dis.output_shape,
                           64 * 4,
                           5,
                           stride=2,
                           activation='lrelu',
                           no_bias=False,
                           layer_name=subnet + '/conv2',
                           init=nn.TruncatedNormal(.02),
                           epsilon=1.1e-5,
                           alpha=.2))
        self.dis.append(
            nn.ConvNormAct(self.dis.output_shape,
                           64 * 8,
                           5,
                           stride=2,
                           activation='lrelu',
                           no_bias=False,
                           layer_name=subnet + '/conv3',
                           init=nn.TruncatedNormal(.02),
                           epsilon=1.1e-5,
                           alpha=.2))
        self.dis.append(
            nn.FullyConnectedLayer(self.dis.output_shape,
                                   1,
                                   layer_name=subnet + '/output',
                                   activation='linear'))
        self.append(self.dis)
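
A usage sketch; the latent dimensionality and image size are assumptions.

# Generator: latent -> 256*s16*s16 -> deconv stages -> 64x64 RGB tanh image.
# Discriminator: four stride-2 convolutions -> linear score, matching the
# least-squares GAN setup, which trains both nets with squared-error losses.
gan = LSGAN(input_shape=(None, 100), output_size=64)
# gan.gen and gan.dis expose the two subnets for the alternating updates.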