Code Example #1
File: wgan.py Project: nathinal/Theano-MPI
def build_generator(input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 128*7*7))
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, crop='same',
                                     output_size=14))
    layer = Deconv2DLayer(layer, 1, 5, stride=2, crop='same', output_size=28,
                          nonlinearity=sigmoid)
    print ("Generator output:", layer.output_shape)
    return layer
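A minimal sketch of how this generator could be used for sampling, assuming Theano and a recent Lasagne install; the variable names and the batch size of 64 are illustrative only and not part of the original project:

import numpy as np
import theano
import theano.tensor as T
import lasagne

# symbolic 100-dim noise input for the generator defined above
noise_var = T.matrix('noise')
generator = build_generator(noise_var)
# deterministic=True uses the accumulated batch-norm statistics at inference time
samples = lasagne.layers.get_output(generator, deterministic=True)
sample_fn = theano.function([noise_var], samples)

# draw 64 images from uniform noise; the result has shape (64, 1, 28, 28)
noise = np.random.rand(64, 100).astype(theano.config.floatX)
images = sample_fn(noise)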
Code Example #2
File: ali_wgan.py Project: corrigon/AutoEncoders
 def build_decoder_28(self, in_layer):
     lrelu = LeakyRectify(0.2)
     # fully-connected layer
     layer = batch_norm(DenseLayer(
         in_layer, 1024, nonlinearity=lrelu))  # original with relu
     # project and reshape
     layer = batch_norm(DenseLayer(
         layer, 256 * 7 * 7, nonlinearity=lrelu))  # original with relu
     layer = ReshapeLayer(layer, ([0], 256, 7, 7))
     # two fractional-stride convolutions
     layer = batch_norm(
         Deconv2DLayer(layer,
                       128,
                       5,
                       stride=2,
                       crop='same',
                       output_size=14,
                       nonlinearity=lrelu))  # original with relu
     return Deconv2DLayer(layer,
                          self.channels,
                          5,
                          stride=2,
                          crop='same',
                          output_size=28,
                          nonlinearity=None)
Code Example #3
def build_critic(input_var=None, model_name='wgan'):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(
        Conv2DLayer(layer, 64, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))

    # output layer
    if model_name == 'dcgan':
        layer = DenseLayer(layer, 1, nonlinearity=sigmoid)
    elif model_name == 'wgan':
        layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    elif model_name == 'lsgan':
        layer = DenseLayer(layer, 1, nonlinearity=None)

    print("critic output:", layer.output_shape)
    return layer
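The three output heads above correspond to three different training objectives. A hedged sketch of the matching discriminator/critic losses, with real_score and fake_score standing in for the network's output on real and generated batches (placeholder variables, not from the original file):

import theano.tensor as T
from lasagne.objectives import binary_crossentropy

# placeholders for the critic output on real and generated images
real_score = T.matrix('real_score')
fake_score = T.matrix('fake_score')

# 'dcgan': sigmoid output trained with binary cross-entropy
dcgan_loss = (binary_crossentropy(real_score, T.ones_like(real_score)) +
              binary_crossentropy(fake_score, T.zeros_like(fake_score))).mean()

# 'wgan': unbounded linear output; the critic minimizes fake - real
wgan_loss = fake_score.mean() - real_score.mean()

# 'lsgan': linear output trained with a least-squares objective
lsgan_loss = (T.sqr(real_score - 1.0).mean() + T.sqr(fake_score).mean()) / 2.0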
Code Example #4
 def build_encoder_conv2d_32_hidden(self, l_input):
     from lasagne.nonlinearities import sigmoid
     from lasagne.nonlinearities import LeakyRectify
     from lasagne.layers import Conv2DLayer
     from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
     try:
         from lasagne.layers.dnn import batch_norm_dnn as batch_norm
     except ImportError:
         from lasagne.layers import batch_norm
     # input: 3x28x28dim
     lrelu = LeakyRectify(0.2)
     layer = batch_norm(
         Conv2DLayer(l_input,
                     64,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))  # original with relu
     layer = batch_norm(
         Conv2DLayer(layer,
                     128,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))  # original with relu
     return ReshapeLayer(layer, ([0], 6272))
Code Example #5
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 3, 64, 64)
    layer = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)
    layer = GAN.GaussianNoiseLayer(layer, sigma=0.5)
    # four convolutions
    layer = batch_norm(
        Conv2DLayer(layer, 64, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 512, 5, stride=2, pad='same', nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear and without bias)
    layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    print("critic output:", layer.output_shape)
    return layer
Code Example #6
def BatchNorm(layer, include=True, mean=0.0, std=0):
    # these imports live at module level in the original project; added here
    # so the helper is usable on its own
    import numpy as np
    from lasagne.layers import batch_norm
    from lasagne.utils import floatX
    if include:
        if std == 0:
            return batch_norm(layer)
        return batch_norm(
            layer,
            mean=floatX(np.zeros(layer.output_shape[1]) + mean),
            inv_std=floatX(np.random.normal(0.0, std, layer.output_shape[1])))
    else:
        return layer
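The helper above makes batch normalization switchable per layer and, when std > 0, seeds the inverse standard deviation with random values. A brief usage sketch, reusing layer classes that appear in the other snippets on this page:

from lasagne.layers import InputLayer, DenseLayer

net = InputLayer(shape=(None, 100))
net = BatchNorm(DenseLayer(net, 256))               # normalized with default statistics
net = BatchNorm(DenseLayer(net, 256), std=0.02)     # randomly initialized inv_std
net = BatchNorm(DenseLayer(net, 1), include=False)  # batch norm skipped entirely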
Code Example #7
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer, MaxPool2DLayer, dropout)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify, rectify
    lrelu = LeakyRectify(0.2)
    layer = InputLayer(shape=(None, 1, 128, 128),
                       input_var=input_var,
                       name='d_in_data')

    print("MNIST critic")
    # convolution layers
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    layer = batch_norm(
        Conv2DLayer(layer, 128, 5, stride=2, pad='same', nonlinearity=lrelu))
    """
    print("naive CREPE critic")
    # words from sequences with 7 characters
    # each filter learns a word representation of shape M x 1
    layer = Conv2DLayer(
        layer, 128, (128, 7), nonlinearity=lrelu)
    layer = MaxPool2DLayer(layer, (1, 3))
    # temporal convolution, 7-gram
    layer = Conv2DLayer(
        layer, 128, (1, 7), nonlinearity=lrelu)
    layer = MaxPool2DLayer(layer, (1, 3))
    # temporal convolution, 3-gram
    layer = Conv2DLayer(
        layer, 128, (1, 3), nonlinearity=lrelu)
    layer = Conv2DLayer(
        layer, 128, (1, 3), nonlinearity=lrelu)
    layer = Conv2DLayer(
        layer, 128, (1, 3), nonlinearity=lrelu)
    layer = Conv2DLayer(
        layer, 128, (1, 3), nonlinearity=lrelu)
    # fully-connected layers
    layer = DenseLayer(layer, 1024, nonlinearity=rectify)
    layer = DenseLayer(layer, 1024, nonlinearity=rectify)
    """
    layer = DenseLayer(layer, 1, nonlinearity=lrelu)
    print("critic output:", layer.output_shape)
    return layer
Code Example #8
def BatchNorm(layer, do_batch_norm=False):
    if do_batch_norm:
        try:
            from lasagne.layers.dnn import batch_norm_dnn as batch_norm
        except ImportError:
            from lasagne.layers import batch_norm
        return batch_norm(layer)
    return layer
Code Example #9
def define_patch_net(num_layers=4):
    net = {}
    print("Discriminator layer shapes:")
    net['input'] = ll.InputLayer(shape=(None, 3, IMAGE_SHAPE[0],
                                        IMAGE_SHAPE[1]))

    leaky_relu = lasagne.nonlinearities.LeakyRectify(0.2)

    # net['stand'] = ll.standardize(net['input'], offset=np.array([0, 0, 0], dtype='float32'),
    #                         scale=np.array([128.0, 128.0, 128.0], dtype='float32'))

    prev_layer_name = 'input'

    for i_layer in range(num_layers):
        layer_name = 'conv_%i' % (i_layer + 1)
        if i_layer != 0:
            net[layer_name] = batch_norm(
                Conv2DLayer(net[prev_layer_name],
                            num_filters=min(512, 64 * (2**i_layer)),
                            filter_size=(4, 4),
                            stride=(2, 2),
                            nonlinearity=leaky_relu))
        else:
            net[layer_name] = batch_norm(
                Conv2DLayer(net[prev_layer_name],
                            num_filters=64 * (2**i_layer),
                            filter_size=(4, 4),
                            stride=(2, 2),
                            nonlinearity=leaky_relu))
        print(lasagne.layers.get_output_shape(net[layer_name]))
        prev_layer_name = layer_name

    net['out'] = batch_norm(
        Conv2DLayer(net[prev_layer_name],
                    filter_size=(1, 1),
                    num_filters=1,
                    nonlinearity=lasagne.nonlinearities.sigmoid))
    print(lasagne.layers.get_output_shape(net['out']))
    # net['out'] = lasagne.layers.ReshapeLayer(net['patch'], shape=(batch_size * 2, -1))
    #
    # print(lasagne.layers.get_output_shape(net['out']))

    return net
Code Example #10
File: ali_wgan.py Project: corrigon/AutoEncoders
 def build_encoder_28(self, layer_in, encoder_mode='encoder_28'):
     lrelu = LeakyRectify(0.2)
     layer = batch_norm(
         Conv2DLayer(layer_in,
                     128,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))  # original with relu
     layer = batch_norm(
         Conv2DLayer(layer,
                     256,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))  # original with relu
     layer = ReshapeLayer(layer, ([0], 6272 * 2))
     layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
     return batch_norm(DenseLayer(layer, self.z_dim, nonlinearity=None))
Code Example #11
File: net.py Project: zymale/yousan.ai
def simpleconv3(input_var=None):
    network = lasagne.layers.InputLayer(shape=(None, 3, 48, 48),
                                        input_var=input_var)

    network = lasagne.layers.Conv2DLayer(
        network,
        num_filters=12,
        filter_size=(3, 3),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    network = batch_norm(network)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))

    network = lasagne.layers.Conv2DLayer(
        network,
        num_filters=24,
        filter_size=(3, 3),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    network = batch_norm(network)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    network = lasagne.layers.Conv2DLayer(
        network,
        num_filters=48,
        filter_size=(3, 3),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    network = batch_norm(network)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))

    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.5),
        num_units=128,
        nonlinearity=lasagne.nonlinearities.rectify)

    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.5),
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)

    return network
Code Example #12
File: wgan.py Project: uoguelph-mlrg/Theano-MPI
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear and without bias)
    layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    print ("critic output:", layer.output_shape)
    return layer
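A sketch of wiring this critic together with the build_generator shown in Code Example #1 into the WGAN training objective, assuming Theano and Lasagne; the RMSprop learning rate and clipping range follow the standard WGAN recipe, and the compilation code is illustrative rather than copied from the project:

import theano
import theano.tensor as T
import lasagne

real_images = T.tensor4('real_images')
noise_var = T.matrix('noise')

generator = build_generator(noise_var)
critic = build_critic(real_images)

# critic scores for real data and for generated samples
real_score = lasagne.layers.get_output(critic)
fake_images = lasagne.layers.get_output(generator)
fake_score = lasagne.layers.get_output(critic, inputs=fake_images)

# Wasserstein objectives: the critic separates real from fake, the generator closes the gap
critic_loss = fake_score.mean() - real_score.mean()
generator_loss = -fake_score.mean()

critic_params = lasagne.layers.get_all_params(critic, trainable=True)
critic_updates = lasagne.updates.rmsprop(
    critic_loss, critic_params, learning_rate=5e-5)
# weight clipping keeps the critic approximately Lipschitz-bounded
for param in critic_params:
    critic_updates[param] = T.clip(critic_updates[param], -0.01, 0.01)

generator_params = lasagne.layers.get_all_params(generator, trainable=True)
generator_updates = lasagne.updates.rmsprop(
    generator_loss, generator_params, learning_rate=5e-5)

train_critic = theano.function(
    [real_images, noise_var], critic_loss, updates=critic_updates)
train_generator = theano.function(
    [noise_var], generator_loss, updates=generator_updates)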
Code Example #13
File: ali_wgan.py Project: corrigon/AutoEncoders
 def build_critic_28(self, in_x_layer, in_z_layer):
     # two convolutions
     lrelu = LeakyRectify(0.2)
     layer = batch_norm(
         Conv2DLayer(in_x_layer,
                     128,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))
     layer = batch_norm(
         Conv2DLayer(layer,
                     256,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))
     layer = ReshapeLayer(layer, ([0], 6272 * 2))
     # fully-connected layer
     layer = L.ConcatLayer([layer, in_z_layer])
     layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
     # output layer (linear and without bias)
     return DenseLayer(layer, 1, nonlinearity=None, b=None)
Code Example #14
def build_critic(input_var=None, verbose=False):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: (None, 3, 32, 32)
    layer = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    # three convolutions
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 256, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 512, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # # fully-connected layer
    # layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    if verbose: print("critic output:", layer.output_shape)
    return layer
Code Example #15
 def build_encoder_conv2d_128_hidden(self, l_input):
     from lasagne.nonlinearities import sigmoid
     from lasagne.nonlinearities import LeakyRectify
     from lasagne.layers import Conv2DLayer
     from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
     try:
         from lasagne.layers.dnn import batch_norm_dnn as batch_norm
     except ImportError:
         from lasagne.layers import batch_norm
     # input: 3x128x128dim
     lrelu = LeakyRectify(0.2)
     layer = batch_norm(
         Conv2DLayer(l_input,
                     128,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))  # original with relu
     # shape 128x64x64
     layer = batch_norm(
         Conv2DLayer(layer,
                     256,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))  # original with relu
     # shape 256x32x32
     layer = batch_norm(
         Conv2DLayer(layer,
                     256,
                     7,
                     stride=4,
                     pad='same',
                     nonlinearity=lrelu))  # original with relu
      # shape 256x8x8 = 16384
     return ReshapeLayer(layer, ([0], 8192 * 2))
Code Example #16
 def build_decoder_conv2d_128_local_hidden(self, l_Z, params):
     from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
     from LocallyConnected2DLayer import LocallyConnected2DLayer
     try:
         from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
     except ImportError:
         raise ImportError(
             "Your Lasagne is too old. Try the bleeding-edge "
             "version: http://lasagne.readthedocs.io/en/latest/"
             "user/installation.html#bleeding-edge-version")
     try:
         from lasagne.layers.dnn import batch_norm_dnn as batch_norm
     except ImportError:
         from lasagne.layers import batch_norm
     from lasagne.nonlinearities import sigmoid
     from lasagne.nonlinearities import LeakyRectify
     lrelu = LeakyRectify(0.2)
     # fully-connected layer
     layer = batch_norm(
         DenseLayer(
             l_Z,
             4096,
             nonlinearity=lrelu,
             W=nn.init.GlorotUniform() if params is None else params['w1'],
             b=nn.init.Constant(0.) if params is None else params['b1'],
         ))  # original with relu
     _params = {}
     _params['w1'] = layer.input_layer.input_layer.W
     _params['b1'] = layer.input_layer.input_layer.b
     # project and reshape
      # shape 4096
     layer = batch_norm(
         DenseLayer(
             layer,
             256 * 8 * 8,
             nonlinearity=lrelu,
             W=nn.init.GlorotUniform() if params is None else params['w2'],
             b=nn.init.Constant(0.) if params is None else params['b2'],
             # shape 256x8x8
         ))  # original with relu
     _params['w2'] = layer.input_layer.input_layer.W
     _params['b2'] = layer.input_layer.input_layer.b
     layer = ReshapeLayer(layer, ([0], 256, 8, 8))
     # two fractional-stride convolutions
     layer = batch_norm(
         Deconv2DLayer(
             layer,
             256,
             7,
             stride=4,
             crop='same',
             output_size=32,
             nonlinearity=lrelu,
             W=nn.init.GlorotUniform() if params is None else params['w3'],
             b=nn.init.Constant(0.)
             if params is None else params['b3']))  # original with relu
     # shape 256x32x32
     _params['w3'] = layer.input_layer.input_layer.W
     _params['b3'] = layer.input_layer.input_layer.b
     _layer = batch_norm(
         Deconv2DLayer(
             layer,
             64,
             9,
             stride=4,
             crop='same',
             output_size=128,
             nonlinearity=lrelu,
             W=nn.init.GlorotUniform() if params is None else params['w4'],
             b=nn.init.Constant(0.)
             if params is None else params['b4']))  # original with relu
      # shape 64x128x128
     _params['w4'] = _layer.input_layer.input_layer.W
     _params['b4'] = _layer.input_layer.input_layer.b
     layer = LocallyConnected2DLayer(
         _layer,
         self.channels,
         7,
         stride=1,
         pad='same',
         nonlinearity=sigmoid,
         untie_biases=True,
         W=nn.init.GlorotUniform() if params is None else params['w_mu'],
         b=nn.init.Constant(0.) if params is None else params['b_mu'])
     _params['w_mu'] = layer.W
     _params['b_mu'] = layer.b
     l_dec_mu = ReshapeLayer(
         layer, ([0], self.width * self.height * self.channels))
     # relu_shift is for numerical stability - if training data has any
     # dimensions where stdev=0, allowing logsigma to approach -inf
      # will cause the loss function to become NaN. So we set the limit
     # stdev >= exp(-1 * relu_shift)
     relu_shift = 10
     layer = LocallyConnected2DLayer(
         _layer,
         self.channels,
         7,
         stride=1,
         pad='same',
         nonlinearity=lambda a: T.nnet.relu(a + relu_shift) - relu_shift,
         W=nn.init.GlorotUniform()
         if params is None else params['w_logsigma'],
         b=nn.init.Constant(0.) if params is None else params['b_logsigma'])
     _params['w_logsigma'] = layer.W
     _params['b_logsigma'] = layer.b
     l_dec_logsigma = ReshapeLayer(
         layer, ([0], self.width * self.height * self.channels))
     # shape 3x128x128
     return l_dec_mu, l_dec_logsigma, _params
Code Example #17
def build_model(batch_size=BATCH_SIZE):
    """ Compile net architecture """
    nonlin = lasagne.nonlinearities.rectify

    # --- input layers ---
    l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0],
                                            INPUT_SHAPE[1], INPUT_SHAPE[2]),
                                     name='Input')
    net = l_in

    nf = 64

    # --- conv layers ---
    net = Conv2DLayer(net,
                      num_filters=nf,
                      filter_size=5,
                      stride=2,
                      pad=2,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = Conv2DLayer(net,
                      num_filters=nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = MaxPool2DLayer(net, pool_size=2)
    net = DropoutLayer(net, p=0.3)

    net = Conv2DLayer(net,
                      num_filters=2 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = Conv2DLayer(net,
                      num_filters=2 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = MaxPool2DLayer(net, pool_size=2)
    net = DropoutLayer(net, p=0.3)

    net = Conv2DLayer(net,
                      num_filters=4 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = DropoutLayer(net, p=0.3)
    net = Conv2DLayer(net,
                      num_filters=4 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = DropoutLayer(net, p=0.3)
    net = Conv2DLayer(net,
                      num_filters=6 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = DropoutLayer(net, p=0.3)
    net = Conv2DLayer(net,
                      num_filters=6 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = MaxPool2DLayer(net, pool_size=2)
    net = DropoutLayer(net, p=0.3)

    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = MaxPool2DLayer(net, pool_size=(1, 2))
    net = DropoutLayer(net, p=0.3)

    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = MaxPool2DLayer(net, pool_size=(1, 2))
    net = DropoutLayer(net, p=0.3)

    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=3,
                      pad=0,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = DropoutLayer(net, p=0.5)
    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=1,
                      pad=0,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = DropoutLayer(net, p=0.5)

    # --- feed forward part ---
    net = Conv2DLayer(net,
                      num_filters=41,
                      filter_size=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=None)
    net = batch_norm(net, alpha=0.1)
    net = GlobalPoolLayer(net)
    net = FlattenLayer(net)
    net = NonlinearityLayer(net, nonlinearity=lasagne.nonlinearities.softmax)

    return net
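A short sketch of compiling a training step for this classifier with categorical cross-entropy, assuming the module-level names the function relies on (BATCH_SIZE, INPUT_SHAPE, init_conv and the imported layer classes) are available; the target variable and Adam settings below are illustrative only:

import theano
import theano.tensor as T
import lasagne

net = build_model()
# the InputLayer created inside build_model owns its input variable
input_var = lasagne.layers.get_all_layers(net)[0].input_var
target_var = T.ivector('targets')

prediction = lasagne.layers.get_output(net)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()

params = lasagne.layers.get_all_params(net, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)

train_fn = theano.function([input_var, target_var], loss, updates=updates)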
Code Example #18
def build_critic(input_var=None, cond_var=None, n_conds=0, arch=0,
                 with_BatchNorm=True, loss_type='wgan'):
    from lasagne.layers import (
        InputLayer, Conv2DLayer, DenseLayer, MaxPool2DLayer, concat,
        dropout, flatten)
    from lasagne.nonlinearities import rectify, LeakyRectify
    from lasagne.init import GlorotUniform  # Normal
    lrelu = LeakyRectify(0.2)
    layer = InputLayer(
        shape=(None, 1, 128, 128), input_var=input_var, name='d_in_data')
    # init = Normal(0.02, 0.0)
    init = GlorotUniform()

    if cond_var is not None:
        # class: from data or from generator input
        layer_cond = InputLayer(
            shape=(None, n_conds), input_var=cond_var, name='d_in_condition')
        layer_cond = BatchNorm(DenseLayer(
            layer_cond, 1024, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
    if arch == 'dcgan':
        # DCGAN inspired
        layer = BatchNorm(Conv2DLayer(
            layer, 32, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 64, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 512, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
    elif arch == 'cont-enc':
        # convolution layers
        layer = BatchNorm(Conv2DLayer(
            layer, 64, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 64, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 512, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
    elif arch == 'mnist':
        # Jan Schluechter's MNIST discriminator
        # convolution layers
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 5, stride=2, pad='same', W=init, b=None,
            nonlinearity=lrelu), with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 5, stride=2, pad='same', W=init, b=None,
            nonlinearity=lrelu), with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 5, stride=2, pad='same', W=init, b=None,
            nonlinearity=lrelu), with_BatchNorm)
        # layer = BatchNorm(Conv2DLayer(
        #     layer, 128, 5, stride=2, pad='same', W=init, b=None,
        #      nonlinearity=lrelu), with_BatchNorm)
        # fully-connected layer
        # layer = BatchNorm(DenseLayer(
        #     layer, 1024, W=init, b=None, nonlinearity=lrelu), with_BatchNorm)
    elif arch == 'lsgan':
        layer = batch_norm(Conv2DLayer(
            layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
        layer = batch_norm(Conv2DLayer(
            layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
        layer = batch_norm(Conv2DLayer(
            layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
    elif arch == 'crepe':
        # CREPE
        # form words from sequence of characters
        layer = BatchNorm(Conv2DLayer(
            layer, 1024, (128, 7), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = MaxPool2DLayer(layer, (1, 3))
        # temporal convolution, 7-gram
        layer = BatchNorm(Conv2DLayer(
            layer, 512, (1, 7), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = MaxPool2DLayer(layer, (1, 3))
        # temporal convolution, 3-gram
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = flatten(layer)
        # fully-connected layers
        layer = dropout(DenseLayer(
            layer, 1024, W=init, b=None, nonlinearity=rectify))
        layer = dropout(DenseLayer(
            layer, 1024, W=init, b=None, nonlinearity=rectify))
    else:
        raise Exception("Model architecture {} is not supported".format(arch))
    if cond_var is not None:
        layer = DenseLayer(layer, 1024, nonlinearity=lrelu, b=None)
        layer = concat([layer, layer_cond])

    # output layer (linear and without bias)
    layer = DenseLayer(layer, 1, b=None, nonlinearity=None)
    print("Critic output:", layer.output_shape)
    return layer
Code Example #19
def build_generator(input_var, noise_size, cond_var=None, n_conds=0, arch=0,
                    with_BatchNorm=True, batch_size=None, n_steps=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer, concat
    from lasagne.layers import Upscale2DLayer, Conv2DLayer
    from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    from lasagne.nonlinearities import LeakyRectify, rectify
    from lasagne.init import GlorotUniform, Normal, Orthogonal

    # non_lin = LeakyRectify(0.01)
    non_lin = rectify
    # init = Orthogonal(np.sqrt(2/(1+0.01**2)))
    init = Normal(0.02, 0.0)
    # init = GlorotUniform()

    layer = InputLayer(shape=(batch_size, noise_size), input_var=input_var)
    if cond_var is not None:
        layer = BatchNorm(DenseLayer(
            layer, noise_size, nonlinearity=non_lin), with_BatchNorm)
        layer = concat([
            layer, InputLayer(shape=(batch_size, n_conds), input_var=cond_var)])
    if arch == 'dcgan':
        # DCGAN
        layer = BatchNorm(DenseLayer(
            layer, 1024*4*4, W=init, b=None, nonlinearity=non_lin),
            with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 1024, 4, 4))
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=8, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=16, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=32, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 64, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=64, nonlinearity=non_lin), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=128, nonlinearity=tanh_temperature)
    elif arch == 'mnist':
        # Jan Schluechter MNIST generator
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(
            layer, 1024*8*8, W=init, b=None), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 1024, 8, 8))
        # fractional-stride convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, 5, stride=2, crop='same', W=init, b=None,
            output_size=16, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', W=init, b=None,
            output_size=32, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 5, stride=2, crop='same', W=init, b=None,
            output_size=64, nonlinearity=non_lin), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 5, stride=2, crop='same', W=init, b=None,
            output_size=128, nonlinearity=tanh_temperature)
    elif arch == 'cont-enc':
        # build generator from concatenated prefix and noise features
        layer = ReshapeLayer(layer, ([0], layer.output_shape[1], 1, 1))
        layer = BatchNorm(Deconv2DLayer(
            layer, 1024, 4, stride=1, crop=0, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 4, stride=2, crop=1, W=init,
            nonlinearity=tanh_temperature)
    elif arch == 'lsgan':
        layer = batch_norm(DenseLayer(layer, 1024))
        layer = batch_norm(DenseLayer(layer, 1024*8*8))
        layer = ReshapeLayer(layer, ([0], 1024, 8, 8))
        layer = batch_norm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', output_size=16))
        layer = batch_norm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', output_size=32))
        layer = batch_norm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', output_size=64))
        layer = Deconv2DLayer(
            layer, 1, 5, stride=2, crop='same', output_size=128,
            nonlinearity=tanh_temperature)
    elif arch == 2:
        # non-overlapping transposed convolutions
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 256*36*36), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 256, 36, 36))
        # two fractional-stride convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 4, stride=2, crop='full', b=None, nonlinearity=non_lin),
            with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 8, stride=2, crop='full', b=None,
            nonlinearity=tanh_temperature)
    elif arch == 3:
        # resize-convolution, more full layer weights less convolutions
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 32*68*68), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 32, 68, 68))
        # resize-convolutions
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 3, stride=1, pad='valid'), with_BatchNorm)
        layer = Upscale2DLayer(layer, (2, 2))
        layer = Conv2DLayer(
            layer, 1, 5, stride=1, pad='valid', nonlinearity=tanh_temperature)
    elif arch == 4:
        # resize-convolution, less full layer weights more convolutions
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 128*18*18), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 128, 18, 18))
        # resize-convolutions
        layer = Upscale2DLayer(layer, (2, 2), mode='bilinear')
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 3, stride=1, pad='valid', nonlinearity=non_lin),
            with_BatchNorm)
        layer = Upscale2DLayer(layer, (2, 2), mode='bilinear')
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 3, stride=1, pad='valid', nonlinearity=non_lin),
            with_BatchNorm)
        layer = Upscale2DLayer(layer, (2, 2), mode='bilinear')
        layer = Conv2DLayer(
            layer, 1, 5, stride=1, pad='valid',
            nonlinearity=tanh_temperature)
    elif arch == 'crepe_up':
        # CREPE transposed with upscaling
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 2**15*1*3), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 2**15, 1, 3))
        # temporal convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = Upscale2DLayer(layer, (1, 3), mode='repeat')
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, (1, 9), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = Upscale2DLayer(layer, (1, 3), mode='repeat')
        layer = Deconv2DLayer(
            layer, 1, (128, 6), stride=1, crop=0, W=init, b=None,
            nonlinearity=tanh_temperature)
    elif arch == 'crepe_noup_a':
        # CREPE transposed no upscaling
        # fully-connected layer
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(
            layer, 1024*1*3, W=init, b=None), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 1024, 1, 3))
        # temporal convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, (1, 7), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 1024, (128, 7), stride=3, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, (1, 8), stride=1, crop=0, W=init, b=None,
            nonlinearity=tanh_temperature)
    elif arch == 'crepe_noup_b':
        # CREPE transposed no upscaling
        # fully-connected layer
        layer = BatchNorm(DenseLayer(layer, 1024))
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 1024*1*3))
        layer = ReshapeLayer(layer, ([0], 1024, 1, 3))
        # temporal convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0,
            nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=3, crop=0, nonlinearity=non_lin))
        layer = Deconv2DLayer(
            layer, 512, (1, 9), stride=1, crop=0, nonlinearity=non_lin)
        layer = Deconv2DLayer(
            layer, 1, (128, 8), stride=3, crop=0, nonlinearity=tanh_temperature)
    else:
        return None

    print("Generator output:", layer.output_shape)
    return layer
Code Example #20
def build_generator_lstm(input_var, noise_size, cond_var=None, n_conds=0,
                         arch='lstm', with_BatchNorm=True, batch_size=None,
                         n_steps=None):
    from lasagne.layers import (
        InputLayer, DenseLayer, LSTMLayer, ReshapeLayer, DimshuffleLayer,
        concat, ExpressionLayer, NonlinearityLayer, DropoutLayer)

    from lasagne.init import Constant, HeNormal
    from lasagne.nonlinearities import rectify, softmax
    non_lin = rectify

    layer = InputLayer(
        shape=(batch_size, n_steps, noise_size), input_var=input_var)
    if cond_var is not None:
        layer = BatchNorm(DenseLayer(
            layer, noise_size, nonlinearity=non_lin), with_BatchNorm)
        layer = concat(
            [layer, InputLayer(shape=(batch_size, n_steps, n_conds),
                               input_var=cond_var)])
    if arch == 'lstm':
        layer = batch_norm(DenseLayer(layer, 1024, num_leading_axes=2))
        # recurrent layers for bidirectional network
        l_forward_noise = BatchNorm(LSTMLayer(
            layer, 512, learn_init=True, grad_clipping=100,
            only_return_final=False), with_BatchNorm)
        l_backward_noise = BatchNorm(LSTMLayer(
            layer, 512, learn_init=True, grad_clipping=100,
            only_return_final=False, backwards=True), with_BatchNorm)
        layer = concat([l_forward_noise, l_backward_noise], axis=2)
        # dense layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, num_leading_axes=2), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 128, num_leading_axes=2), with_BatchNorm)
        # reshape to apply softmax per timestep
        layer = ReshapeLayer(layer, (-1, [2]))
        layer = NonlinearityLayer(layer, softmax)
        layer = ReshapeLayer(layer, (input_var.shape[0], -1, [1]))
        layer = DimshuffleLayer(layer, (0, 'x', 2, 1))
        layer = ExpressionLayer(layer, lambda X: X*2 - 1)
    elif arch == 1:
        # input layers
        l_in = InputLayer(
            shape=params['input_shape'], input_var=params['input_var'],
            name='g_in')
        l_noise = InputLayer(
            shape=params['noise_shape'], input_var=params['noise_var'],
            name='g_noise')
        l_cond = InputLayer(
            shape=params['cond_shape'], input_var=params['cond_var'],
            name='g_cond')
        l_mask = InputLayer(
            shape=params['mask_shape'], input_var=params['mask_var'],
            name='g_mask')

        # recurrent layers for bidirectional network
        l_forward_data = LSTMLayer(
            l_in, params['n_units'][0], mask_input=l_mask,
            ingate=gate_params, forgetgate=gate_params,
            cell=cell_params, outgate=gate_params,
            learn_init=True, grad_clipping=params['grad_clip'],
            only_return_final=False,
            nonlinearity=params['non_linearities'][0])
        l_forward_noise = LSTMLayer(
            l_noise, params['n_units'][0], mask_input=l_mask,
            ingate=gate_params, forgetgate=gate_params,
            cell=cell_params, outgate=gate_params,
            learn_init=True, grad_clipping=params['grad_clip'],
            only_return_final=False,
            nonlinearity=params['non_linearities'][1])

        l_backward_data = LSTMLayer(
            l_in, params['n_units'][0], mask_input=l_mask,
            ingate=gate_params, forgetgate=gate_params,
            cell=cell_params, outgate=gate_params,
            learn_init=True, grad_clipping=params['grad_clip'],
            only_return_final=False, backwards=True,
            nonlinearity=params['non_linearities'][0])
        l_backward_noise = LSTMLayer(
            l_noise, params['n_units'][0], mask_input=l_mask,
            ingate=gate_params, forgetgate=gate_params,
            cell=cell_params, outgate=gate_params,
            learn_init=True, grad_clipping=params['grad_clip'],
            only_return_final=False, backwards=True,
            nonlinearity=params['non_linearities'][1])

        # concatenate output of forward and backward layers
        l_lstm_concat = concat(
            [l_forward_data, l_forward_noise, l_backward_data,
             l_backward_noise], axis=2)

        # dense layer on output of data and noise lstms, w/dropout
        l_lstm_dense = DenseLayer(
            DropoutLayer(l_lstm_concat, p=0.5),
            num_units=params['n_units'][1], num_leading_axes=2,
            W=HeNormal(gain='relu'), b=Constant(0.1),
            nonlinearity=params['non_linearities'][2])

        # batch norm for lstm dense
        # l_lstm_dense = lasagne.layer.BatchNorm(l_lstm_dense)

        # concatenate dense layer of lstsm with condition
        l_lstm_cond_concat = concat(
            [l_lstm_dense, l_cond], axis=2)

        # dense layer with dense layer lstm and condition, w/dropout
        l_out = DenseLayer(
            DropoutLayer(l_lstm_cond_concat, p=0.5),
            num_units=params['n_units'][2],
            num_leading_axes=2,
            W=HeNormal(gain=1.0), b=Constant(0.1),
            nonlinearity=params['non_linearities'][3])
    elif arch == 2:
        raise Exception("arch 2 not implemented")
    elif arch == 3:
        raise Exception("arch 2 not implemented")

    print("Generator output:", layer.output_shape)
    return layer
Code Example #21
    def build_decoder_conv2d_64_hidden(self, l_Z, params):
        from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
        try:
            from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
        except ImportError:
            raise ImportError(
                "Your Lasagne is too old. Try the bleeding-edge "
                "version: http://lasagne.readthedocs.io/en/latest/"
                "user/installation.html#bleeding-edge-version")
        try:
            from lasagne.layers.dnn import batch_norm_dnn as batch_norm
        except ImportError:
            from lasagne.layers import batch_norm
        from lasagne.nonlinearities import sigmoid
        from lasagne.nonlinearities import LeakyRectify
        lrelu = LeakyRectify(0.2)
        # fully-connected layer
        layer = batch_norm(
            DenseLayer(
                l_Z,
                1024,
                nonlinearity=lrelu,
                W=nn.init.GlorotUniform() if params is None else params['w1'],
                b=nn.init.Constant(0.) if params is None else params['b1'],
            ))  # original with relu
        _params = {}
        _params['w1'] = layer.input_layer.input_layer.W
        _params['b1'] = layer.input_layer.input_layer.b
        # project and reshape
        layer = batch_norm(
            DenseLayer(
                layer,
                128 * 8 * 8,
                nonlinearity=lrelu,
                W=nn.init.GlorotUniform() if params is None else params['w2'],
                b=nn.init.Constant(0.) if params is None else params['b2'],
            ))  # original with relu
        _params['w2'] = layer.input_layer.input_layer.W
        _params['b2'] = layer.input_layer.input_layer.b
        layer = ReshapeLayer(layer, ([0], 128, 8, 8))
        # three fractional-stride convolutions
        layer = batch_norm(
            Deconv2DLayer(
                layer,
                128,
                5,
                stride=2,
                crop='same',
                output_size=16,
                nonlinearity=lrelu,
                W=nn.init.GlorotUniform() if params is None else params['w3'],
                b=nn.init.Constant(0.)
                if params is None else params['b3']))  # original with relu
        _params['w3'] = layer.input_layer.input_layer.W
        _params['b3'] = layer.input_layer.input_layer.b
        layer = batch_norm(
            Deconv2DLayer(
                layer,
                64,
                5,
                stride=2,
                crop='same',
                output_size=32,
                nonlinearity=lrelu,
                W=nn.init.GlorotUniform() if params is None else params['w4'],
                b=nn.init.Constant(0.)
                if params is None else params['b4']))  # original with relu
        _params['w4'] = layer.input_layer.input_layer.W
        _params['b4'] = layer.input_layer.input_layer.b
        layer = Deconv2DLayer(
            layer,
            self.channels,
            5,
            stride=2,
            crop='same',
            output_size=64,
            nonlinearity=sigmoid,
            W=nn.init.GlorotUniform() if params is None else params['w5'],
            b=nn.init.Constant(0.) if params is None else params['b5'])
        _params['w5'] = layer.W
        _params['b5'] = layer.b

        l_dec_hid = ReshapeLayer(
            layer, ([0], self.width * self.height * self.channels))
        l_dec_mu, l_dec_logsigma, __params = self.build_decoder_last_layer(
            l_dec_hid, params)
        _params.update(__params)
        return l_dec_mu, l_dec_logsigma, _params