Code Example #1
File: wgan.py (Project: nathinal/Theano-MPI)
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear and without bias)
    layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    print ("critic output:", layer.output_shape)
    return layer
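
A minimal usage sketch (not part of the original project, assuming Theano and Lasagne are installed): compiling the critic above into a callable score function.

import numpy as np
import theano
import theano.tensor as T
import lasagne

X = T.tensor4('inputs')
critic = build_critic(X)
score = lasagne.layers.get_output(critic, deterministic=True)
critic_fn = theano.function([X], score)

# a batch of two 28x28 grayscale images -> two scalar critic scores
dummy = np.zeros((2, 1, 28, 28), dtype=theano.config.floatX)
print(critic_fn(dummy).shape)  # (2, 1)
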
Code Example #2
File: ali_wgan.py (Project: corrigon/AutoEncoders)
 def build_critic_28(self, in_x_layer, in_z_layer):
     # two convolutions
     lrelu = LeakyRectify(0.2)
     layer = batch_norm(
         Conv2DLayer(in_x_layer,
                     128,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))
     layer = batch_norm(
         Conv2DLayer(layer,
                     256,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))
     layer = ReshapeLayer(layer, ([0], 6272 * 2))
     # fully-connected layer
     layer = L.ConcatLayer([layer, in_z_layer])
     layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
     # output layer (linear and without bias)
     return DenseLayer(layer, 1, nonlinearity=None, b=None)
Code Example #3
def build_disc(inp):
    lr = LeakyRectify(leakiness=0.2)
    net = InputLayer((None, 3, 256, 256), input_var=inp)
    # 256 * 256
    net = batch_norm(Conv2DLayer(net, 64, 4, stride=2, pad=1, nonlinearity=lr))
    # 128 * 128
    net = batch_norm(Conv2DLayer(net, 128, 4, stride=2, pad=1,
                                 nonlinearity=lr))
    # 64 * 64
    net = batch_norm(Conv2DLayer(net, 256, 4, stride=2, pad=1,
                                 nonlinearity=lr))
    # 32 * 32
    net = batch_norm(Conv2DLayer(net, 512, 4, stride=4, nonlinearity=lr))
    # 8 * 8
    net = batch_norm(Conv2DLayer(net, 512, 4, stride=4, nonlinearity=lr))
    # 2 * 2
    net = batch_norm(DenseLayer(net, 4096, nonlinearity=lr))

    net = batch_norm(DenseLayer(net, 1024, nonlinearity=lr))

    net = DenseLayer(net, 1, nonlinearity=sigmoid)
    print("Discriminator output:", net.output_shape)
    return net
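
Since this discriminator ends in a sigmoid, it would typically be paired with a binary cross-entropy GAN loss. A hedged sketch, with hypothetical symbolic inputs (real_images, fake_images) not taken from the original code:

import theano.tensor as T
import lasagne

real_images = T.tensor4('real')  # hypothetical symbolic batches
fake_images = T.tensor4('fake')
disc = build_disc(real_images)
d_real = lasagne.layers.get_output(disc)               # D(x) via input_var
d_fake = lasagne.layers.get_output(disc, fake_images)  # D(G(z))
d_loss = (lasagne.objectives.binary_crossentropy(d_real, 1.0) +
          lasagne.objectives.binary_crossentropy(d_fake, 0.0)).mean()
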
Code Example #4
File: dcgan_32.py (Project: ParsonsZeng/BGAN-1)
def build_discriminator(input_var=None,
                        dim_h=None,
                        use_batch_norm=True,
                        leak=None):
    if not use_batch_norm:
        bn = lambda x: x
    else:
        bn = batch_norm
    lrelu = LeakyRectify(leak)

    layer = InputLayer(shape=(None, DIM_C, DIM_X, DIM_Y), input_var=input_var)
    layer = Conv2DLayer(layer, dim_h, 4, stride=2, pad=1, nonlinearity=lrelu)
    logger.debug('Discriminator output 1: {}'.format(layer.output_shape))
    layer = bn(
        Conv2DLayer(layer, dim_h * 2, 4, stride=2, pad=1, nonlinearity=lrelu))
    logger.debug('Discriminator output 2: {}'.format(layer.output_shape))
    layer = bn(
        Conv2DLayer(layer, dim_h * 4, 4, stride=2, pad=1, nonlinearity=lrelu))
    logger.debug('Discriminator output 3: {}'.format(layer.output_shape))
    layer = DenseLayer(layer, 1, nonlinearity=None)

    logger.debug('Discriminator output: {}'.format(layer.output_shape))
    return layer
Code Example #5
    def test_nonlinearity(self, nonlinearity):
        import lasagne.nonlinearities

        if nonlinearity == 'leaky_rectify_0':
            from lasagne.nonlinearities import LeakyRectify
            theano_nonlinearity = LeakyRectify(leakiness=0)
        elif nonlinearity == 'scaled_tanh':
            from lasagne.nonlinearities import ScaledTanH
            theano_nonlinearity = ScaledTanH()
        elif nonlinearity == 'scaled_tanh_p':
            from lasagne.nonlinearities import ScaledTanH
            theano_nonlinearity = ScaledTanH(scale_in=0.5, scale_out=2.27)
        else:
            theano_nonlinearity = getattr(lasagne.nonlinearities, nonlinearity)
        np_nonlinearity = getattr(self, nonlinearity)

        X = T.matrix()
        X0 = lasagne.utils.floatX(np.random.uniform(-3, 3, (10, 10)))

        theano_result = theano_nonlinearity(X).eval({X: X0})
        np_result = np_nonlinearity(X0)

        assert np.allclose(theano_result, np_result)
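
For reference, the NumPy counterparts such a test compares against are one-liners; a hedged sketch of what self.leaky_rectify_0 and self.scaled_tanh might look like:

import numpy as np

def leaky_rectify_0(x):
    # LeakyRectify(leakiness=0) degenerates to a plain ReLU
    return np.maximum(x, 0)

def scaled_tanh(x, scale_in=1.0, scale_out=1.0):
    # ScaledTanH computes scale_out * tanh(scale_in * x)
    return scale_out * np.tanh(scale_in * x)
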
Code Example #6
 def build_encoder_conv2d_128_hidden(self, l_input):
     from lasagne.nonlinearities import sigmoid
     from lasagne.nonlinearities import LeakyRectify
     from lasagne.layers import Conv2DLayer
     from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
     try:
         from lasagne.layers.dnn import batch_norm_dnn as batch_norm
     except ImportError:
         from lasagne.layers import batch_norm
     # input: 3x128x128dim
     lrelu = LeakyRectify(0.2)
     layer = batch_norm(
         Conv2DLayer(l_input,
                     128,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))  # original with relu
     # shape 128x64x64
     layer = batch_norm(
         Conv2DLayer(layer,
                     256,
                     5,
                     stride=2,
                     pad='same',
                     nonlinearity=lrelu))  # original with relu
     # shape 256x32x32
     layer = batch_norm(
         Conv2DLayer(layer,
                     256,
                     7,
                     stride=4,
                     pad='same',
                     nonlinearity=lrelu))  # original with relu
     # shape 256x8x8=8192
     return ReshapeLayer(layer, ([0], 8192 * 2))
Code Example #7
def build_critic(input_var=None, verbose=False):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: (None, 3, 32, 32)
    layer = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 256, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 512, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # # fully-connected layer
    # layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    if verbose: print("critic output:", layer.output_shape)
    return layer
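
Because the output layer is linear, this critic is intended for a Wasserstein loss rather than cross-entropy. A hedged sketch of the pairing, with hypothetical symbolic inputs:

import theano.tensor as T
import lasagne

real_images = T.tensor4('real')  # hypothetical symbolic batches
fake_images = T.tensor4('fake')
critic = build_critic(real_images)
real_out = lasagne.layers.get_output(critic)
fake_out = lasagne.layers.get_output(critic, fake_images)
# the critic maximizes E[f(real)] - E[f(fake)], so minimize the negation
critic_loss = fake_out.mean() - real_out.mean()
generator_loss = -fake_out.mean()
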
Code Example #8
def build_model(input_var):

    layers = []

    input_layer = nn.layers.InputLayer(shape=(batch_size, num_channels,
                                              input_width, input_height),
                                       input_var=input_var,
                                       name='inputs')
    layers.append(input_layer)

    conv_1 = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(7, 7),
                         stride=(2, 2),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_1)

    pool_1 = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(pool_1)

    conv_2 = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_2)

    conv_3 = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_3)

    pool_2 = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(pool_2)

    conv_4 = Conv2DLayer(layers[-1],
                         num_filters=64,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_4)

    conv_5 = Conv2DLayer(layers[-1],
                         num_filters=64,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_5)

    pool_3 = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(pool_3)

    conv_6 = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_6)

    conv_7 = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_7)

    conv_8 = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_8)

    conv_9 = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_9)

    pool_4 = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(pool_4)

    conv_10 = Conv2DLayer(layers[-1],
                          num_filters=256,
                          filter_size=(3, 3),
                          stride=(1, 1),
                          pad='same',
                          nonlinearity=LeakyRectify(leakiness),
                          W=nn.init.Orthogonal(1.0),
                          b=nn.init.Constant(0.1),
                          untie_biases=True)
    layers.append(conv_10)

    conv_11 = Conv2DLayer(layers[-1],
                          num_filters=256,
                          filter_size=(3, 3),
                          stride=(1, 1),
                          pad='same',
                          nonlinearity=LeakyRectify(leakiness),
                          W=nn.init.Orthogonal(1.0),
                          b=nn.init.Constant(0.1),
                          untie_biases=True)
    layers.append(conv_11)

    conv_12 = Conv2DLayer(layers[-1],
                          num_filters=256,
                          filter_size=(3, 3),
                          stride=(1, 1),
                          pad='same',
                          nonlinearity=LeakyRectify(leakiness),
                          W=nn.init.Orthogonal(1.0),
                          b=nn.init.Constant(0.1),
                          untie_biases=True)
    layers.append(conv_12)

    conv_13 = Conv2DLayer(layers[-1],
                          num_filters=256,
                          filter_size=(3, 3),
                          stride=(1, 1),
                          pad='same',
                          nonlinearity=LeakyRectify(leakiness),
                          W=nn.init.Orthogonal(1.0),
                          b=nn.init.Constant(0.1),
                          untie_biases=True)
    layers.append(conv_13)

    pool_5 = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(pool_5)

    drop_1 = nn.layers.DropoutLayer(layers[-1], p=0.5)
    layers.append(drop_1)

    fc_1 = DenseLayer(layers[-1],
                      num_units=1024,
                      nonlinearity=None,
                      W=nn.init.Orthogonal(1.0),
                      b=nn.init.Constant(0.1))
    layers.append(fc_1)

    pool_6 = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(pool_6)

    merge_eyes = nn.layers.ReshapeLayer(layers[-1],
                                        shape=(batch_size // 2, -1))
    layers.append(merge_eyes)

    drop_2 = nn.layers.DropoutLayer(layers[-1], p=0.5)
    layers.append(drop_2)

    fc_2 = DenseLayer(layers[-1],
                      num_units=1024,
                      nonlinearity=None,
                      W=nn.init.Orthogonal(1.0),
                      b=nn.init.Constant(0.1))
    layers.append(fc_2)

    pool_7 = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(pool_7)

    drop_3 = nn.layers.DropoutLayer(layers[-1], p=0.5)
    layers.append(drop_3)

    fc_3 = DenseLayer(layers[-1],
                      num_units=output_dim * 2,
                      nonlinearity=None,
                      W=nn.init.Orthogonal(1.0),
                      b=nn.init.Constant(0.1))
    layers.append(fc_3)

    split_eyes = nn.layers.ReshapeLayer(layers[-1], shape=(batch_size, -1))
    layers.append(split_eyes)

    return input_layer, split_eyes
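
The merge_eyes/split_eyes reshapes assume the two eyes of each patient occupy consecutive rows of the batch. A small NumPy illustration of the trick (numbers made up):

import numpy as np

feats = np.arange(8, dtype=np.float32).reshape(4, 2)  # 4 eyes, 2 features each
merged = feats.reshape(2, -1)  # (batch//2, 4): both eyes' features side by side
split = merged.reshape(4, -1)  # back to one row per eye
assert np.array_equal(split, feats)
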
Code Example #9
def build_model(input_var):

    layers = []

    input_layer = nn.layers.InputLayer(shape=(None, num_channels, input_width,
                                              input_height),
                                       input_var=input_var,
                                       name='inputs')
    layers.append(input_layer)

    conv_1 = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(7, 7),
                         stride=(2, 2),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_1)

    pool_1 = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(pool_1)

    conv_2 = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_2)

    conv_3 = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_3)

    pool_2 = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(pool_2)

    conv_4 = Conv2DLayer(layers[-1],
                         num_filters=64,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_4)

    conv_5 = Conv2DLayer(layers[-1],
                         num_filters=64,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_5)

    pool_3 = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(pool_3)

    conv_6 = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_6)

    conv_7 = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_7)

    conv_8 = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_8)

    conv_9 = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(conv_9)

    pool_4 = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(pool_4)

    conv_10 = Conv2DLayer(layers[-1],
                          num_filters=256,
                          filter_size=(3, 3),
                          stride=(1, 1),
                          pad='same',
                          nonlinearity=LeakyRectify(leakiness),
                          W=nn.init.Orthogonal(1.0),
                          b=nn.init.Constant(0.1),
                          untie_biases=True)
    layers.append(conv_10)

    conv_11 = Conv2DLayer(layers[-1],
                          num_filters=256,
                          filter_size=(3, 3),
                          stride=(1, 1),
                          pad='same',
                          nonlinearity=LeakyRectify(leakiness),
                          W=nn.init.Orthogonal(1.0),
                          b=nn.init.Constant(0.1),
                          untie_biases=True)
    layers.append(conv_11)

    conv_12 = Conv2DLayer(layers[-1],
                          num_filters=256,
                          filter_size=(3, 3),
                          stride=(1, 1),
                          pad='same',
                          nonlinearity=LeakyRectify(leakiness),
                          W=nn.init.Orthogonal(1.0),
                          b=nn.init.Constant(0.1),
                          untie_biases=True)
    layers.append(conv_12)

    conv_13 = Conv2DLayer(layers[-1],
                          num_filters=256,
                          filter_size=(3, 3),
                          stride=(1, 1),
                          pad='same',
                          nonlinearity=LeakyRectify(leakiness),
                          W=nn.init.Orthogonal(1.0),
                          b=nn.init.Constant(0.1),
                          untie_biases=True)
    layers.append(conv_13)

    pool_5 = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(pool_5)

    drop_1 = nn.layers.DropoutLayer(layers[-1], p=0.5)
    layers.append(drop_1)

    fc_1 = DenseLayer(layers[-1],
                      num_units=2048,
                      nonlinearity=LeakyRectify(leakiness),
                      W=nn.init.Orthogonal(1.0),
                      b=nn.init.Constant(0.1))
    layers.append(fc_1)

    pool_6 = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(pool_6)

    drop_2 = nn.layers.DropoutLayer(layers[-1], p=0.5)
    layers.append(drop_2)

    fc_2 = DenseLayer(layers[-1],
                      num_units=1024,
                      nonlinearity=LeakyRectify(leakiness),
                      W=nn.init.Orthogonal(1.0),
                      b=nn.init.Constant(0.1))
    layers.append(fc_2)

    pool_7 = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(pool_7)

    drop_3 = nn.layers.DropoutLayer(layers[-1], p=0.5)
    layers.append(drop_3)

    softmax_layer = DenseLayer(drop_3,
                               num_units=output_dim,
                               nonlinearity=softmax)
    layers.append(softmax_layer)

    regression_layer = DenseLayer(drop_3, num_units=1, nonlinearity=identity)
    layers.append(regression_layer)

    output = nn.layers.merge.ConcatLayer([softmax_layer, regression_layer])
    layers.append(output)

    return input_layer, output
Code Example #10
import lasagne
import logging
from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer
from lasagne.nonlinearities import LeakyRectify, sigmoid, softmax, tanh
from matplotlib import pylab as plt
import numpy as np
from PIL import Image
from progressbar import Bar, ProgressBar, Percentage, Timer
import pylab as pl
import random
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import scipy.misc

floatX = lasagne.utils.floatX
floatX_ = theano.config.floatX
lrelu = LeakyRectify(0.2)

DIM_X = 28
DIM_Y = 28
DIM_C = 1

# ##################### UTIL #####################

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.propagate = False
file_formatter = logging.Formatter(
    '%(asctime)s:%(name)s[%(levelname)s]:%(message)s')
stream_formatter = logging.Formatter('[%(levelname)s:%(name)s]:%(message)s' +
                                     ' ' * 40)
Code Example #11
def build_generator_facades(input_var=None, ngf=64):
    lrelu = LeakyRectify(0.2)
    # Input layer
    net = InputLayer(shape=(None, 3, 256, 256), input_var=input_var)
    print("Generator input:", net.output_shape)
    # ConvLayer
    net1 = Conv2DLayer(net,
                       ngf, (3, 3), (1, 1),
                       pad=1,
                       W=Normal(0.05),
                       nonlinearity=lrelu)
    print("Gen conv1:", net1.output_shape)
    net2_ = BatchNormLayer(
        Conv2DLayer(net1,
                    ngf, (3, 3), (1, 1),
                    pad=1,
                    W=Normal(0.05),
                    nonlinearity=None))
    net2 = NonlinearityLayer(net2_, nonlinearity=lrelu)
    print("Gen conv2:", net2.output_shape)
    net3 = batch_norm(
        Conv2DLayer(net2,
                    ngf, (3, 3), (1, 1),
                    pad=1,
                    W=Normal(0.05),
                    nonlinearity=lrelu))
    print("Gen conv3:", net3.output_shape)
    net4_ = BatchNormLayer(
        Conv2DLayer(net3,
                    ngf, (3, 3), (1, 1),
                    pad=1,
                    W=Normal(0.05),
                    nonlinearity=None))
    net4 = NonlinearityLayer(net4_, nonlinearity=lrelu)
    print("Gen conv4:", net4.output_shape)
    net5 = batch_norm(
        Conv2DLayer(net4,
                    32, (3, 3), (1, 1),
                    pad=1,
                    W=Normal(0.05),
                    nonlinearity=lrelu))
    print("Gen conv5:", net5.output_shape)
    net6 = batch_norm(
        Conv2DLayer(net5,
                    1, (3, 3), (1, 1),
                    pad=1,
                    W=Normal(0.05),
                    nonlinearity=lrelu))
    print("Gen conv6:", net6.output_shape)
    # Decoder
    dnet1 = batch_norm(
        Deconv2DLayer(net6, 32, (3, 3), (1, 1), crop=1, W=Normal(0.05)))
    print("Gen Deconv layer 1:", dnet1.output_shape)
    dnet2_ = BatchNormLayer(
        Deconv2DLayer(dnet1,
                      ngf, (3, 3), (1, 1),
                      crop=1,
                      W=Normal(0.05),
                      nonlinearity=None))
    skip1 = ElemwiseSumLayer([dnet2_, net4_])
    dnet2 = NonlinearityLayer(skip1)
    print("Gen Deconv layer 2:", dnet2.output_shape)
    dnet3 = batch_norm(
        Deconv2DLayer(dnet2, ngf, (3, 3), (1, 1), crop=1, W=Normal(0.05)))
    print("Gen Deconv layer 3:", dnet3.output_shape)
    dnet4_ = BatchNormLayer(
        Deconv2DLayer(dnet3,
                      ngf, (3, 3), (1, 1),
                      crop=1,
                      W=Normal(0.05),
                      nonlinearity=None))
    skip2 = ElemwiseSumLayer([dnet4_, net2_])
    dnet4 = NonlinearityLayer(skip2)
    print("Gen Deconv layer 4:", dnet4.output_shape)
    dnet5 = batch_norm(
        Deconv2DLayer(dnet4, ngf, (3, 3), (1, 1), crop=1, W=Normal(0.05)))
    print("Gen Deconv layer 5:", dnet5.output_shape)
    dnet_out = Deconv2DLayer(dnet5,
                             3, (3, 3), (1, 1),
                             crop=1,
                             W=Normal(0.05),
                             nonlinearity=tanh)
    print("Generator output:", dnet_out.output_shape)
    print(' ')
    return dnet_out
Code Example #12
from fuel.streams import DataStream
import h5py
import lasagne
import logging
from lasagne.layers import (InputLayer, ReshapeLayer, DenseLayer, batch_norm,
                            GaussianNoiseLayer)
from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer
from lasagne.nonlinearities import LeakyRectify, sigmoid
import numpy as np
from progressbar import Bar, ProgressBar, Percentage, Timer
import pylab as pl
import theano
import theano.tensor as T
import scipy.misc
import yaml

lrelu = LeakyRectify(0.02)
floatX = theano.config.floatX
DIM_X = 32
DIM_Y = 32

# ##################### UTIL #####################

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.propagate = False
file_formatter = logging.Formatter(
    '%(asctime)s:%(name)s[%(levelname)s]:%(message)s')
stream_formatter = logging.Formatter('[%(levelname)s:%(name)s]:%(message)s' +
                                     ' ' * 40)

Code Example #13
File: scikit.py (Project: EdwardBetts/kaggle_otto)
from validation.optimize import params_name
from nnet.nnio import SnapshotStepSaver, SnapshotEndSaver, save_knowledge, load_knowledge, get_knowledge, set_knowledge
from nnet.dynamic import LogarithmicVariable, LinearVariable
from nnet.early_stopping import StopWhenOverfitting, StopAfterMinimum, StopNaN, BreakEveryN
from settings import NCLASSES, VERBOSITY, NNET_STATE_DIR, DivergenceError, SEED
from lasagne.init import Orthogonal, GlorotNormal, GlorotUniform, HeNormal, HeUniform, Sparse, Constant
from lasagne.nonlinearities import softmax, tanh, sigmoid, rectify, LeakyRectify

seed(SEED)
print('set random seed to {0} while loading NNet'.format(SEED))

nonlinearities = {
    'tanh': tanh,
    'sigmoid': sigmoid,
    'rectify': rectify,
    'leaky2': LeakyRectify(leakiness=0.02),
    'leaky20': LeakyRectify(leakiness=0.2),
    'softmax': softmax,
}

initializers = {
    'orthogonal': Orthogonal(),
    'sparse': Sparse(),
    'glorot_normal': GlorotNormal(),
    'glorot_uniform': GlorotUniform(),
    'he_normal': HeNormal(),
    'he_uniform': HeUniform(),
}


class NNet(BaseEstimator, ClassifierMixin):
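
The two registries above map configuration strings to Lasagne objects. A hedged usage sketch (not from the project; the 93-feature input and layer size are assumptions, while the keys are the ones defined above):

from lasagne.layers import InputLayer, DenseLayer

l_in = InputLayer(shape=(None, 93))  # Otto data: 93 features (assumption)
l_hid = DenseLayer(l_in, num_units=500,
                   nonlinearity=nonlinearities['leaky20'],
                   W=initializers['he_uniform'])
l_out = DenseLayer(l_hid, num_units=NCLASSES,
                   nonlinearity=nonlinearities['softmax'])
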
Code Example #14
def build_critic(input_var=None, cond_var=None, n_conds=0, arch=0,
                 with_BatchNorm=True, loss_type='wgan'):
    from lasagne.layers import (
        InputLayer, Conv2DLayer, DenseLayer, MaxPool2DLayer, concat,
        dropout, flatten)
    from lasagne.nonlinearities import rectify, LeakyRectify
    from lasagne.init import GlorotUniform  # Normal
    lrelu = LeakyRectify(0.2)
    layer = InputLayer(
        shape=(None, 1, 128, 128), input_var=input_var, name='d_in_data')
    # init = Normal(0.02, 0.0)
    init = GlorotUniform()

    if cond_var:
        # class: from data or from generator input
        layer_cond = InputLayer(
            shape=(None, n_conds), input_var=cond_var, name='d_in_condition')
        layer_cond = BatchNorm(DenseLayer(
            layer_cond, 1024, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
    if arch == 'dcgan':
        # DCGAN inspired
        layer = BatchNorm(Conv2DLayer(
            layer, 32, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 64, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 512, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
    elif arch == 'cont-enc':
        # convolution layers
        layer = BatchNorm(Conv2DLayer(
            layer, 64, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 64, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 512, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
    elif arch == 'mnist':
        # Jan Schluechter's MNIST discriminator
        # convolution layers
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 5, stride=2, pad='same', W=init, b=None,
            nonlinearity=lrelu), with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 5, stride=2, pad='same', W=init, b=None,
            nonlinearity=lrelu), with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 5, stride=2, pad='same', W=init, b=None,
            nonlinearity=lrelu), with_BatchNorm)
        # layer = BatchNorm(Conv2DLayer(
        #     layer, 128, 5, stride=2, pad='same', W=init, b=None,
        #      nonlinearity=lrelu), with_BatchNorm)
        # fully-connected layer
        # layer = BatchNorm(DenseLayer(
        #     layer, 1024, W=init, b=None, nonlinearity=lrelu), with_BatchNorm)
    elif arch == 'lsgan':
        layer = batch_norm(Conv2DLayer(
            layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
        layer = batch_norm(Conv2DLayer(
            layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
        layer = batch_norm(Conv2DLayer(
            layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
    elif arch == 'crepe':
        # CREPE
        # form words from sequence of characters
        layer = BatchNorm(Conv2DLayer(
            layer, 1024, (128, 7), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = MaxPool2DLayer(layer, (1, 3))
        # temporal convolution, 7-gram
        layer = BatchNorm(Conv2DLayer(
            layer, 512, (1, 7), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = MaxPool2DLayer(layer, (1, 3))
        # temporal convolution, 3-gram
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = flatten(layer)
        # fully-connected layers
        layer = dropout(DenseLayer(
            layer, 1024, W=init, b=None, nonlinearity=rectify))
        layer = dropout(DenseLayer(
            layer, 1024, W=init, b=None, nonlinearity=rectify))
    else:
        raise Exception("Model architecture {} is not supported".format(arch))

    # output layer (linear and without bias)
    if cond_var is not None:
        layer = DenseLayer(layer, 1024, nonlinearity=lrelu, b=None)
        layer = concat([layer, layer_cond])

    layer = DenseLayer(layer, 1, b=None, nonlinearity=None)
    print("Critic output:", layer.output_shape)
    return layer
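
This snippet calls a project-level helper BatchNorm(layer, with_BatchNorm) that is not shown. Judging from the call sites, a minimal compatible definition would be the following (an assumption, not the project's actual code):

def BatchNorm(layer, use_batch_norm=True):
    # apply Lasagne's batch normalization only when enabled,
    # otherwise return the layer unchanged
    from lasagne.layers import batch_norm
    return batch_norm(layer) if use_batch_norm else layer
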
Code Example #15
    def build_decoder_conv2d_64_hidden(self, l_Z, params):
        from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
        try:
            from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
        except ImportError:
            raise ImportError(
                "Your Lasagne is too old. Try the bleeding-edge "
                "version: http://lasagne.readthedocs.io/en/latest/"
                "user/installation.html#bleeding-edge-version")
        try:
            from lasagne.layers.dnn import batch_norm_dnn as batch_norm
        except ImportError:
            from lasagne.layers import batch_norm
        from lasagne.nonlinearities import sigmoid
        from lasagne.nonlinearities import LeakyRectify
        lrelu = LeakyRectify(0.2)
        # fully-connected layer
        layer = batch_norm(
            DenseLayer(
                l_Z,
                1024,
                nonlinearity=lrelu,
                W=nn.init.GlorotUniform() if params is None else params['w1'],
                b=nn.init.Constant(0.) if params is None else params['b1'],
            ))  # original with relu
        _params = {}
        _params['w1'] = layer.input_layer.input_layer.W
        _params['b1'] = layer.input_layer.input_layer.b
        # project and reshape
        layer = batch_norm(
            DenseLayer(
                layer,
                128 * 8 * 8,
                nonlinearity=lrelu,
                W=nn.init.GlorotUniform() if params is None else params['w2'],
                b=nn.init.Constant(0.) if params is None else params['b2'],
            ))  # original with relu
        _params['w2'] = layer.input_layer.input_layer.W
        _params['b2'] = layer.input_layer.input_layer.b
        layer = ReshapeLayer(layer, ([0], 128, 8, 8))
        # two fractional-stride convolutions
        layer = batch_norm(
            Deconv2DLayer(
                layer,
                128,
                5,
                stride=2,
                crop='same',
                output_size=16,
                nonlinearity=lrelu,
                W=nn.init.GlorotUniform() if params is None else params['w3'],
                b=nn.init.Constant(0.)
                if params is None else params['b3']))  # original with relu
        _params['w3'] = layer.input_layer.input_layer.W
        _params['b3'] = layer.input_layer.input_layer.b
        layer = batch_norm(
            Deconv2DLayer(
                layer,
                64,
                5,
                stride=2,
                crop='same',
                output_size=32,
                nonlinearity=lrelu,
                W=nn.init.GlorotUniform() if params is None else params['w4'],
                b=nn.init.Constant(0.)
                if params is None else params['b4']))  # original with relu
        _params['w4'] = layer.input_layer.input_layer.W
        _params['b4'] = layer.input_layer.input_layer.b
        layer = Deconv2DLayer(
            layer,
            self.channels,
            5,
            stride=2,
            crop='same',
            output_size=64,
            nonlinearity=sigmoid,
            W=nn.init.GlorotUniform() if params is None else params['w5'],
            b=nn.init.Constant(0.) if params is None else params['b5'])
        _params['w5'] = layer.W
        _params['b5'] = layer.b

        l_dec_hid = ReshapeLayer(
            layer, ([0], self.width * self.height * self.channels))
        l_dec_mu, l_dec_logsigma, __params = self.build_decoder_last_layer(
            l_dec_hid, params)
        _params.update(__params)
        return l_dec_mu, l_dec_logsigma, _params
Code Example #16
 ],
 update=nesterov_momentum,  # TODO: optimize
 loss=None,
 objective=Objective,
 regression=False,
 max_epochs=1000,
 eval_size=0.1,
 #on_epoch_finished = None,
 #on_training_finished = None,
 verbose=bool(VERBOSITY),
 input_shape=(None, train.shape[1]),
 output_num_units=NCLASSES,
 dense1_num_units=500,
 dense2_num_units=500,
 dense3_num_units=400,
 dense1_nonlinearity=LeakyRectify(leakiness=0.1),
 dense2_nonlinearity=LeakyRectify(leakiness=0.1),
 dense3_nonlinearity=LeakyRectify(leakiness=0.1),
 output_nonlinearity=softmax,
 dense1_W=HeUniform(),
 dense2_W=HeUniform(),
 dense3_W=HeUniform(),
 dense1_b=Constant(0.),
 dense2_b=Constant(0.),
 dense3_b=Constant(0.),
 output_b=Constant(0.),
 dropout0_p=0.1,
 dropout1_p=0.6,
 dropout2_p=0.6,
 dropout3_p=0.6,
 update_learning_rate=shared(float32(0.02)),  #
Code Example #17
def define_net():
    define_net_specific_parameters()

    io = ImageIO()

    # Read pandas csv labels
    y = util.load_labels()

    if params.SUBSET != 0:
        y = y[:params.SUBSET]

    X = np.arange(y.shape[0])

    mean, std = io.load_mean_std(circularized=params.CIRCULARIZED_MEAN_STD)
    keys = y.index.values

    if params.AUGMENT:
        train_iterator = AugmentingParallelBatchIterator(keys,
                                                         params.BATCH_SIZE,
                                                         std,
                                                         mean,
                                                         y_all=y)
    else:
        train_iterator = ParallelBatchIterator(keys,
                                               params.BATCH_SIZE,
                                               std,
                                               mean,
                                               y_all=y)

    test_iterator = ParallelBatchIterator(keys,
                                          params.BATCH_SIZE,
                                          std,
                                          mean,
                                          y_all=y)

    if params.REGRESSION:
        y = util.float32(y)
        y = y[:, np.newaxis]

    if 'gpu' in theano.config.device:
        # Half of coma does not support cuDNN, check whether we can use it on this node
        # If not, use cuda_convnet bindings
        from theano.sandbox.cuda.dnn import dnn_available
        if dnn_available() and not params.DISABLE_CUDNN:
            from lasagne.layers import dnn
            Conv2DLayer = dnn.Conv2DDNNLayer
            MaxPool2DLayer = dnn.MaxPool2DDNNLayer
        else:
            from lasagne.layers import cuda_convnet
            Conv2DLayer = cuda_convnet.Conv2DCCLayer
            MaxPool2DLayer = cuda_convnet.MaxPool2DCCLayer
    else:
        Conv2DLayer = layers.Conv2DLayer
        MaxPool2DLayer = layers.MaxPool2DLayer

    Maxout = layers.pool.FeaturePoolLayer

    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', Conv2DLayer),
            ('pool1', MaxPool2DLayer),
            ('conv2', Conv2DLayer),
            ('pool2', MaxPool2DLayer),
            ('conv3', Conv2DLayer),
            ('pool3', MaxPool2DLayer),
            ('conv4', Conv2DLayer),
            ('pool4', MaxPool2DLayer),
            ('dropouthidden1', layers.DropoutLayer),
            ('hidden1', layers.DenseLayer),
            ('maxout1', Maxout),
            ('dropouthidden2', layers.DropoutLayer),
            ('hidden2', layers.DenseLayer),
            ('maxout2', Maxout),
            ('dropouthidden3', layers.DropoutLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, params.CHANNELS, params.PIXELS, params.PIXELS),
        conv1_num_filters=32,
        conv1_filter_size=(8, 8),
        conv1_border_mode='same',
        conv1_stride=(2, 2),
        pool1_pool_size=(2, 2),
        pool1_stride=(2, 2),
        conv2_num_filters=64,
        conv2_filter_size=(5, 5),
        conv2_border_mode='same',
        pool2_pool_size=(2, 2),
        pool2_stride=(2, 2),
        conv3_num_filters=128,
        conv3_filter_size=(3, 3),
        conv3_border_mode='same',
        pool3_pool_size=(2, 2),
        pool3_stride=(2, 2),
        conv4_num_filters=256,
        conv4_filter_size=(3, 3),
        conv4_border_mode='same',
        pool4_pool_size=(2, 2),
        pool4_stride=(2, 2),
        hidden1_num_units=1024,
        hidden2_num_units=1024,
        dropouthidden1_p=0.5,
        dropouthidden2_p=0.5,
        dropouthidden3_p=0.5,
        maxout1_pool_size=2,
        maxout2_pool_size=2,
        output_num_units=1 if params.REGRESSION else 5,
        output_nonlinearity=None
        if params.REGRESSION else nonlinearities.softmax,
        conv1_nonlinearity=LeakyRectify(0.1),
        conv2_nonlinearity=LeakyRectify(0.1),
        conv3_nonlinearity=LeakyRectify(0.1),
        conv4_nonlinearity=LeakyRectify(0.1),
        hidden1_nonlinearity=LeakyRectify(0.1),
        hidden2_nonlinearity=LeakyRectify(0.1),
        update_learning_rate=theano.shared(
            util.float32(params.START_LEARNING_RATE)),
        update_momentum=theano.shared(util.float32(params.MOMENTUM)),
        custom_score=('kappa', quadratic_kappa),
        regression=params.REGRESSION,
        batch_iterator_train=train_iterator,
        batch_iterator_test=test_iterator,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate',
                           start=params.START_LEARNING_RATE),
            stats.Stat(),
            ModelSaver()
        ],
        max_epochs=350,
        verbose=1,

        # Only relevant when create_validation_split = True
        eval_size=0.1,

        # Need to specify splits manually like indicated below!
        create_validation_split=params.SUBSET > 0,
    )

    # It is recommended to use the same training/validation split every model for ensembling and threshold optimization
    #
    # To set specific training/validation split:
    net.X_train = np.load(params.IMAGE_SOURCE + "/X_train.npy")
    net.X_valid = np.load(params.IMAGE_SOURCE + "/X_valid.npy")
    net.y_train = np.load(params.IMAGE_SOURCE + "/y_train.npy")
    net.y_valid = np.load(params.IMAGE_SOURCE + "/y_valid.npy")

    return net, X, y
Code Example #18
def build_generator_unet(input_var=None, ngf=64):
    # Input layer
    lrelu = LeakyRectify(0.2)
    net = InputLayer(shape=(None, 3, 256, 256), input_var=input_var)
    print("Generator input:", net.output_shape)
    # ConvLayer
    net1_ = Conv2DLayer(net,
                        ngf * 1, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.05),
                        nonlinearity=None)
    net1 = NonlinearityLayer(BatchNormLayer(net1_), nonlinearity=lrelu)
    print("Gen conv1:", net1.output_shape)
    net2_ = Conv2DLayer(net1,
                        ngf * 2, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.02),
                        nonlinearity=None)
    net2 = NonlinearityLayer(BatchNormLayer(net2_), nonlinearity=lrelu)
    print("Gen conv2:", net2.output_shape)
    net3_ = Conv2DLayer(net2,
                        ngf * 4, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.02),
                        nonlinearity=None)
    net3 = NonlinearityLayer(BatchNormLayer(net3_), nonlinearity=lrelu)
    print("Gen conv3:", net3.output_shape)
    net4_ = Conv2DLayer(net3,
                        ngf * 8, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.02),
                        nonlinearity=None)
    net4 = NonlinearityLayer(BatchNormLayer(net4_), nonlinearity=lrelu)
    print("Gen conv4:", net4.output_shape)
    net5_ = Conv2DLayer(net4,
                        ngf * 8, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.02),
                        nonlinearity=None)
    net5 = NonlinearityLayer(BatchNormLayer(net5_), nonlinearity=lrelu)
    print("Gen conv5:", net5.output_shape)
    net6_ = Conv2DLayer(net5,
                        ngf * 8, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.02),
                        nonlinearity=None)
    net6 = NonlinearityLayer(BatchNormLayer(net6_), nonlinearity=lrelu)
    print("Gen conv6:", net6.output_shape)
    net7_ = Conv2DLayer(net6,
                        ngf * 8, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.02),
                        nonlinearity=None)
    net7 = NonlinearityLayer(BatchNormLayer(net7_), nonlinearity=lrelu)
    print("Gen conv7:", net7.output_shape)
    net8_ = Conv2DLayer(net7,
                        ngf * 8, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.02),
                        nonlinearity=None)
    net8 = NonlinearityLayer(net8_)
    print("Gen conv8:", net8.output_shape)
    # Decoder

    dnet1_ = DropoutLayer(
        BatchNormLayer(
            Deconv2DLayer(net8,
                          ngf * 8, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=None)))
    skip1 = ConcatLayer([dnet1_, net7_])
    print("skip layer 1:", skip1.output_shape)
    dnet1 = NonlinearityLayer(skip1)
    print("Gen Deconv layer 1:", dnet1.output_shape)

    dnet2_ = DropoutLayer(
        BatchNormLayer(
            Deconv2DLayer(dnet1,
                          ngf * 8, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=None)))
    skip2 = ConcatLayer([dnet2_, net6_])
    print("skip layer 2:", skip2.output_shape)
    dnet2 = NonlinearityLayer(skip2)
    print("Gen Deconv layer 2:", dnet2.output_shape)

    dnet3_ = DropoutLayer(
        BatchNormLayer(
            Deconv2DLayer(dnet2,
                          ngf * 8, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=None)))
    skip3 = ConcatLayer([dnet3_, net5_])
    print("skip layer 3:", skip3.output_shape)
    dnet3 = NonlinearityLayer(skip3)
    print("Gen Deconv layer 3:", dnet3.output_shape)

    dnet4_ = BatchNormLayer(
        Deconv2DLayer(dnet3,
                      ngf * 8, (4, 4), (2, 2),
                      crop=1,
                      W=Normal(0.02),
                      nonlinearity=None))
    skip4 = ConcatLayer([dnet4_, net4_])
    print("skip layer 4:", skip4.output_shape)
    dnet4 = NonlinearityLayer(skip4)
    print("Geneartor deconv 4:", dnet4.output_shape)

    dnet5_ = BatchNormLayer(
        Deconv2DLayer(dnet4,
                      ngf * 4, (4, 4), (2, 2),
                      crop=1,
                      W=Normal(0.02),
                      nonlinearity=None))
    skip5 = ConcatLayer([dnet5_, net3_])
    print("skip layer 5:", skip5.output_shape)
    dnet5 = NonlinearityLayer(skip5)
    print("Geneartor deconv 5:", dnet5.output_shape)

    dnet6_ = BatchNormLayer(
        Deconv2DLayer(dnet5,
                      ngf * 2, (4, 4), (2, 2),
                      crop=1,
                      W=Normal(0.02),
                      nonlinearity=None))
    skip6 = ConcatLayer([dnet6_, net2_])
    print("skip layer 6:", skip6.output_shape)
    dnet6 = NonlinearityLayer(skip6)
    print("Geneartor deconv 6:", dnet6.output_shape)

    dnet7_ = BatchNormLayer(
        Deconv2DLayer(dnet6,
                      ngf, (4, 4), (2, 2),
                      crop=1,
                      W=Normal(0.02),
                      nonlinearity=None))
    skip7 = ConcatLayer([dnet7_, net1_])
    print("skip layer 7:", skip7.output_shape)
    dnet7 = NonlinearityLayer(skip7)
    print("Geneartor deconv 7:", dnet7.output_shape)

    dnet_out = Deconv2DLayer(dnet7,
                             3, (4, 4), (2, 2),
                             crop=1,
                             W=Normal(0.02),
                             nonlinearity=tanh)

    print("Generator output:", dnet_out.output_shape)
    print(' ')
    return dnet_out
Code Example #19
def build_discriminator_inpainting(input_var=None, ndf=64):
    lrelu = LeakyRectify(0.2)
    # input: true images
    net = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)
    print("Discriminator input:", net.output_shape)
    net1_ = Conv2DLayer(net,
                        ndf, (3, 3), (1, 1),
                        pad=0,
                        W=Normal(0.5),
                        nonlinearity=None)
    net1 = NonlinearityLayer(net1_, nonlinearity=tanh)
    print("Discriminator conv1:", net1.output_shape)
    net2 = batch_norm(
        Conv2DLayer(net1,
                    ndf * 2, (3, 3), (2, 2),
                    pad=1,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    print("Discriminator conv2:", net2.output_shape)
    net2 = batch_norm(
        Conv2DLayer(net2,
                    ndf * 2, (3, 3), (1, 1),
                    pad=1,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    net2_ = Conv2DLayer(net2,
                        ndf * 4, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.02),
                        nonlinearity=None)
    net2 = NonlinearityLayer(BatchNormLayer(net2_), nonlinearity=lrelu)
    print("Discriminator conv2:", net2.output_shape)
    net3 = batch_norm(
        Conv2DLayer(net2,
                    ndf * 4, (3, 3), (1, 1),
                    pad=1,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    net3_ = Conv2DLayer(net3,
                        ndf * 8, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.02),
                        nonlinearity=None)
    net3 = NonlinearityLayer(BatchNormLayer(net3_), nonlinearity=lrelu)
    print("Discriminator conv3:", net3.output_shape)
    net4 = batch_norm(
        Conv2DLayer(net3,
                    ndf * 8, (3, 3), (1, 1),
                    pad=1,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    net4_ = Conv2DLayer(net4,
                        ndf * 8, (3, 3), (2, 2),
                        pad=1,
                        W=Normal(0.02),
                        nonlinearity=None)
    net4 = NonlinearityLayer(BatchNormLayer(net4_), nonlinearity=lrelu)
    print("Discriminator conv4:", net4.output_shape)
    net5 = Conv2DLayer(net4,
                       8, (3, 3), (1, 1),
                       pad=1,
                       W=Normal(0.02),
                       nonlinearity=None)
    print("Discriminator conv5:", net5.output_shape)
    net_out = DenseLayer(net5, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print("Discriminator output:", net_out.output_shape)
    print(' ')
    return net1, net2, net3, net4, net_out
Code Example #20
def build_generator_inpainting(input_var=None, ngf=64):
    # Input layer
    lrelu = LeakyRectify(0.2)
    net = InputLayer(shape=(None, 3, 128, 128), input_var=input_var)
    print("Generator input:", net.output_shape)
    # ConvLayer
    net1 = Conv2DLayer(net,
                       ngf, (3, 3), (2, 2),
                       pad=1,
                       W=Normal(0.02),
                       nonlinearity=lrelu)
    print("Gen conv1:", net1.output_shape)
    net2 = batch_norm(
        Conv2DLayer(net1,
                    ngf, (3, 3), (2, 2),
                    pad=1,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    print("Gen conv2:", net2.output_shape)
    net3 = batch_norm(
        Conv2DLayer(net2,
                    ngf * 2, (3, 3), (2, 2),
                    pad=1,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    print("Gen conv3:", net3.output_shape)
    net4 = batch_norm(
        Conv2DLayer(net3,
                    ngf * 4, (3, 3), (2, 2),
                    pad=1,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    print("Gen conv4:", net4.output_shape)
    net5 = batch_norm(
        Conv2DLayer(net4,
                    ngf * 8, (3, 3), (2, 2),
                    pad=1,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    print("Gen conv5:", net5.output_shape)
    net6 = batch_norm(
        Conv2DLayer(net5,
                    4000, (4, 4),
                    pad=0,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    print("Gen conv6:", net6.output_shape)
    # Decoder
    dnet1 = batch_norm(
        Deconv2DLayer(net6, ngf * 8, (4, 4), (1, 1), crop=0, W=Normal(0.02)))
    print("Gen Deconv layer 1:", dnet1.output_shape)

    dnet2 = batch_norm(
        Deconv2DLayer(dnet1, ngf * 4, (4, 4), (2, 2), crop=1, W=Normal(0.02)))
    print("Gen Deconv layer 2:", dnet2.output_shape)

    dnet3 = batch_norm(
        Deconv2DLayer(dnet2, ngf * 2, (4, 4), (2, 2), crop=1, W=Normal(0.02)))
    print("Gen Deconv layer 3:", dnet3.output_shape)

    dnet4 = batch_norm(
        Deconv2DLayer(dnet3, ngf, (4, 4), (2, 2), crop=1, W=Normal(0.02)))
    print("Geneartor deconv 4:", dnet4.output_shape)

    dnet_out = Deconv2DLayer(dnet4,
                             3, (4, 4), (2, 2),
                             crop=1,
                             W=Normal(0.02),
                             nonlinearity=tanh)

    print("Generator output:", dnet_out.output_shape)
    print(' ')
    return dnet_out
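
To verify the printed decoder shapes by hand: without an explicit output_size, Lasagne's TransposedConv2DLayer produces out = stride * (in - 1) + filter_size - 2 * crop. A small helper tracing the decoder above:

def deconv_out(in_size, filter_size, stride, crop):
    # output size of a transposed convolution (no output_size given)
    return stride * (in_size - 1) + filter_size - 2 * crop

size = 1  # spatial size after the 4x4 'valid' conv6 on 4x4 feature maps
for f, s, c in [(4, 1, 0), (4, 2, 1), (4, 2, 1), (4, 2, 1), (4, 2, 1)]:
    size = deconv_out(size, f, s, c)  # 4, 8, 16, 32, 64
print(size)  # 64: the output patch is 64x64 for a 128x128 input
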
Code Example #21
def build_model():
    layers = []

    l_in_imgdim = nn.layers.InputLayer(shape=(batch_size, 2), name='imgdim')

    l_in1 = nn.layers.InputLayer(shape=(batch_size, num_channels, input_width,
                                        input_height),
                                 name='images')
    layers.append(l_in1)

    Conv2DLayer = dnn.Conv2DDNNLayer
    MaxPool2DLayer = dnn.MaxPool2DDNNLayer
    DenseLayer = nn.layers.DenseLayer

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(7, 7),
                         stride=(2, 2),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    # l_conv = Conv2DLayer(layers[-1],
    #                      num_filters=32, filter_size=(3, 3), stride=(1, 1),
    #                      border_mode='same',
    #                      nonlinearity=LeakyRectify(leakiness),
    #                      W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
    #                      untie_biases=True,
    #                      learning_rate_scale=1.0)
    # layers.append(l_conv)

    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=64,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=64,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    # l_conv = Conv2DLayer(layers[-1],
    #                      num_filters=64, filter_size=(3, 3), stride=(1, 1),
    #                      border_mode='same',
    #                      nonlinearity=LeakyRectify(leakiness),
    #                      W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
    #                      untie_biases=True,
    #                      learning_rate_scale=1.0)
    # layers.append(l_conv)

    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1],
                            pool_size=(3, 3),
                            stride=(2, 2),
                            name='coarse_last_pool')
    layers.append(l_pool)

    layers.append(nn.layers.DropoutLayer(layers[-1], p=0.5))
    layers.append(
        DenseLayer(layers[-1],
                   nonlinearity=None,
                   num_units=1024,
                   W=nn.init.Orthogonal(1.0),
                   b=nn.init.Constant(0.1),
                   name='first_fc_0'))
    l_pool = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(l_pool)

    l_first_repr = layers[-1]

    l_coarse_repr = nn.layers.concat([l_first_repr, l_in_imgdim])
    layers.append(l_coarse_repr)

    # Combine representations of both eyes.
    layers.append(
        nn.layers.ReshapeLayer(layers[-1], shape=(batch_size // 2, -1)))

    layers.append(nn.layers.DropoutLayer(layers[-1], p=0.5))
    layers.append(
        nn.layers.DenseLayer(layers[-1],
                             nonlinearity=None,
                             num_units=1024,
                             W=nn.init.Orthogonal(1.0),
                             b=nn.init.Constant(0.1),
                             name='combine_repr_fc'))
    l_pool = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(l_pool)

    l_hidden = nn.layers.DenseLayer(
        nn.layers.DropoutLayer(layers[-1], p=0.5),
        num_units=output_dim * 2,
        nonlinearity=None,  # No softmax yet!
        W=nn.init.Orthogonal(1.0),
        b=nn.init.Constant(0.1))
    layers.append(l_hidden)

    # Reshape back to (batch_size, 5).
    layers.append(nn.layers.ReshapeLayer(layers[-1], shape=(batch_size, 5)))

    # Apply softmax.
    l_out = ApplyNonlinearity(layers[-1],
                              nonlinearity=nn.nonlinearities.softmax)
    layers.append(l_out)

    l_ins = [l_in1, l_in_imgdim]

    return l_out, l_ins
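
A hedged sketch of turning the two returned input layers into a prediction function; it assumes the module-level globals used inside build_model (batch_size, num_channels, input_width, input_height, leakiness, output_dim) and the ApplyNonlinearity helper are defined as in the original project.

import theano
import lasagne

l_out, l_ins = build_model()
# deterministic=True disables the DropoutLayers at prediction time
pred = lasagne.layers.get_output(l_out, deterministic=True)
predict_fn = theano.function([l.input_var for l in l_ins], pred)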
Code example #22
File: osemi.py Project: sronen71/otto
def build_net(randomize=False,
              loss=categorical_crossentropy,
              y_tensor_type=None,
              dropfactor=1.0,
              sizefactor=1):

    layers0 = [('input', InputLayer), ('dropin', DropoutLayer),
               ('dense0', DenseLayer), ('dropout0', DropoutLayer),
               ('dense1', DenseLayer), ('dropout1', DropoutLayer),
               ('dense2', DenseLayer), ('dropout2', DropoutLayer),
               ('output', DenseLayer)]
    n = [int(512 * sizefactor), int(800 * sizefactor), int(1024 * sizefactor)]
    leak = [0.3, 0.0, 0.0]
    drop = [0.1, 0.2, 0.3, 0.4]
    if randomize:
        for i in range(3):
            n[i] += np.random.randint(low=-n[i] // 15, high=n[i] // 15)
        """
        for i in range(4):
            drop[i] *= np.random.uniform(0.8,1.2)
        leak[0]=np.random.uniform(0.2,0.3)
        leak[1]=np.random.uniform(0,0.1)
        leak[2]=np.random.uniform(0.0,0.05)
        """
        print "net: ", n, leak, drop

    net0 = NeuralNet(
        layers=layers0,
        input_shape=(None, num_features),
        dropin_p=drop[0] * dropfactor,
        dense0_num_units=n[0],
        dense0_W=HeNormal(),
        dense0_nonlinearity=LeakyRectify(leak[0]),
        dropout0_p=drop[1] * dropfactor,
        dense1_num_units=n[1],
        dense1_nonlinearity=LeakyRectify(leak[1]),
        dense1_W=HeNormal(),
        dropout1_p=drop[2] * dropfactor,
        dense2_num_units=n[2],  # 1024
        dense2_nonlinearity=LeakyRectify(leak[2]),
        dense2_W=HeNormal(),
        dropout2_p=drop[3] * dropfactor,
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        update=nesterov_momentum,
        update_learning_rate=theano.shared(tfloat32(0.02)),
        update_momentum=theano.shared(tfloat32(0.9)),
        eval_size=0.0,
        verbose=1,
        max_epochs=150,  #150
        on_epoch_finished=[
            AdjustVariable('update_learning_rate',
                           epochs=[50, 100],
                           rates=[2e-3, 2e-4])
        ],
        regularization_rate=1e-5,
        batch_iterator_train=BatchIterator(batch_size=128),
        objective_loss_function=loss,
        y_tensor_type=y_tensor_type)

    return net0
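
A hedged usage sketch for the nolearn-style NeuralNet returned above; X, y and X_test are assumed arrays, not part of the source:

net0 = build_net(randomize=True)
net0.fit(X.astype('float32'), y.astype('int32'))      # trains for max_epochs
proba = net0.predict_proba(X_test.astype('float32'))  # per-class probabilities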
Code example #23
def build_network():

    net = {}
    net['input'] = InputLayer((None, 3, 448, 448))
    net['conv1/7x7_s2'] = ConvLayer(net['input'],
                                    64,
                                    7,
                                    stride=2,
                                    pad=2,
                                    flip_filters=False,
                                    nonlinearity=LeakyRectify(0.1))

    net['pool1/2x2_s2'] = PoolLayer(net['conv1/7x7_s2'],
                                    pool_size=2,
                                    stride=2,
                                    ignore_border=False)

    net['conv2/3x3_s1'] = ConvLayer(net['pool1/2x2_s2'],
                                    192,
                                    3,
                                    stride=1,
                                    pad=1,
                                    flip_filters=False,
                                    nonlinearity=LeakyRectify(0.1))
    net['pool2/2x2_s2'] = PoolLayer(net['conv2/3x3_s1'],
                                    pool_size=2,
                                    stride=2,
                                    ignore_border=False)

    net['conv3/1x1_s1'] = ConvLayer(net['pool2/2x2_s2'],
                                    128,
                                    1,
                                    stride=1,
                                    pad=0,
                                    flip_filters=False,
                                    nonlinearity=LeakyRectify(0.1))
    net['conv4/3x3_s1'] = ConvLayer(net['conv3/1x1_s1'],
                                    256,
                                    3,
                                    stride=1,
                                    pad=1,
                                    flip_filters=False,
                                    nonlinearity=LeakyRectify(0.1))
    net['conv5/1x1_s1'] = ConvLayer(net['conv4/3x3_s1'],
                                    256,
                                    1,
                                    stride=1,
                                    pad=0,
                                    flip_filters=False,
                                    nonlinearity=LeakyRectify(0.1))
    net['conv6/3x3_s1'] = ConvLayer(net['conv5/1x1_s1'],
                                    512,
                                    3,
                                    stride=1,
                                    pad=1,
                                    flip_filters=False,
                                    nonlinearity=LeakyRectify(0.1))

    net['pool3/2x2_s2'] = PoolLayer(net['conv6/3x3_s1'],
                                    pool_size=2,
                                    stride=2,
                                    ignore_border=False)

    ## repeated 4 times: 1x1 reduction followed by 3x3 convolution
    net['conv7/1x1_s1'] = ConvLayer(net['pool3/2x2_s2'],
                                    256,
                                    1,
                                    stride=1,
                                    pad=0,
                                    flip_filters=False,
                                    nonlinearity=LeakyRectify(0.1))
    net['conv8/3x3_s1'] = ConvLayer(net['conv7/1x1_s1'],
                                    512,
                                    3,
                                    stride=1,
                                    pad=1,
                                    flip_filters=False,
                                    nonlinearity=LeakyRectify(0.1))

    net['conv9/1x1_s1'] = ConvLayer(net['conv8/3x3_s1'],
                                    256,
                                    1,
                                    stride=1,
                                    pad=0,
                                    flip_filters=False,
                                    nonlinearity=LeakyRectify(0.1))
    net['conv10/3x3_s1'] = ConvLayer(net['conv9/1x1_s1'],
                                     512,
                                     3,
                                     stride=1,
                                     pad=1,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))

    net['conv11/1x1_s1'] = ConvLayer(net['conv10/3x3_s1'],
                                     256,
                                     1,
                                     stride=1,
                                     pad=0,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))
    net['conv12/3x3_s1'] = ConvLayer(net['conv11/1x1_s1'],
                                     512,
                                     3,
                                     stride=1,
                                     pad=1,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))

    net['conv13/1x1_s1'] = ConvLayer(net['conv12/3x3_s1'],
                                     256,
                                     1,
                                     stride=1,
                                     pad=0,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))
    net['conv14/3x3_s1'] = ConvLayer(net['conv13/1x1_s1'],
                                     512,
                                     3,
                                     stride=1,
                                     pad=1,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))
    ####

    net['conv15/1x1_s1'] = ConvLayer(net['conv14/3x3_s1'],
                                     512,
                                     1,
                                     stride=1,
                                     pad=0,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))
    net['conv16/3x3_s1'] = ConvLayer(net['conv15/1x1_s1'],
                                     1024,
                                     3,
                                     stride=1,
                                     pad=1,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))

    ## maxpool 4
    net['pool4/2x2_s2'] = PoolLayer(net['conv16/3x3_s1'],
                                    pool_size=2,
                                    stride=2,
                                    ignore_border=False)

    ## repeated 2 times: 1x1 reduction followed by 3x3 convolution
    net['conv17/1x1_s1'] = ConvLayer(net['pool4/2x2_s2'],
                                     512,
                                     1,
                                     stride=1,
                                     pad=0,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))
    net['conv18/3x3_s1'] = ConvLayer(net['conv17/1x1_s1'],
                                     1024,
                                     3,
                                     stride=1,
                                     pad=1,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))

    net['conv19/1x1_s1'] = ConvLayer(net['conv18/3x3_s1'],
                                     512,
                                     1,
                                     stride=1,
                                     pad=0,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))
    net['conv20/3x3_s1'] = ConvLayer(net['conv19/1x1_s1'],
                                     1024,
                                     3,
                                     stride=1,
                                     pad=1,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))
    ####

    net['conv21/3x3_s1'] = ConvLayer(net['conv20/3x3_s1'],
                                     1024,
                                     3,
                                     stride=1,
                                     pad=1,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))
    net['conv22/3x3_s2'] = ConvLayer(net['conv21/3x3_s1'],
                                     1024,
                                     3,
                                     stride=2,
                                     pad=1,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))

    net['conv23/3x3_s1'] = ConvLayer(net['conv22/3x3_s2'],
                                     1024,
                                     3,
                                     stride=1,
                                     pad=1,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))
    net['conv24/3x3_s1'] = ConvLayer(net['conv23/3x3_s1'],
                                     1024,
                                     3,
                                     stride=1,
                                     pad=1,
                                     flip_filters=False,
                                     nonlinearity=LeakyRectify(0.1))

    # dense layer
    net['dense1'] = DenseLayer(net['conv24/3x3_s1'],
                               num_units=4096,
                               nonlinearity=LeakyRectify(0.1))
    net['dropout'] = DropoutLayer(net['dense1'], p=0.5)
    net['dense2'] = DenseLayer(net['dropout'],
                               num_units=1470,
                               nonlinearity=linear)

    net['output_layer'] = net['dense2']
    ## detection params

    return net
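
The 1470-unit output matches the YOLO v1 layout: a 7x7 grid with 30 values per cell (2 boxes x 5 box parameters + 20 class scores). A hedged sketch of compiling the network and reshaping the prediction into that grid; the reshape is an interpretation of the layout, not part of the source.

import theano
import theano.tensor as T
import lasagne

net = build_network()
X = T.tensor4('X')
out = lasagne.layers.get_output(net['output_layer'], inputs={net['input']: X})
# (batch, 1470) -> (batch, 7, 7, 30)
predict_fn = theano.function([X], out.reshape((-1, 7, 7, 30)))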
Code example #24
 def build_decoder_conv2d_128_local_hidden(self, l_Z, params):
     from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
     from LocallyConnected2DLayer import LocallyConnected2DLayer
     try:
         from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
     except ImportError:
         raise ImportError(
             "Your Lasagne is too old. Try the bleeding-edge "
             "version: http://lasagne.readthedocs.io/en/latest/"
             "user/installation.html#bleeding-edge-version")
     try:
         from lasagne.layers.dnn import batch_norm_dnn as batch_norm
     except ImportError:
         from lasagne.layers import batch_norm
     from lasagne.nonlinearities import sigmoid
     from lasagne.nonlinearities import LeakyRectify
     lrelu = LeakyRectify(0.2)
     # fully-connected layer
     layer = batch_norm(
         DenseLayer(
             l_Z,
             4096,
             nonlinearity=lrelu,
             W=nn.init.GlorotUniform() if params is None else params['w1'],
             b=nn.init.Constant(0.) if params is None else params['b1'],
         ))  # original with relu
     _params = {}
     _params['w1'] = layer.input_layer.input_layer.W
     _params['b1'] = layer.input_layer.input_layer.b
     # project and reshape to 256x8x8
     layer = batch_norm(
         DenseLayer(
             layer,
             256 * 8 * 8,
             nonlinearity=lrelu,
             W=nn.init.GlorotUniform() if params is None else params['w2'],
             b=nn.init.Constant(0.) if params is None else params['b2'],
             # shape 256x8x8
         ))  # original with relu
     _params['w2'] = layer.input_layer.input_layer.W
     _params['b2'] = layer.input_layer.input_layer.b
     layer = ReshapeLayer(layer, ([0], 256, 8, 8))
     # two fractional-stride convolutions
     layer = batch_norm(
         Deconv2DLayer(
             layer,
             256,
             7,
             stride=4,
             crop='same',
             output_size=32,
             nonlinearity=lrelu,
             W=nn.init.GlorotUniform() if params is None else params['w3'],
             b=nn.init.Constant(0.)
             if params is None else params['b3']))  # original with relu
     # shape 256x32x32
     _params['w3'] = layer.input_layer.input_layer.W
     _params['b3'] = layer.input_layer.input_layer.b
     _layer = batch_norm(
         Deconv2DLayer(
             layer,
             64,
             9,
             stride=4,
             crop='same',
             output_size=128,
             nonlinearity=lrelu,
             W=nn.init.GlorotUniform() if params is None else params['w4'],
             b=nn.init.Constant(0.)
             if params is None else params['b4']))  # original with relu
     # shape 64x128x128
     _params['w4'] = _layer.input_layer.input_layer.W
     _params['b4'] = _layer.input_layer.input_layer.b
     layer = LocallyConnected2DLayer(
         _layer,
         self.channels,
         7,
         stride=1,
         pad='same',
         nonlinearity=sigmoid,
         untie_biases=True,
         W=nn.init.GlorotUniform() if params is None else params['w_mu'],
         b=nn.init.Constant(0.) if params is None else params['b_mu'])
     _params['w_mu'] = layer.W
     _params['b_mu'] = layer.b
     l_dec_mu = ReshapeLayer(
         layer, ([0], self.width * self.height * self.channels))
     # relu_shift is for numerical stability - if training data has any
     # dimensions where stdev=0, allowing logsigma to approach -inf
     # will cause the loss function to become NAN. So we set the limit
     # stdev >= exp(-1 * relu_shift)
     relu_shift = 10
     layer = LocallyConnected2DLayer(
         _layer,
         self.channels,
         7,
         stride=1,
         pad='same',
         nonlinearity=lambda a: T.nnet.relu(a + relu_shift) - relu_shift,
         W=nn.init.GlorotUniform()
         if params is None else params['w_logsigma'],
         b=nn.init.Constant(0.) if params is None else params['b_logsigma'])
     _params['w_logsigma'] = layer.W
     _params['b_logsigma'] = layer.b
     l_dec_logsigma = ReshapeLayer(
         layer, ([0], self.width * self.height * self.channels))
     # shape 3x128x128
     return l_dec_mu, l_dec_logsigma, _params
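
The _params dict returned above is what makes weight sharing work: a first call with params=None creates fresh parameters and returns them, and passing that dict into a second call reuses every weight. A hedged sketch, assuming this runs inside the same class with a second latent layer l_Z2:

l_dec_mu, l_dec_logsigma, ps = self.build_decoder_conv2d_128_local_hidden(l_Z, None)
# second decoder over l_Z2, sharing all weights with the first
l_dec_mu2, l_dec_logsigma2, _ = self.build_decoder_conv2d_128_local_hidden(l_Z2, ps)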
Code example #25
File: otto-B.py Project: sronen71/otto
def build_net(randomize=False):

    layers0 = [('input', InputLayer), ('dropin', DropoutLayer),
               ('dense0', DenseLayer), ('dropout0', DropoutLayer),
               ('dense1', DenseLayer), ('dropout1', DropoutLayer),
               ('dense2', DenseLayer), ('dropout2', DropoutLayer),
               ('dense3', DenseLayer), ('dropout3', DropoutLayer),
               ('output', DenseLayer)]
    n = [256, 1024, 1024, 256]
    leak = [0.25, 0.0, 0.0, 0.0]
    drop = [0.12, 0.15, 0.2, 0.3, 0.5]
    if randomize:
        for i in range(4):
            n[i] += np.random.randint(low=-n[i] // 15, high=n[i] // 15)
        """
        for i in range(4):
            drop[i] *= np.random.uniform(0.8,1.2)
            leak[0]=np.random.uniform(0.2,0.3)
            leak[1]=np.random.uniform(0,0.1)
            leak[2]=np.random.uniform(0.0,0.05)
        """
        print("net: ", n, leak, drop)

    net0 = NeuralNet(
        layers=layers0,
        input_shape=(None, num_features),
        dropin_p=drop[0],
        dense0_num_units=n[0],
        dense0_W=HeNormal(),
        dense0_nonlinearity=LeakyRectify(leak[0]),
        dropout0_p=drop[1],
        dense1_num_units=n[1],
        dense1_nonlinearity=LeakyRectify(leak[1]),
        dense1_W=HeNormal(),
        dropout1_p=drop[2],
        dense2_num_units=n[2],
        dense2_nonlinearity=LeakyRectify(leak[2]),
        dense2_W=HeNormal(),
        dropout2_p=drop[3],
        dense3_num_units=n[3],
        dense3_nonlinearity=LeakyRectify(leak[3]),
        dense3_W=HeNormal(),
        dropout3_p=drop[4],
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        update=nesterov_momentum,
        update_learning_rate=theano.shared(tfloat32(0.02)),
        update_momentum=theano.shared(tfloat32(0.9)),
        eval_size=0.0,
        verbose=1,
        max_epochs=150,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate',
                           epochs=[50, 100],
                           rates=[2e-3, 2e-4])
        ],
        regularization_rate=1e-5,
        batch_iterator_train=BatchIterator(batch_size=128))

    return net0
Code example #26
File: otto2.py Project: sronen71/otto
layers0 = [('input', InputLayer), ('dropin', DropoutLayer),
           ('dense0', DenseLayer), ('dropout0', DropoutLayer),
           ('dense1', DenseLayer), ('dropout1', DropoutLayer),
           ('dense2', DenseLayer), ('dropout2', DropoutLayer),
           ('output', DenseLayer)]
net0 = NeuralNet(
    layers=layers0,
    input_shape=(None, num_features),
    dropin_p=0.1,
    dense0_num_units=512,
    dense0_W=HeNormal(),
    # dense0_W=Orthogonal('relu'),
    dense0_nonlinearity=LeakyRectify(0.3),
    dropout0_p=0.2,
    dense1_num_units=800,
    dense1_nonlinearity=LeakyRectify(0.0),
    dense1_W=HeNormal(),
    # dense1_W=Orthogonal('relu'),
    dropout1_p=0.3,
    dense2_num_units=1024,
    dense2_nonlinearity=LeakyRectify(0.0),
    dense2_W=HeNormal(),
    # dense2_W=Orthogonal('relu'),
    dropout2_p=0.4,
Code example #27
def test_leaky_rectify():
    from lasagne.nonlinearities import LeakyRectify
    lr = LeakyRectify(0.1)
    assert np.allclose(lr(np.array([-1, 0, 1, 2])), [-.1, 0, 1, 2])
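
For reference, LeakyRectify(alpha) computes f(x) = max(alpha * x, x); a quick NumPy sketch of the same mapping the test checks:

import numpy as np

def leaky_rectify(x, alpha=0.1):
    # same mapping as lasagne.nonlinearities.LeakyRectify(alpha)
    return np.where(x > 0, x, alpha * x)

assert np.allclose(leaky_rectify(np.array([-1., 0., 1., 2.])), [-0.1, 0., 1., 2.])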
Code example #28
    def get_output_shape_for(self, input_shape):
        """
        Computes the output shape for this layer
        :param input_shape: Shape of the symbolic input
        :return: Tuple with batch and channel dimensions unchanged and both
                 spatial dimensions scaled by self.factor
        """
        return (input_shape[0], input_shape[1],
                self.factor * input_shape[2], self.factor * input_shape[3])

    def get_output_for(self, input, **kwargs):
        """
        Constructs the Theano graph for this layer
        :param input: Symbolic input variable
        :return: Symbolic output variable
        """
        return T.nnet.abstract_conv.bilinear_upsampling(
            input,
            self.factor,
            batch_size=self.input_shape[0],
            num_input_channels=self.input_shape[1])


leaky_rectify = LeakyRectify(0.1)


def leaky_conv(input_layer, pad='same', **kwargs):
    return Conv2DLayer(input_layer,
                       nonlinearity=leaky_rectify,
                       pad=pad,
                       flip_filters=False,
                       **kwargs)


def leaky_deconv(input_layer, **kwargs):
    return Deconv2DLayer(input_layer,
                         nonlinearity=leaky_rectify,
                         filter_size=4,
                         stride=2,
                         **kwargs)
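
A hedged usage sketch of the two helpers above (filter counts are illustrative; the exact crop behavior of leaky_deconv depends on the rest of its definition, which the snippet cuts off):

from lasagne.layers import InputLayer

net = InputLayer((None, 3, 64, 64))
net = leaky_conv(net, num_filters=64, filter_size=3, stride=2)  # 64 -> 32
net = leaky_deconv(net, num_filters=32)  # stride-2 transposed convolution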
Code example #29
File: jeffrey.py Project: jkalmeij/cnn-vizlib
def from_raw():
    layers = []
    batch_size = 2

    num_channels = 3
    input_height = input_width = 512
    leakiness = 0.5

    l_in_imgdim = nn.layers.InputLayer(shape=(batch_size, 2), name='imgdim')

    l_in1 = nn.layers.InputLayer(shape=(batch_size, num_channels, input_width,
                                        input_height),
                                 name='images')
    layers.append(l_in1)

    Conv2DLayer = dnn.Conv2DDNNLayer
    MaxPool2DLayer = dnn.MaxPool2DDNNLayer
    DenseLayer = nn.layers.DenseLayer

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(7, 7),
                         stride=(2, 2),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=64,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=64,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)

    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0),
                         b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1],
                            pool_size=(3, 3),
                            stride=(2, 2),
                            name='coarse_last_pool')
    layers.append(l_pool)

    layers.append(nn.layers.DropoutLayer(layers[-1], p=0.5))
    layers.append(
        DenseLayer(layers[-1],
                   nonlinearity=None,
                   num_units=1024,
                   W=nn.init.Orthogonal(1.0),
                   b=nn.init.Constant(0.1),
                   name='first_fc_0'))
    l_pool = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(l_pool)

    l_first_repr = layers[-1]

    l_coarse_repr = nn.layers.concat([l_first_repr, l_in_imgdim])
    layers.append(l_coarse_repr)

    # Combine representations of both eyes.
    layers.append(
        nn.layers.ReshapeLayer(layers[-1], shape=(batch_size // 2, -1)))

    layers.append(nn.layers.DropoutLayer(layers[-1], p=0.5))
    layers.append(
        nn.layers.DenseLayer(layers[-1],
                             nonlinearity=None,
                             num_units=1024,
                             W=nn.init.Orthogonal(1.0),
                             b=nn.init.Constant(0.1),
                             name='combine_repr_fc'))
    l_pool = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(l_pool)

    l_hidden = nn.layers.DenseLayer(
        nn.layers.DropoutLayer(layers[-1], p=0.5),
        num_units=output_dim * 2,
        nonlinearity=None,  # No softmax yet!
        W=nn.init.Orthogonal(1.0),
        b=nn.init.Constant(0.1))
    layers.append(l_hidden)

    # Reshape back to (batch_size, 5).
    layers.append(nn.layers.ReshapeLayer(layers[-1], shape=(batch_size, 5)))

    # Apply softmax.
    l_out = ApplyNonlinearity(layers[-1],
                              nonlinearity=nn.nonlinearities.softmax)
    layers.append(l_out)

    l_ins = [l_in1, l_in_imgdim]

    nn.layers.set_all_param_values(l_out,
                                   pickle.load(open(RAW_DUMP_PATH, 'rb')))

    return l_out, l_ins
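
The pickle.load call above restores parameter values saved earlier; a hedged sketch of the matching dump step (RAW_DUMP_PATH and the nn alias are the project's own names):

values = nn.layers.get_all_param_values(l_out)
with open(RAW_DUMP_PATH, 'wb') as f:
    pickle.dump(values, f)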
Code example #30
def build_cnn(data_len, input_var=None, division=1):

    length = data_len
    channel = 1
    dropout_ratio = .5
    filter_size = 3
    pool_length = 3
    stride = 1
    dropout_internal = .5
    num_filters = 32

    my_leaky_rectify = LeakyRectify(0.2)

    # input
    my_input = lasagne.layers.InputLayer(shape=(None, channel, length),
                                         input_var=input_var)

    my_input = lasagne.layers.StandardizationLayer(my_input)

    ################################################################
    #0
    conv0 = SincConv(my_input, fs=16000, N_filt=80, Filt_dim=251)
    # conv0 = T.abs(conv0)

    pool0 = lasagne.layers.MaxPool1DLayer(conv0, pool_size=3)

    std0 = lasagne.layers.StandardizationLayer(pool0)

    act0 = lasagne.layers.NonlinearityLayer(std0,
                                            nonlinearity=my_leaky_rectify)
    ###########################################################################
    #1
    conv1 = lasagne.layers.Conv1DLayer(act0,
                                       num_filters=60,
                                       filter_size=5,
                                       W=lasagne.init.GlorotNormal())

    pool1 = lasagne.layers.MaxPool1DLayer(conv1, pool_size=3)

    std1 = lasagne.layers.StandardizationLayer(pool1)

    act1 = lasagne.layers.NonlinearityLayer(std1,
                                            nonlinearity=my_leaky_rectify)
    ##############################################################################
    #2
    conv2 = lasagne.layers.Conv1DLayer(act1,
                                       num_filters=60,
                                       filter_size=5,
                                       W=lasagne.init.GlorotNormal())

    pool2 = lasagne.layers.MaxPool1DLayer(conv2, pool_size=3)

    std2 = lasagne.layers.StandardizationLayer(pool2)

    act2 = lasagne.layers.NonlinearityLayer(std2,
                                            nonlinearity=my_leaky_rectify)
    ###########################################################################
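    # note: despite their names, conv3-conv5 below are batch-normalized
    # DenseLayers forming the fully-connected classifier head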

    conv3 = batch_norm(
        lasagne.layers.DenseLayer(act2,
                                  num_units=2048,
                                  W=lasagne.init.GlorotNormal(),
                                  nonlinearity=my_leaky_rectify))

    conv4 = batch_norm(
        lasagne.layers.DenseLayer(conv3,
                                  num_units=2048,
                                  W=lasagne.init.GlorotNormal(),
                                  nonlinearity=my_leaky_rectify))

    conv5 = batch_norm(
        lasagne.layers.DenseLayer(conv4,
                                  num_units=2048,
                                  W=lasagne.init.GlorotNormal(),
                                  nonlinearity=my_leaky_rectify))

    network = lasagne.layers.DenseLayer(
        conv5, num_units=15, nonlinearity=lasagne.nonlinearities.softmax)

    # network = lasagne.layers.NonlinearityLayer(conv6,nonlinearity=lasagne.nonlinearities.softmax)

    return network
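
A hedged training sketch for the network above; input_var, the target vector and the optimizer choice are assumptions, not part of the source:

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor3('inputs')   # (batch, channel, length)
targets = T.ivector('targets')
network = build_cnn(data_len=16000, input_var=input_var)
preds = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(preds, targets).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)
train_fn = theano.function([input_var, targets], loss, updates=updates)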