Code Example #1
def net_lenet5_wn_proj_do_tangent(input_shape, nclass, alpha):
    # 4-D input tensor: (n objects, n channels, pic size, pic size)
    input_x, target_y, Winit = T.tensor4("input"), T.vector(
        "target", dtype='int32'), init.Normal()

    net = ll.InputLayer(input_shape, input_x)
    net = layers.WeightNormProjectedDropOut(net,
                                            1024,
                                            Wfc=init.Normal(),
                                            alpha=alpha,
                                            tangent=True)
    net = layers.WeightNormProjectedDropOut(net,
                                            1024,
                                            Wfc=init.Normal(),
                                            alpha=alpha,
                                            tangent=True)
    net = layers.WeightNormProjectedDropOut(net,
                                            2048,
                                            Wfc=init.Normal(),
                                            alpha=alpha,
                                            tangent=True)
    net = layers.WeightNormProjectedDropOut(net,
                                            nclass,
                                            Wfc=init.Normal(),
                                            alpha=alpha,
                                            tangent=True,
                                            nonlinearity=nl.softmax)

    return net, input_x, target_y, 5
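
A minimal sketch of how a training function might be compiled from this builder, assuming the standard theano/lasagne imports; the input shape, class count and alpha below are placeholders, and any extra loss terms contributed by the custom WeightNormProjectedDropOut layers are not included.

import theano
import lasagne
import lasagne.layers as ll

net, input_x, target_y, _ = net_lenet5_wn_proj_do_tangent(
    (None, 1, 28, 28), nclass=10, alpha=0.5)          # placeholder arguments
probs = ll.get_output(net)                            # stochastic forward pass
loss = lasagne.objectives.categorical_crossentropy(probs, target_y).mean()
params = ll.get_all_params(net, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)
train_fn = theano.function([input_x, target_y], loss, updates=updates)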
Code Example #2
def recurrent(input_var=None,
              num_units=512,
              batch_size=64,
              seq_length=1,
              grad_clip=100):
    recurrent = []

    theano_rng = RandomStreams(rng.randint(2**15))
    # we want noise to match tanh range of activation ([-1,1])
    noise = theano_rng.uniform(size=(batch_size, seq_length, num_units),
                               low=-1.0,
                               high=1.0)
    input_var = noise if input_var is None else input_var

    recurrent.append(
        ll.InputLayer(shape=(batch_size, seq_length, num_units),
                      input_var=input_var))

    recurrent.append(
        ll.LSTMLayer(recurrent[-1], num_units,
                     grad_clipping=grad_clip))  #tanh is default

    recurrent.append(ll.SliceLayer(recurrent[-1], -1, 1))

    recurrent.append(ll.ReshapeLayer(recurrent[-1], ([0], 1, [1])))

    for layer in recurrent:
        print layer.output_shape
    print ""

    return recurrent
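
Because input_var falls back to the symbolic uniform noise defined inside the function, the stack can be sampled without feeding any data; a small usage sketch, assuming the same module-level imports and rng as the original file:

import theano
import lasagne.layers as ll

net = recurrent(batch_size=4, seq_length=1, num_units=512)
out = ll.get_output(net[-1])          # driven by the internal noise stream
sample_fn = theano.function([], out)  # no explicit inputs are required
sample = sample_fn()                  # shape: (4, 1, 512)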
Code Example #3
def encoder(z_dim=100, input_var=None, num_units=512, vae=True):
    encoder = []
    lrelu = lasagne.nonlinearities.LeakyRectify(0.2)

    encoder.append(ll.InputLayer(shape=(None, 3, 80, 160),
                                 input_var=input_var))

    encoder.append(
        ll.Conv2DLayer(encoder[-1],
                       num_filters=num_units / 8,
                       filter_size=(5, 5),
                       stride=2,
                       pad=2,
                       nonlinearity=lrelu))

    encoder.append(
        ll.batch_norm(
            ll.Conv2DLayer(encoder[-1],
                           num_filters=num_units / 4,
                           filter_size=(5, 5),
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    encoder.append(
        ll.batch_norm(
            ll.Conv2DLayer(encoder[-1],
                           num_filters=num_units / 2,
                           filter_size=(5, 5),
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    encoder.append(
        ll.batch_norm(
            ll.Conv2DLayer(encoder[-1],
                           num_filters=num_units,
                           filter_size=(5, 5),
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    encoder.append(ll.FlattenLayer(encoder[-1]))

    if vae:
        enc_mu = ll.DenseLayer(encoder[-1], num_units=z_dim, nonlinearity=None)

        enc_logsigma = ll.DenseLayer(encoder[-1],
                                     num_units=z_dim,
                                     nonlinearity=None)

        l_z = GaussianSampleLayer(enc_mu, enc_logsigma, name='Z layer')

        encoder += [enc_mu, enc_logsigma, l_z]

    for layer in encoder:
        print layer.output_shape
    print ""

    return encoder
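
A hedged sketch of the KL term a VAE objective would add on top of the reconstruction loss, assuming enc_logsigma parameterizes log(sigma) (if it is log(sigma^2), the factors of 2 change accordingly) and that the usual theano/lasagne imports are available:

import theano.tensor as T
import lasagne.layers as ll

enc = encoder(z_dim=100)
enc_mu, enc_logsigma = enc[-3], enc[-2]
mu, log_sigma = ll.get_output([enc_mu, enc_logsigma])
# KL( N(mu, sigma^2) || N(0, I) ) per sample, summed over latent dimensions
kl = 0.5 * T.sum(T.exp(2 * log_sigma) + mu ** 2 - 1 - 2 * log_sigma, axis=1)
kl_loss = kl.mean()   # to be added to the reconstruction term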
Code Example #4
def discriminator_3D(input_var=None, num_units=512, seq_length=4):
    discriminator = []
    lrelu = lasagne.nonlinearities.LeakyRectify(0.2)

    discriminator.append(
        ll.InputLayer(shape=(None, seq_length, 3, 80, 160),
                      input_var=input_var))

    # lasagne documentations requires shape :
    # (batch_size, num_input_channels, input_depth, input_rows, input_columns)
    # so we need to change dimension ordering

    discriminator.append(ll.DimshuffleLayer(discriminator[-1],
                                            (0, 2, 1, 3, 4)))

    discriminator.append(
        ll.Conv3DLayer(discriminator[-1],
                       num_filters=num_units / 8,
                       filter_size=5,
                       stride=2,
                       pad=2,
                       nonlinearity=lrelu))

    discriminator.append(
        ll.batch_norm(
            ll.Conv3DLayer(discriminator[-1],
                           num_filters=num_units / 4,
                           filter_size=5,
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    discriminator.append(
        ll.batch_norm(
            ll.Conv3DLayer(discriminator[-1],
                           num_filters=num_units / 2,
                           filter_size=5,
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    discriminator.append(
        ll.batch_norm(
            ll.Conv3DLayer(discriminator[-1],
                           num_filters=num_units,
                           filter_size=5,
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    discriminator.append(ll.FlattenLayer(discriminator[-1]))

    discriminator.append(
        ll.DenseLayer(discriminator[-1], num_units=1, nonlinearity=None))

    for layer in discriminator:
        print layer.output_shape
    print ""

    return discriminator
Code Example #5
def build_siamese(layer):
    """"""
    smx = nonlinearities.softmax
    lnr = nonlinearities.linear
    layers = L.get_all_layers(layer)
    nl = filter(
        lambda l: hasattr(l, 'nonlinearity') and (
            (l.nonlinearity != smx) and (l.nonlinearity != lnr)),
        layers)[0].nonlinearity

    if len(layers[0].output_shape) == 3:
        Xl = T.tensor3('left')
        Xr = T.tensor3('right')
    elif len(layers[0].output_shape) == 4:
        Xl = T.tensor4('left')
        Xr = T.tensor4('right')

    Ol = L.get_output(layer, inputs=Xl)
    # Ol_vl = L.get_output(layer, inputs=Xl, deterministic=True)
    Or = L.get_output(layer, inputs=Xr)
    O = T.concatenate([Ol, Or], axis=-1)

    layer = L.InputLayer((None, layer.output_shape[-1] * 2), input_var=O)
    layer = L.DenseLayer(layer, 128, nonlinearity=None, name='hc1')
    layer = L.BatchNormLayer(layer)
    layer = L.NonlinearityLayer(layer, nonlinearity=nl)
    layer = L.DenseLayer(layer, 2, nonlinearity=smx)

    return layer, (Xl, Xr)
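
A minimal training sketch for the siamese head returned above, assuming pair labels arrive as an int32 vector and the usual imports. Note that get_all_params on the head collects only the head's own weights, because the base network is attached through a symbolic expression rather than through the layer graph; its parameters would have to be gathered separately if they should also be trained.

import theano
import theano.tensor as T
import lasagne
from lasagne import layers as L
from lasagne.objectives import categorical_crossentropy

net, (Xl, Xr) = build_siamese(base_layer)   # base_layer: any embedding network
y = T.ivector('pair_label')                 # 0 = different, 1 = same
probs = L.get_output(net)
loss = categorical_crossentropy(probs, y).mean()
params = L.get_all_params(net, trainable=True)   # head parameters only
updates = lasagne.updates.adam(loss, params)
train_fn = theano.function([Xl, Xr, y], loss, updates=updates)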
Code Example #6
def pons_cnn(params):
    """"""
    layers = L.InputLayer((None, 1, params['dur'], 128))
    print layers.output_shape

    sclr = joblib.load(params['scaler'])
    layers = L.standardize(layers,
                           sclr.mean_.astype(np.float32),
                           sclr.scale_.astype(np.float32),
                           shared_axes=(0, 1, 2))
    print layers.output_shape

    layers_timbre = L.GlobalPoolLayer(
        L.batch_norm(L.Conv2DLayer(layers, 64, (1, 96))))

    layers_rhythm = L.GlobalPoolLayer(
        L.batch_norm(L.Conv2DLayer(layers, 64, (params['dur'] - 10, 1))))

    layers = L.ConcatLayer([layers_rhythm, layers_timbre], axis=-1)

    layers = L.DenseLayer(layers, 64, nonlinearity=nl.rectify)
    print layers.output_shape

    layers = L.DenseLayer(layers, 16, nonlinearity=nl.softmax)
    print layers.output_shape

    return layers
Code Example #7
def ptb_lstm(input_var, vocabulary_size, hidden_size, seq_len, num_layers,
             dropout, batch_size):
    l_input = L.InputLayer(shape=(batch_size, seq_len), input_var=input_var)
    l_embed = L.EmbeddingLayer(l_input,
                               vocabulary_size,
                               hidden_size,
                               W=init.Uniform(1.0))
    l_lstms = []
    for i in range(num_layers):
        l_lstm = L.LSTMLayer(l_embed if i == 0 else l_lstms[-1],
                             hidden_size,
                             ingate=L.Gate(W_in=init.GlorotUniform(),
                                           W_hid=init.Orthogonal()),
                             forgetgate=L.Gate(W_in=init.GlorotUniform(),
                                               W_hid=init.Orthogonal(),
                                               b=init.Constant(1.0)),
                             cell=L.Gate(
                                 W_in=init.GlorotUniform(),
                                 W_hid=init.Orthogonal(),
                                 W_cell=None,
                                 nonlinearity=lasagne.nonlinearities.tanh),
                             outgate=L.Gate(W_in=init.GlorotUniform(),
                                            W_hid=init.Orthogonal()))
        l_lstms.append(l_lstm)
    l_drop = L.DropoutLayer(l_lstms[-1], dropout)
    l_out = L.DenseLayer(l_drop, num_units=vocabulary_size, num_leading_axes=2)
    l_out = L.ReshapeLayer(
        l_out,
        (l_out.output_shape[0] * l_out.output_shape[1], l_out.output_shape[2]))
    l_out = L.NonlinearityLayer(l_out,
                                nonlinearity=lasagne.nonlinearities.softmax)
    return l_out
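
Sketch of a language-model loss for the network above: targets are flattened so that they line up row-for-row with the (batch_size * seq_len, vocabulary_size) softmax output. The model sizes are illustrative, and the usual theano/lasagne imports are assumed.

import theano
import theano.tensor as T
import lasagne
from lasagne import layers as L

input_var = T.imatrix('input')           # (batch_size, seq_len) word ids
target_var = T.imatrix('target')         # next-word ids, same shape
l_out = ptb_lstm(input_var, vocabulary_size=10000, hidden_size=650,
                 seq_len=35, num_layers=2, dropout=0.5, batch_size=20)
probs = L.get_output(l_out)
loss = lasagne.objectives.categorical_crossentropy(
    probs, target_var.flatten()).mean()
params = L.get_all_params(l_out, trainable=True)
updates = lasagne.updates.adam(loss, params)
train_fn = theano.function([input_var, target_var], loss, updates=updates)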
Code Example #8
    def __build_48_net__(self):

        model24 = self.subnet
        network = layers.InputLayer((None, 3, 48, 48),
                                    input_var=self.__input_var__)
        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.batch_norm(
            layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2))
        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.BatchNormLayer(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
        network = layers.DenseLayer(network, num_units=256, nonlinearity=relu)
        #network = layers.Conv2DLayer(network,num_filters=256,filter_size=(1,1),stride=1,nonlinearity=relu)
        denselayer24 = model24.net.input_layer
        network = layers.ConcatLayer([network, denselayer24])
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
Code Example #9
def arch_class_02(dim_desc, dim_labels, param_arch, logger):
    logger.info('Architecture:')
    # input layers
    desc = LL.InputLayer(shape=(None, dim_desc))
    patch_op = LL.InputLayer(input_var=Tsp.csc_fmatrix('patch_op'),
                             shape=(None, None))
    logger.info('   input  : dim = %d' % dim_desc)
    # layer 1: dimensionality reduction to 16
    n_dim = 16
    net = LL.DenseLayer(desc, n_dim)
    logger.info('   layer 1: FC%d' % n_dim)
    # layer 2: anisotropic convolution layer with 16 filters
    n_filters = 16
    net = CL.GCNNLayer([net, patch_op], n_filters, nrings=5, nrays=16)
    string = '   layer 2: IC%d' % n_filters
    if param_arch['flag_batchnorm'] is True:
        net = LL.batch_norm(net)
        string = string + ' + batch normalization'
    logger.info(string)
    # layer 3: anisotropic convolution layer with 32 filters
    n_filters = 32
    net = CL.GCNNLayer([net, patch_op], n_filters, nrings=5, nrays=16)
    string = '   layer 3: IC%d' % n_filters
    if param_arch['flag_batchnorm'] is True:
        net = LL.batch_norm(net)
        string = string + ' + batch normalization'
    logger.info(string)
    # layer 4: anisotropic convolution layer with 64 filters
    n_filters = 64
    net = CL.GCNNLayer([net, patch_op], n_filters, nrings=5, nrays=16)
    string = '   layer 4: IC%d' % n_filters
    if param_arch['flag_batchnorm'] is True:
        net = LL.batch_norm(net)
        string = string + ' + batch normalization'
    logger.info(string)
    # layer 5: softmax layer producing a probability on the labels
    if param_arch['non_linearity'] == 'softmax':
        cla = LL.DenseLayer(net, dim_labels, nonlinearity=LN.softmax)
        string = '   layer 5: softmax'
    elif param_arch['non_linearity'] == 'log_softmax':
        cla = LL.DenseLayer(net, dim_labels, nonlinearity=log_softmax)
        string = '   layer 5: log-softmax'
    else:
        raise Exception('[e] the chosen non-linearity is not supported!')
    logger.info(string)
    # outputs
    return desc, patch_op, cla, net, logger
Code Example #10
def build_model(n_input,
                n_hidden,
                optimizer=adagrad,
                l2_weight=1e-4,
                l1_weight=1e-2):
    '''
    Build a NN model for estimating the model function.
    '''
    global LR

    input_A = L.InputLayer((None, n_input), name='A')
    layer_A = L.DenseLayer(input_A, n_hidden, b=None, nonlinearity=identity)

    input_B = L.InputLayer((None, n_input), name='B')
    layer_B = L.DenseLayer(input_B, n_hidden, b=None, nonlinearity=identity)

    merge_layer = L.ElemwiseSumLayer((layer_A, layer_B))

    output_layer = L.DenseLayer(merge_layer, 1, b=None,
                                nonlinearity=identity)  # output is scalar

    x1 = T.matrix('x1')
    x2 = T.matrix('x2')
    y = T.matrix('y')

    out = L.get_output(output_layer, {input_A: x1, input_B: x2})
    params = L.get_all_params(output_layer)
    loss = T.mean(squared_error(out, y))

    # add l1 penalty
    l1_penalty = regularize_layer_params([layer_A, layer_B, output_layer], l1)

    # add l2 penalty
    l2_penalty = regularize_layer_params([layer_A, layer_B, output_layer], l2)

    # get loss + penalties
    loss = loss + l1_penalty * l1_weight + l2_penalty * l2_weight

    updates_sgd = optimizer(loss, params, learning_rate=LR)
    updates = apply_momentum(updates_sgd, params, momentum=0.9)
    # updates = optimizer(loss,params,learning_rate=LR)

    f_train = theano.function([x1, x2, y], loss, updates=updates)
    f_test = theano.function([x1, x2, y], loss)
    f_out = theano.function([x1, x2], out)

    return f_train, f_test, f_out, output_layer
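
An illustrative call of the compiled functions returned above, using random data; the sizes are arbitrary, and the module-level LR and the default adagrad optimizer are assumed to be defined as in the original file.

import numpy as np

f_train, f_test, f_out, output_layer = build_model(n_input=10, n_hidden=32)
x1 = np.random.rand(64, 10).astype('float32')
x2 = np.random.rand(64, 10).astype('float32')
y = np.random.rand(64, 1).astype('float32')
for epoch in range(20):
    train_loss = f_train(x1, x2, y)
test_loss = f_test(x1, x2, y)
predictions = f_out(x1, x2)          # shape: (64, 1)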
Code Example #11
def create_network(n_actions,
                   file_name,
                   width=64,
                   height=48,
                   gru_units=64,
                   att_units=48):

    l_action = L.InputLayer((None, ))
    l_input = L.InputLayer((None, 1, height, width))
    l_attention = L.InputLayer((None, 24))
    l_hidden1 = L.InputLayer((None, gru_units))
    l_hidden2 = L.InputLayer((None, gru_units))

    l_cnn = build_cnn(l_input)

    l_gru = GRUStepLayer([l_action, l_cnn, l_attention, l_hidden1, l_hidden2],
                         gru_units, att_units, n_actions)

    l_out = L.DenseLayer(l_gru, num_units=n_actions, nonlinearity=LN.softmax)

    with open(file_name, 'rb') as file:
        L.set_all_param_values(l_out, cPickle.load(file))

    action = T.ivector('action')
    state = T.tensor4('state')
    attention = T.matrix('attention')
    hidden1 = T.matrix('hidden1')
    hidden2 = T.matrix('hidden2')

    step_hidden2, step_output = L.get_output(
        [l_gru, l_out], {
            l_action: action,
            l_input: state,
            l_attention: attention,
            l_hidden1: hidden1,
            l_hidden2: hidden2
        },
        deterministic=True)

    step_hidden1 = l_gru.hidden1
    step_attention = l_gru.attention

    _output_step = theano.function(
        [action, state, attention, hidden1, hidden2],
        [step_attention, step_hidden1, step_hidden2, step_output])

    return l_gru, _output_step
Code Example #12
def build_discriminator(input_var=None,
                        nfilters=[64, 128, 256, 512],
                        input_channels=3):

    ###############################
    # Build Network Configuration #
    ###############################

    print('... Building the discriminator')

    leaky = nonlinearities.LeakyRectify(0.2)

    # Input of the network : shape = (batch_size, 3, 64, 64)
    network = layers.InputLayer(shape=(None, input_channels, 64, 64),
                                input_var=input_var)

    # Conv layer : shape = (batch_size, 64, 32, 32)
    network = layers.Conv2DLayer(network,
                                 num_filters=nfilters[0],
                                 filter_size=(5, 5),
                                 stride=2,
                                 pad=2,
                                 nonlinearity=leaky)

    # Conv layer : shape = (batch_size, 128, 16, 16)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[1],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Conv layer : shape = (batch_size, 256, 8, 8)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[2],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Conv layer : shape = (batch_size, 512, 4, 4)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[3],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Flatten layer :shape = (batch_size, 8192)
    network = lasagne.layers.FlattenLayer(network)

    # Dense layer :shape = (batch_size, 1)
    network = lasagne.layers.DenseLayer(
        network, 1, nonlinearity=lasagne.nonlinearities.sigmoid)

    return network
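
A hedged sketch of the standard GAN discriminator objective for this network (sigmoid output): real images should score close to 1 and generated ones close to 0. The expression fake_images is assumed to come from a separate generator network and is not defined here.

import theano.tensor as T
import lasagne
from lasagne import layers

real_images = T.tensor4('real_images')
discriminator = build_discriminator(input_var=real_images)
p_real = layers.get_output(discriminator)
p_fake = layers.get_output(discriminator, inputs=fake_images)  # generator output
d_loss = (lasagne.objectives.binary_crossentropy(p_real, T.ones_like(p_real)) +
          lasagne.objectives.binary_crossentropy(p_fake, T.zeros_like(p_fake))).mean()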
Code Example #13
def build_autoencoder_network():
    input_var = T.tensor4('input_var');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(1,1), stride=1, pad='same', nonlinearity=leaky_rectify));
    pool1 =            layers.MaxPool2DLayer(layer, (2, 2), 2);
    layer = batch_norm(layers.Conv2DLayer(pool1, 240,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(1,1), stride=1, pad='same', nonlinearity=leaky_rectify));
    pool2 =            layers.MaxPool2DLayer(layer, (2, 2), 2);
    layer = batch_norm(layers.Conv2DLayer(pool2, 640,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));

    featm    = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm    = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1,   filter_size=(1,1), nonlinearity=None),   beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=90.0, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map");
    layer    = ChInnerProdMerge(feat_map, mask_map, name="encoder");

    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(1,1), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer =            layers.InverseLayer(layer, pool2);
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120,  filter_size=(1,1), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer =            layers.InverseLayer(layer, pool1);
    layer = batch_norm(layers.Deconv2DLayer(layer, 120,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer =            layers.Deconv2DLayer(layer, 3,    filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    glblf = batch_norm(layers.Conv2DLayer(prely, 128,  filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5,    filter_size=(1,1), nonlinearity=rectify), name="global_feature");

    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf =            layers.Deconv2DLayer(glblf, 3,   filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    layer = layers.ElemwiseSumLayer([layer, glblf]);

    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);

    return network, input_var, mask_var, output_var;
Code Example #14
def input_block(net, config, melspec=False, verbose=True):
    """
    """
    # load scaler
    sclr = joblib.load(config.paths.preproc.scaler)

    net['input'] = L.InputLayer(shape=get_in_shape(config), name='input')
    sigma = theano.shared(np.array(0., dtype=np.float32),
                          name='noise_controller')
    net['noise'] = L.GaussianNoiseLayer(net['input'],
                                        sigma=sigma,
                                        name='input_corruption')

    if config.hyper_parameters.input == "melspec":

        net['sclr'] = L.standardize(net['noise'],
                                    offset=sclr.mean_.astype(np.float32),
                                    scale=sclr.scale_.astype(np.float32),
                                    shared_axes=(0, 1, 2))
    else:
        net['stft'] = STFTLayer(L.ReshapeLayer(net['noise'],
                                               ([0], [1], [2], 1),
                                               name='reshape'),
                                n_fft=config.hyper_parameters.n_fft,
                                hop_size=config.hyper_parameters.hop_size)

        if melspec:
            net['melspec'] = MelSpecLayer(
                sr=config.hyper_parameters.sample_rate,
                n_fft=config.hyper_parameters.n_fft,
                n_mels=128,
                log_amplitude=True)

            net['sclr'] = L.standardize(net['melspec'],
                                        offset=sclr.mean_.astype(np.float32),
                                        scale=sclr.scale_.astype(np.float32),
                                        shared_axes=(0, 1, 2))

        else:
            net['sclr'] = L.standardize(net['stft'],
                                        offset=sclr.mean_.astype(np.float32),
                                        scale=sclr.scale_.astype(np.float32),
                                        shared_axes=(0, 1, 2))

            # only pooling freq domain
            net['stft.pl'] = L.MaxPool2DLayer(net['sclr'],
                                              pool_size=(2, 1),
                                              name='stft.pl')

    if verbose:
        print(net['input'].output_shape)
        # if melspec:
        #     print(net['melspec'].output_shape)
        # else:
        #     print(net['stft'].output_shape)
        #     print(net['stft.pl'].output_shape)
        print(net['sclr'].output_shape)

    return net, sigma
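
The returned sigma is a Theano shared variable, so the input corruption level can be annealed from the training loop without recompiling anything; a small illustrative schedule (the decay constants are arbitrary, and config is the original project's configuration object):

import numpy as np

net, sigma = input_block({}, config)
n_epochs = 100
for epoch in range(n_epochs):
    # linearly decay the noise level from 0.1 to 0 over training
    sigma.set_value(np.float32(0.1 * (1.0 - float(epoch) / n_epochs)))
    # ... run the usual training updates for this epoch ...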
Code Example #15
File: feat.py  Project: EggplantElf/sclem2017-tagger
    def get_emb_layer(self, sidx, tidx=None, avg=False):
        # do not create multiple emb_layer for the same feature
        # if self.emb_layer:
        #     return self.emb_layer

        if tidx is None:
            fidx = self.data[sidx]  # (100, 161) or (100, 161, 16)
            fidx_layer = L.InputLayer(shape=[None] + self.data_shape,
                                      input_var=fidx)
        else:
            fidx = self.data[sidx.dimshuffle(0, 'x'), tidx]  # (100, 26)
            fidx_layer = L.InputLayer(shape=[None] +
                                      self.config[self.name]['feat_shape'],
                                      input_var=fidx)
        self.emb_layer = self.get_emb_layer_from_idx(fidx_layer, avg)

        return self.emb_layer
Code Example #16
def makeRNN(xInputRNN, hiddenInitRNN, hidden2InitRNN, sequenceLen, vocabularySize, neuralNetworkSz):

	input_Layer = L.InputLayer(input_var = xInputRNN, shape = (None, sequenceLen))
	hidden_Layer = L.InputLayer(input_var = hiddenInitRNN, shape = (None, neuralNetworkSz))
	hidden_Layer2 = L.InputLayer(input_var = hidden2InitRNN, shape = (None, neuralNetworkSz))
	input_Layer = L.EmbeddingLayer(input_Layer, input_size = vocabularySize, output_size = neuralNetworkSz)

	RNN_Layer = L.LSTMLayer(input_Layer, num_units = neuralNetworkSz, hid_init = hidden_Layer)
	h = L.DropoutLayer(RNN_Layer, p = dropOutProbability)
	RNN_Layer2 = L.LSTMLayer(h, num_units = neuralNetworkSz, hid_init = hidden_Layer2)
	h = L.DropoutLayer(RNN_Layer2, p = dropOutProbability)

	layerShape = L.ReshapeLayer(h, (-1, neuralNetworkSz))
	
	predictions = NCE(layerShape, num_units = vocabularySize, Z = Z)
	predictions = L.ReshapeLayer(predictions, (-1, sequenceLen, vocabularySize))
	return RNN_Layer, RNN_Layer2, predictions
Code Example #17
File: init_policy.py  Project: quinnliu/pixelworld
    def __init__(self,
                 input_shape,
                 output_dim,
                 hidden_dim,
                 hidden_nonlinearity=NL.rectify,
                 output_nonlinearity=None,
                 name=None,
                 input_var=None,
                 output_b_init=LI.Constant(0.)):
        l_in = L.InputLayer(shape=(None, None) + input_shape,
                            input_var=input_var)
        l_step_input = L.InputLayer(shape=(None, ) + input_shape)
        l_step_prev_hidden = L.InputLayer(shape=(None, hidden_dim))
        l_gru = GRULayer(l_in,
                         num_units=hidden_dim,
                         hidden_nonlinearity=hidden_nonlinearity,
                         hidden_init_trainable=False)
        l_gru_flat = L.ReshapeLayer(l_gru, shape=(-1, hidden_dim))
        l_output_flat = L.DenseLayer(l_gru_flat,
                                     num_units=output_dim,
                                     nonlinearity=output_nonlinearity,
                                     b=output_b_init)
        l_output = OpLayer(
            l_output_flat,
            op=lambda flat_output, l_input: flat_output.reshape(
                (l_input.shape[0], l_input.shape[1], -1)),
            shape_op=lambda flat_output_shape, l_input_shape:
            (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
            extras=[l_in])
        l_step_hidden = l_gru.get_step_layer(l_step_input, l_step_prev_hidden)
        l_step_output = L.DenseLayer(
            l_step_hidden,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            W=l_output_flat.W,
            b=l_output_flat.b,
        )

        self._l_in = l_in
        self._hid_init_param = l_gru.h0
        self._l_gru = l_gru
        self._l_out = l_output
        self._l_step_input = l_step_input
        self._l_step_prev_hidden = l_step_prev_hidden
        self._l_step_hidden = l_step_hidden
        self._l_step_output = l_step_output
Code Example #18
def buildModel(mtype=1):

    print "BUILDING MODEL TYPE", mtype, "..."

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net
Code Example #19
    def init_discriminator(self, first_layer, input_var=None):
        """
        Initialize the DCGAN discriminator network using lasagne
        Returns the network
        """

        lrelu = nonlinearities.LeakyRectify(0.2)
        layers = []

        l_in = lyr.InputLayer((None, 3, 64, 64), input_var)
        layers.append(l_in)

        l_1 = lyr.Conv2DLayer(incoming=l_in,
                              num_filters=first_layer,
                              filter_size=5,
                              stride=2,
                              pad=2,
                              nonlinearity=lrelu)
        layers.append(l_1)

        l_2 = lyr.batch_norm(
            lyr.Conv2DLayer(incoming=l_1,
                            num_filters=first_layer * 2,
                            filter_size=5,
                            stride=2,
                            pad=2,
                            nonlinearity=lrelu))
        layers.append(l_2)

        l_3 = lyr.batch_norm(
            lyr.Conv2DLayer(incoming=l_2,
                            num_filters=first_layer * 4,
                            filter_size=5,
                            stride=2,
                            pad=2,
                            nonlinearity=lrelu))
        layers.append(l_3)

        l_4 = lyr.batch_norm(
            lyr.Conv2DLayer(incoming=l_3,
                            num_filters=first_layer * 8,
                            filter_size=5,
                            stride=2,
                            pad=2,
                            nonlinearity=lrelu))
        l_4 = lyr.FlattenLayer(l_4)
        layers.append(l_4)

        l_out = lyr.DenseLayer(incoming=l_4,
                               num_units=1,
                               nonlinearity=nonlinearities.sigmoid)
        layers.append(l_out)

        if self.verbose:
            for i, layer in enumerate(layers):
                print 'discriminator layer %s output shape:' % i, layer.output_shape

        return l_out
Code Example #20
def build_correlation_fft(x, y, size):
    pnet = ll.InputLayer((None, 3, 101, 101), input_var=None)
    pnet = ll.BatchNormLayer(pnet)
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(
        ll.BatchNormLayer(pnet),
        nonlinearity=l.nonlinearities.LeakyRectify(0.1))
    pnet = ll.Pool2DLayer(pnet, (3, 3), stride=(2, 2))
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(
        ll.BatchNormLayer(pnet),
        nonlinearity=l.nonlinearities.LeakyRectify(0.1))
    pnet = ll.Conv2DLayer(pnet, 32, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.BatchNormLayer(pnet)
    x_p, y_p = ll.get_output(pnet, x), ll.get_output(pnet, y)
    x_p, y_p = fft.rfft(x_p, 'ortho'), fft.rfft(y_p, 'ortho')

    XX, XY = T.zeros_like(x_p), T.zeros_like(y_p)
    XX = T.set_subtensor(
        XX[:, :, :, :, 0], x_p[:, :, :, :, 0] * x_p[:, :, :, :, 0] +
        x_p[:, :, :, :, 1] * x_p[:, :, :, :, 1])
    XY = T.set_subtensor(
        XY[:, :, :, :, 0], x_p[:, :, :, :, 0] * y_p[:, :, :, :, 0] +
        x_p[:, :, :, :, 1] * y_p[:, :, :, :, 1])
    XY = T.set_subtensor(
        XY[:, :, :, :, 1], x_p[:, :, :, :, 0] * y_p[:, :, :, :, 1] -
        x_p[:, :, :, :, 1] * y_p[:, :, :, :, 0])
    xx = fft.irfft(XX, 'ortho')
    xy = fft.irfft(XY, 'ortho')

    z_p = T.concatenate((xx, xy), axis=1)
    z_p *= T.constant(hanningwindow(50))
    net = ll.InputLayer((None, 64, 50, 50), input_var=z_p)
    net = ll.BatchNormLayer(net)
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.BatchNormLayer(ll.Conv2DLayer(net, 10, (1, 1), nonlinearity=None))
    net = ll.DenseLayer(net, size**2, b=None, nonlinearity=None)
    net = ll.ReshapeLayer(net, ([0], 1, size, size))
    return pnet, net
Code Example #21
def build_MLP_mnist_bn():
    net = {}
    net['input'] = ll.InputLayer(shape=(None, 1, 28, 28), input_var=None)
    net['d0'] = batch_norm(ll.DenseLayer(net['input'], num_units=1200, nonlinearity=lasagne.nonlinearities.rectify), steps=num_steps)
    net['mu1'] = ll.DenseLayer(net['d0'], num_units=28*28, nonlinearity=lasagne.nonlinearities.sigmoid)
    net['var1'] = ll.DenseLayer(net['d0'], num_units=28*28, nonlinearity=lasagne.nonlinearities.sigmoid)
    net['mu'] = ll.ReshapeLayer(net['mu1'], (([0], 1, 28, 28)))
    net['var'] = ll.ReshapeLayer(net['var1'], (([0], 1, 28, 28)))
    return net
Code Example #22
def build_segmenter_simple():
    inp = ll.InputLayer(shape=(None, 1, None, None), name='input')
    conv1 = ll.Conv2DLayer(inp,
                           num_filters=32,
                           filter_size=(7, 7),
                           pad='same',
                           W=Orthogonal(),
                           nonlinearity=rectify,
                           name='conv1')
    conv2 = ll.Conv2DLayer(conv1,
                           num_filters=64,
                           filter_size=(5, 5),
                           pad='same',
                           W=Orthogonal(),
                           nonlinearity=rectify,
                           name='conv2')
    conv3 = ll.Conv2DLayer(conv2,
                           num_filters=128,
                           filter_size=(5, 5),
                           pad='same',
                           W=Orthogonal(),
                           nonlinearity=rectify,
                           name='conv3')
    conv4 = ll.Conv2DLayer(conv3,
                           num_filters=64,
                           filter_size=(5, 5),
                           pad='same',
                           W=Orthogonal(),
                           nonlinearity=rectify,
                           name='conv4')
    conv5 = ll.Conv2DLayer(conv4,
                           num_filters=32,
                           filter_size=(3, 3),
                           pad='same',
                           W=Orthogonal(),
                           nonlinearity=rectify,
                           name='conv5')
    conv6 = ll.Conv2DLayer(conv5,
                           num_filters=16,
                           filter_size=(3, 3),
                           pad='same',
                           W=Orthogonal(),
                           nonlinearity=rectify,
                           name='conv6')

    # our output layer is also convolutional; remember that our Y is going to be the same exact size as the input
    conv_final = ll.Conv2DLayer(conv6,
                                num_filters=2,
                                filter_size=(3, 3),
                                pad='same',
                                W=Orthogonal(),
                                name='conv_final',
                                nonlinearity=linear)
    # we need to reshape it to be a (batch*n*m x 2), i.e. unroll s.t. the feature dimension is preserved
    softmax = Softmax4D(conv_final, name='4dsoftmax')

    return [softmax]
Code Example #23
    def _forward(self, inputX, hidden_units):
        rows, cols = inputX.shape
        layer = layers.InputLayer(shape=(rows, cols), input_var=self.X)
        layer = layers.DenseLayer(layer, num_units=hidden_units,
                                  W=init.GlorotUniform(), b=init.Uniform(),
                                  nonlinearity=nonlinearities.tanh)
        Hout = layers.get_output(layer)
        forwardfn = theano.function([self.X], Hout, allow_input_downcast=True)
        return forwardfn(inputX)
Code Example #24
def encoder_u(latent_dim, input_var=None):
    # input is concatenation of MNIST digit and one-hot encoded label
    input = layers.InputLayer(shape=(None, DATA_DIM), input_var=input_var)
    h1 = lasagne.layers.DenseLayer(input, NETWORK_DIM, nonlinearity=tanh)
    h2 = lasagne.layers.DenseLayer(h1, NETWORK_DIM, nonlinearity=tanh)
    h3 = lasagne.layers.DenseLayer(h2, NETWORK_DIM, nonlinearity=tanh)
    mu = lasagne.layers.DenseLayer(h3, latent_dim, nonlinearity=linear)
    log_std = lasagne.layers.DenseLayer(h3, latent_dim, nonlinearity=linear)
    return mu, log_std
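
A hedged sketch of the reparameterization trick on top of this encoder, assuming log_std really is log(sigma) and that the usual theano/lasagne imports are in scope:

import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from lasagne import layers

x = T.matrix('x')
mu_layer, log_std_layer = encoder_u(latent_dim=20, input_var=x)
mu, log_std = layers.get_output([mu_layer, log_std_layer])
srng = RandomStreams(seed=42)
eps = srng.normal(size=mu.shape)
z = mu + T.exp(log_std) * eps   # z ~ N(mu, sigma^2), differentiable in mu and log_std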
Code Example #25
def test_bilinear_group_conv(x_shape, u_shape, batch_size=2):
    X_var = T.tensor4('X')
    U_var = T.matrix('U')
    l_x = L.InputLayer(shape=(None, ) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None, ) + u_shape, input_var=U_var, name='u')
    X = np.random.random((batch_size, ) + x_shape).astype(theano.config.floatX)
    U = np.random.random((batch_size, ) + u_shape).astype(theano.config.floatX)

    l_xu_outer = LT.OuterProductLayer([l_x, l_u])
    l_x_diff_pred = LT.GroupConv2DLayer(l_xu_outer,
                                        x_shape[0],
                                        filter_size=5,
                                        stride=1,
                                        pad='same',
                                        untie_biases=True,
                                        groups=x_shape[0],
                                        nonlinearity=None,
                                        W=init.Uniform(),
                                        b=init.Uniform())
    X_diff_pred_var = L.get_output(l_x_diff_pred)
    X_diff_pred_fn = theano.function([X_var, U_var], X_diff_pred_var)
    X_diff_pred = X_diff_pred_fn(X, U)

    u_dim, = u_shape
    l_x_convs = []
    for i in range(u_dim + 1):
        l_x_conv = LT.GroupConv2DLayer(
            l_x,
            x_shape[0],
            filter_size=5,
            stride=1,
            pad='same',
            untie_biases=True,
            groups=x_shape[0],
            nonlinearity=None,
            W=l_x_diff_pred.W.get_value()[:, i:i + 1],
            b=l_x_diff_pred.b.get_value() if i == u_dim else None)
        l_x_convs.append(l_x_conv)
    l_x_diff_pred_bw = LT.BatchwiseSumLayer(l_x_convs + [l_u])
    X_diff_pred_bw_var = L.get_output(l_x_diff_pred_bw)
    X_diff_pred_bw_fn = theano.function([X_var, U_var], X_diff_pred_bw_var)
    X_diff_pred_bw = X_diff_pred_bw_fn(X, U)

    assert np.allclose(X_diff_pred, X_diff_pred_bw, atol=1e-7)
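
For reference, the consistency check above can be invoked directly; the shapes below are illustrative.

test_bilinear_group_conv(x_shape=(3, 32, 32), u_shape=(4,), batch_size=2)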
Code Example #26
    def __init__(self,
                 output_dim,
                 hidden_sizes,
                 hidden_nonlinearity,
                 output_nonlinearity,
                 hidden_W_init=LI.GlorotUniform(),
                 hidden_b_init=LI.Constant(0.),
                 output_W_init=LI.GlorotUniform(),
                 output_b_init=LI.Constant(0.),
                 name=None,
                 input_var=None,
                 input_layer=None,
                 input_shape=None,
                 batch_norm=False):

        Serializable.quick_init(self, locals())

        if name is None:
            prefix = ""
        else:
            prefix = name + "_"

        if input_layer is None:
            l_in = L.InputLayer(shape=(None, ) + input_shape,
                                input_var=input_var)
        else:
            l_in = input_layer
        self._layers = [l_in]
        l_hid = l_in
        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="%shidden_%d" % (prefix, idx),
                W=hidden_W_init,
                b=hidden_b_init,
            )
            if batch_norm:
                l_hid = L.batch_norm(l_hid)
            self._layers.append(l_hid)

        l_out = L.DenseLayer(
            l_hid,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="%soutput" % (prefix, ),
            W=output_W_init,
            b=output_b_init,
        )
        self._layers.append(l_out)
        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var
        self._output = L.get_output(l_out)
        LasagnePowered.__init__(self, [l_out])
Code Example #27
File: word2vec.py  Project: julianser/word2vec
    def model(self, query_input, batch_size, query_vocab_size,
              context_vocab_size, emb_dim_size):
        l_input = L.InputLayer(shape=(batch_size, ), input_var=query_input)
        l_embed = L.EmbeddingLayer(l_input,
                                   input_size=query_vocab_size,
                                   output_size=emb_dim_size)
        l_out = L.DenseLayer(l_embed,
                             num_units=context_vocab_size,
                             nonlinearity=lasagne.nonlinearities.softmax)
        return l_embed, l_out
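
A hedged training sketch for the embedding model above, scoring the softmax output against context-word indices with a categorical cross-entropy; w2v stands in for an instance of the surrounding class, and the vocabulary and embedding sizes are placeholders.

import theano
import theano.tensor as T
import lasagne
from lasagne import layers as L

query = T.ivector('query')        # centre-word ids, shape (batch_size,)
context = T.ivector('context')    # context-word ids, shape (batch_size,)
l_embed, l_out = w2v.model(query, batch_size=None, query_vocab_size=10000,
                           context_vocab_size=10000, emb_dim_size=128)
probs = L.get_output(l_out)
loss = lasagne.objectives.categorical_crossentropy(probs, context).mean()
params = L.get_all_params(l_out, trainable=True)
updates = lasagne.updates.adagrad(loss, params, learning_rate=0.1)
train_fn = theano.function([query, context], loss, updates=updates)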
Code Example #28
def net_lenet5(input_shape, nclass):
    input_x, target_y, Winit = T.tensor4("input"), T.vector("target", dtype='int32'), init.Normal()

    net = ll.InputLayer(input_shape, input_x)

    net = layers.DenseVarDropOutARD(net, 300, W=init.Normal())
    net = layers.DenseVarDropOutARD(net, 100, W=init.Normal())
    net = layers.DenseVarDropOutARD(net, nclass, W=init.Normal(), nonlinearity=nl.softmax)

    return net, input_x, target_y, 1
Code Example #29
def toygenerator(n_hidden, input_var=None):
    network = layers.InputLayer(shape=(None, n_hidden),
                                input_var=input_var)
    # tanh = lasagne.nonlinearities.tanh
    relu = lasagne.nonlinearities.rectify
    linear = lasagne.nonlinearities.linear
    network = lasagne.layers.DenseLayer(network, 20, nonlinearity=relu)
    network = lasagne.layers.DenseLayer(network, 20, nonlinearity=relu)
    network = lasagne.layers.DenseLayer(network, 2, nonlinearity=linear)
    return network
Code Example #30
    def build_network(self, ra_input_var, mc_input_var):
        print('Building raw dnn with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        ra_network_1 = layers.InputLayer((None, 1, 3969), ra_input_var)
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_1',
                                           dropout=False,
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_1')
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_2',
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_2')
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_3',
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_3')
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_4',
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_4')
        concat_list = [ra_network_1]
        mc_input = layers.InputLayer((None, 2, MC_LENGTH), mc_input_var)
        concat_list.append(mc_input)
        network = layers.ConcatLayer(concat_list,
                                     axis=1,
                                     cropping=[None, None, 'center'])
        network = layers.BatchNormLayer(network)
        for n in self.net_opts['layer_list']:
            network = layers.DenseLayer(
                layers.dropout(network, p=self.net_opts['dropout_p']),
                n,
                nonlinearity=lasagne.nonlinearities.rectify)
        network = layers.DenseLayer(
            layers.dropout(network, p=self.net_opts['dropout_p']),
            self.net_opts['num_class'],
            nonlinearity=lasagne.nonlinearities.softmax)

        # print(layers.get_output_shape(network))
        self.network = network
        return self.network