Example #1
def build_cnn(input_var, pretrained_model):
    #pretrained layers from vgg16
    conv1_1 = pretrained_model['conv1_1']
    conv1_2 = pretrained_model['conv1_2']

    #new layers
    network = InputLayer(shape=(None, 3, 48, 48), input_var=input_var)

    network = ConvLayer(network,
                        64,
                        3,
                        pad=1,
                        flip_filters=False,
                        W=conv1_1.W.get_value(),
                        b=conv1_1.b.get_value())

    network = ConvLayer(network,
                        64,
                        3,
                        pad=1,
                        flip_filters=False,
                        W=conv1_2.W.get_value(),
                        b=conv1_2.b.get_value())

    network = MaxPoolLayer(network, pool_size=(2, 2))

    network = DenseLayer(dropout(network, p=.5),
                         num_units=256,
                         nonlinearity=lasagne.nonlinearities.rectify)

    network = DenseLayer(dropout(network, p=.5),
                         num_units=7,
                         nonlinearity=lasagne.nonlinearities.softmax)

    return network
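
A minimal usage sketch for the builder above (the loader name and shapes here are hypothetical, not from the source): `pretrained_model` is expected to map layer names to Lasagne layers exposing `.W` and `.b`, e.g. taken from a VGG16 network dict.

import theano.tensor as T

vgg_layers = load_pretrained_vgg16()   # hypothetical loader returning {name: layer}
input_var = T.tensor4('inputs')
network = build_cnn(input_var, {'conv1_1': vgg_layers['conv1_1'],
                                'conv1_2': vgg_layers['conv1_2']})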
Example #2
def build_cnn(config, use_noise=True, use_bn=True):
    
    # NOTE: Neither Conv2DDNNLayer nor Conv2DMMLayer works
    # with the T.Rop operation, which is used for the Fisher-vector product.
    
    l_input = L.InputLayer((None, 1, config['height'], config['width']))

    l_out = L.Conv2DLayer(l_input,
        num_filters=config['cnn_f1'], filter_size=(6,6), stride=2,
        nonlinearity=relu, W=LI.HeUniform('relu'), b=LI.Constant(0.)
    )
    
    # https://arxiv.org/pdf/1602.01407v2.pdf
    # QUOTE: KFC-pre and BN can be combined synergistically.
    
    if use_bn: l_out = L.batch_norm(l_out, beta=None, gamma=None)

    l_out = L.Conv2DLayer(l_out,
        num_filters=config['cnn_f2'], filter_size=(4,4), stride=2,
        nonlinearity=relu, W=LI.HeUniform('relu'), b=LI.Constant(0.)
    )
    
    if use_bn: l_out = L.batch_norm(l_out, beta=None, gamma=None)
    if use_noise: l_out = L.dropout(l_out)
    
    l_out = L.Conv2DLayer(l_out,
        num_filters=config['cnn_f3'], filter_size=(4,4), stride=2,
        nonlinearity=relu, W=LI.HeUniform('relu'), b=LI.Constant(0.)
    )
    
    if use_bn: l_out = L.batch_norm(l_out, beta=None, gamma=None)
    if use_noise: l_out = L.dropout(l_out)
    
    return l_input, l_out
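
A minimal sketch of why the plain Conv2DLayer matters here (the config values and usage are assumed, not from the source project): the resulting graph stays compatible with T.Rop, the Jacobian-vector product used for Fisher-vector products.

import theano.tensor as T
import lasagne.layers as L

config = {'height': 84, 'width': 84, 'cnn_f1': 16, 'cnn_f2': 32, 'cnn_f3': 32}  # assumed values
l_in, l_out = build_cnn(config)
x = T.tensor4('x')
y = L.get_output(l_out, inputs={l_in: x})
params = L.get_all_params(l_out, trainable=True)
vs = [T.zeros_like(p) for p in params]   # placeholder direction vectors
jvp = T.Rop(y, params, vs)               # works because only Conv2DLayer is used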
Example #3
File: models.py Project: srviest/SoloLa-
    def build_network(self, ra_input_var, mc_input_var):
        print('Building raw dnn with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        ra_network_1 = layers.InputLayer((None, 1, 3969), ra_input_var)
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_1', dropout=False, pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_1')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_2', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_2')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_3', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_3')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_4', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_4')
        concat_list = [ra_network_1]
        mc_input = layers.InputLayer((None, 2, MC_LENGTH), mc_input_var)
        concat_list.append(mc_input)
        network = layers.ConcatLayer(concat_list, axis=1, cropping=[None, None, 'center'])
        network = layers.BatchNormLayer(network)
        for n in self.net_opts['layer_list']:
            network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']), 
                                            n, 
                                            nonlinearity=lasagne.nonlinearities.rectify)
        network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']), 
                                        self.net_opts['num_class'], 
                                        nonlinearity=lasagne.nonlinearities.softmax)
        
        # print(layers.get_output_shape(network))
        self.network = network
        return self.network
Example #4
def buildControlNN(fingerprint_vals, fingerprint_dim, output_dim, final_layer_type,
    dropout_prob=0.0, neural_net=[]):

    network_vals = {}

    l_in = InputLayer(shape=(None,fingerprint_dim), input_var=fingerprint_vals)

    #do dropout
    network_vals['drop0'] = dropout(l_in,p=dropout_prob)

    #run through the layers I have
    for layerNum,hiddenUnits in enumerate(neural_net):
        oldLayerNum = layerNum
        currLayerNum = layerNum + 1
        network_vals['dense'+str(currLayerNum)] = DenseLayer(network_vals['drop'+str(oldLayerNum)], \
            hiddenUnits,nonlinearity=lasagne.nonlinearities.rectify)
        network_vals['drop'+str(currLayerNum)] = dropout(network_vals['dense'+str(currLayerNum)],p=dropout_prob)


    if neural_net == []:
        network_vals['final_out'] = l_in
    else:
        network_vals['final_out'] = network_vals['dense'+str(currLayerNum)]


    #finally, project it into the dimensionality we want
    network_vals['output'] = DenseLayer(network_vals['final_out'],\
        num_units=output_dim, nonlinearity=final_layer_type)

    return network_vals
Example #5
def architecture(input_var, input_shape):
    network_trained = {}
    network_trained['input'] = InputLayer(input_shape, input_var)
    kwargs = dict(nonlinearity=lasagne.nonlinearities.leaky_rectify,
                  W=lasagne.init.Orthogonal())
    network_trained['conv1'] = Conv2DLayer(network_trained['input'], 64, 3,
                                           **kwargs)
    network_trained['conv2'] = Conv2DLayer(network_trained['conv1'], 32, 3,
                                           **kwargs)
    network_trained['mp3'] = MaxPool2DLayer(network_trained['conv2'], 3)
    network_trained['conv4'] = Conv2DLayer(network_trained['mp3'], 128, 3,
                                           **kwargs)
    network_trained['conv5'] = Conv2DLayer(network_trained['conv4'], 64, 3,
                                           **kwargs)
    network_trained['mp6'] = MaxPool2DLayer(network_trained['conv5'], 3)
    network_trained['fc7'] = DenseLayer(dropout(network_trained['mp6'], 0.5),
                                        256, **kwargs)
    network_trained['fc8'] = DenseLayer(dropout(network_trained['fc7'], 0.5),
                                        64, **kwargs)
    network_trained['fc9'] = DenseLayer(
        dropout(network_trained['fc8'], 0.5),
        1,
        nonlinearity=lasagne.nonlinearities.sigmoid,
        W=lasagne.init.Orthogonal())

    return network_trained
Example #6
    def build_network(self, mfcc_input_var):
        print('Building cnn with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        mfcc_network = layers.InputLayer((None, 130, MC_LENGTH), mfcc_input_var)
        mfcc_network = layers.BatchNormLayer(mfcc_network)
        mfcc_network = self.set_conv_layer(mfcc_network, 'conv_1', bnorm=False)
        mfcc_network = self.set_pool_layer(mfcc_network, 'pool_1')
        mfcc_network = self.set_conv_layer(mfcc_network, 'conv_2', bnorm=False)
        mfcc_network = self.set_pool_layer(mfcc_network, 'pool_2')
        for n in self.net_opts['layer_list']:
            # mfcc_network = layers.batch_norm(layers.DenseLayer(layers.dropout(mfcc_network, p=self.net_opts['dropout_p']), 
            #                                  n, 
            #                                  nonlinearity=lasagne.nonlinearities.rectify)
            #                                 )
            mfcc_network = layers.DenseLayer(layers.dropout(mfcc_network, p=self.net_opts['dropout_p']), 
                                            n, 
                                            nonlinearity=lasagne.nonlinearities.rectify)
            # mfcc_network = layers.BatchNormLayer(mfcc_network)
        mfcc_network = layers.DenseLayer(layers.dropout(mfcc_network, p=self.net_opts['dropout_p']), 
                                        self.net_opts['num_class'], 
                                        nonlinearity=lasagne.nonlinearities.softmax)
        
        self.network = mfcc_network
        return self.network
Example #7
def build_cascade(input_var, nb_classes, n_chanels=1, input_size=20, reshaped_input_size=20, activity=softmax):
    """
    Builds the complete network with a 1D convolution layer to integrate time from sequences of EEG images.

    :param input_var: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :return: a pointer to the output of the last layer
    """
    # Input layer
    network = InputLayer(shape=(None, 1, input_size), input_var=input_var)

    network = ReshapeLayer(network, (([0], n_chanels, reshaped_input_size)))

    network = Conv1DLayer(network, 1024, 5)

    network = MaxPool1DLayer(network, 2)

    network = DimshuffleLayer(network, (0, 2, 1))

    network = LSTMLayer(network, num_units=256, grad_clipping=100, nonlinearity=tanh)

    network = SliceLayer(network, -1, 1)

    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    network = DenseLayer(dropout(network, p=.5),
            num_units=64, nonlinearity=rectify)

    # And, finally, the output layer with 50% dropout on its inputs:
    network = DenseLayer(dropout(network, p=.5),
            num_units=nb_classes, nonlinearity=activity)

    return network
Example #8
def build_model(timesteps, pX, pY):
    net = OrderedDict()
    net['input'] = InputLayer(shape=(None, timesteps, pX, pY))
    net['conv1_1'] = batch_norm(
        ConvLayer(net['input'],
                  num_filters=32,
                  filter_size=(3, 3),
                  nonlinearity=ReLU,
                  pad='same'))

    net['conv1_2'] = batch_norm(
        ConvLayer(net['conv1_1'],
                  num_filters=32,
                  filter_size=(3, 3),
                  nonlinearity=ReLU,
                  pad='same'))

    net['pool1'] = PoolLayer(net['conv1_2'], pool_size=(2, 2))

    net['conv2_1'] = batch_norm(
        ConvLayer(net['pool1'],
                  num_filters=64,
                  filter_size=(3, 3),
                  nonlinearity=ReLU,
                  pad='same'))

    net['conv2_2'] = batch_norm(
        ConvLayer(net['conv2_1'],
                  num_filters=64,
                  filter_size=(3, 3),
                  nonlinearity=ReLU,
                  pad='same'))

    net['pool2'] = PoolLayer(net['conv2_2'], pool_size=(2, 2))

    net['conv3_1'] = batch_norm(
        ConvLayer(net['pool2'],
                  num_filters=128,
                  filter_size=(3, 3),
                  nonlinearity=ReLU,
                  pad='same'))

    net['conv3_2'] = batch_norm(
        ConvLayer(net['conv3_1'],
                  num_filters=128,
                  filter_size=(3, 3),
                  nonlinearity=ReLU,
                  pad='same'))

    net['pool3'] = PoolLayer(net['conv3_2'], pool_size=(2, 2))

    net['fcLayer1'] = batch_norm(
        DenseLayer(dropout(net['pool3'], p=0.5),
                   num_units=512,
                   nonlinearity=ReLU))

    net['output'] = DenseLayer(dropout(net['fcLayer1'], p=0.5),
                               num_units=2,
                               nonlinearity=softmax)
    return net
Example #9
    def build_network(self, ra_input_var, mc_input_var):
        print('Building raw dnn with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        ra_network_1 = layers.InputLayer((None, 1, 3969), ra_input_var)
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_1', dropout=False, pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_1')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_2', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_2')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_3', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_3')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_4', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_4')
        concat_list = [ra_network_1]
        mc_input = layers.InputLayer((None, 2, MC_LENGTH), mc_input_var)
        concat_list.append(mc_input)
        network = layers.ConcatLayer(concat_list, axis=1, cropping=[None, None, 'center'])
        network = layers.BatchNormLayer(network)
        for n in self.net_opts['layer_list']:
            network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']), 
                                            n, 
                                            nonlinearity=lasagne.nonlinearities.rectify)
        network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']), 
                                        self.net_opts['num_class'], 
                                        nonlinearity=lasagne.nonlinearities.softmax)
        
        # print(layers.get_output_shape(network))
        self.network = network
        return self.network
Example #10
File: models.py Project: srviest/SoloLa-
    def build_network(self, mfcc_input_var):
        print('Building cnn with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        mfcc_network = layers.InputLayer((None, 130, MC_LENGTH), mfcc_input_var)
        mfcc_network = layers.BatchNormLayer(mfcc_network)
        mfcc_network = self.set_conv_layer(mfcc_network, 'conv_1', bnorm=False)
        mfcc_network = self.set_pool_layer(mfcc_network, 'pool_1')
        mfcc_network = self.set_conv_layer(mfcc_network, 'conv_2', bnorm=False)
        mfcc_network = self.set_pool_layer(mfcc_network, 'pool_2')
        for n in self.net_opts['layer_list']:
            # mfcc_network = layers.batch_norm(layers.DenseLayer(layers.dropout(mfcc_network, p=self.net_opts['dropout_p']), 
            #                                  n, 
            #                                  nonlinearity=lasagne.nonlinearities.rectify)
            #                                 )
            mfcc_network = layers.DenseLayer(layers.dropout(mfcc_network, p=self.net_opts['dropout_p']), 
                                            n, 
                                            nonlinearity=lasagne.nonlinearities.rectify)
            # mfcc_network = layers.BatchNormLayer(mfcc_network)
        mfcc_network = layers.DenseLayer(layers.dropout(mfcc_network, p=self.net_opts['dropout_p']), 
                                        self.net_opts['num_class'], 
                                        nonlinearity=lasagne.nonlinearities.softmax)
        
        self.network = mfcc_network
        return self.network
Example #11
    def __init__(
        self,
        n_words,
        dim_emb,
        num_units,
        n_classes,
        w_emb=None,
        dropout=0.2,
        use_final=False,
        lr=0.001,
        pretrain=None,
    ):
        self.n_words = n_words
        self.dim_emb = dim_emb
        self.num_units = num_units
        self.n_classes = n_classes
        self.lr = lr

        if w_emb is None:
            w_emb = init.Normal()

        self.l_x = layers.InputLayer((None, None))
        self.l_m = layers.InputLayer((None, None))
        self.l_emb = layers.EmbeddingLayer(self.l_x, n_words, dim_emb, W=w_emb)
        self.l_ebd = self.l_emb

        if dropout:
            self.l_emb = layers.dropout(self.l_emb, dropout)

        if use_final:
            self.l_enc = layers.LSTMLayer(self.l_emb,
                                          num_units,
                                          mask_input=self.l_m,
                                          only_return_final=True,
                                          grad_clipping=10.0,
                                          gradient_steps=400)
            self.l_rnn = self.l_enc
        else:
            self.l_enc = layers.LSTMLayer(self.l_emb,
                                          num_units,
                                          mask_input=self.l_m,
                                          only_return_final=False,
                                          grad_clipping=10.0,
                                          gradient_steps=400)
            self.l_rnn = self.l_enc
            self.l_enc = MeanLayer(self.l_enc, self.l_m)

        if dropout:
            self.l_enc = layers.dropout(self.l_enc, dropout)

        self.l_y = layers.DenseLayer(self.l_enc,
                                     n_classes,
                                     nonlinearity=nonlinearities.softmax)

        if pretrain:
            self.load_pretrain(pretrain)
Example #12
def fcn_transfer(params):
    """"""
    assert 'inputs' in params
    layers = L.InputLayer((None, 256 * len(params['inputs'])))
    layers = L.dropout(layers)

    layers = L.DenseLayer(layers, 1024, nonlinearity=nl.elu)
    layers = L.dropout(layers)

    layers = L.DenseLayer(layers, 16, nonlinearity=nl.softmax)
    return layers
Example #13
def build_network(input_var, image_size=28, output_dim=10):

    nonlin = lasagne.nonlinearities.rectify
    W_init = lasagne.init.GlorotUniform()
    b_init = lasagne.init.Constant(0.)

    input_shape = (None, 1, image_size, image_size)

    network = nn.InputLayer(input_shape, input_var)

    network = nn.Conv2DLayer(network,
                             num_filters=64,
                             filter_size=(3, 3),
                             nonlinearity=nonlin,
                             W=W_init,
                             b=b_init)
    network = nn.Conv2DLayer(network,
                             num_filters=64,
                             filter_size=(3, 3),
                             nonlinearity=nonlin,
                             W=W_init,
                             b=b_init)
    network = nn.MaxPool2DLayer(network, pool_size=(2, 2))

    network = nn.Conv2DLayer(network,
                             num_filters=128,
                             filter_size=(3, 3),
                             W=W_init,
                             b=b_init,
                             nonlinearity=nonlin)
    network = nn.Conv2DLayer(network,
                             num_filters=128,
                             filter_size=(3, 3),
                             W=W_init,
                             b=b_init,
                             nonlinearity=nonlin)
    network = nn.MaxPool2DLayer(network, pool_size=(2, 2))

    network = nn.dropout(network, p=0.5)
    network = nn.DenseLayer(network,
                            num_units=256,
                            W=W_init,
                            b=b_init,
                            nonlinearity=nonlin)

    network = nn.dropout(network, p=0.5)
    network = nn.DenseLayer(network,
                            num_units=output_dim,
                            W=W_init,
                            b=b_init,
                            nonlinearity=None)

    return network
Example #14
    def build_network(self, ra_input_var, mc_input_var):
        print('Building raw network with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        ra_network_1 = layers.InputLayer((None, 1, None), ra_input_var)
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_1',
                                           dropout=False,
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_1')
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_2',
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_2')
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_3',
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_3')
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_4',
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_4')
        concat_list = [ra_network_1]
        mc_input = layers.InputLayer((None, 2, None), mc_input_var)
        concat_list.append(mc_input)
        network = layers.ConcatLayer(concat_list,
                                     axis=1,
                                     cropping=[None, None, 'center'])
        network = self.set_conv_layer(network, 'conv_1')
        network = self.set_pool_layer(network, 'pool_1')
        network = self.set_conv_layer(network, 'conv_2')
        network = self.set_pool_layer(network, 'pool_2')
        network = self.set_conv_layer(network, 'conv_3')
        network = layers.GlobalPoolLayer(
            network, getattr(T, self.net_opts['global_pool_func']))
        # print(layers.get_output_shape(network))
        # network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']),
        #                           self.net_opts['dens_1'],
        #                           nonlinearity=lasagne.nonlinearities.rectify)
        network = layers.DenseLayer(
            layers.dropout(network, p=self.net_opts['dropout_p']),
            self.net_opts['dens_2'],
            nonlinearity=lasagne.nonlinearities.rectify)
        network = layers.DenseLayer(
            layers.dropout(network, p=self.net_opts['dropout_p']),
            self.net_opts['num_class'],
            nonlinearity=lasagne.nonlinearities.softmax)
        # print(layers.get_output_shape(network))
        self.network = network
        return self.network
Example #15
    def __init__(self, vocab, num_users):
        self.vocab = vocab

        self._user_id = T.ivector('user ids')
        self._good_utterance = T.imatrix('utterance from user')
        self._bad_utterance = T.imatrix('utterance not from user')

        self.l_utt_enc = Enc(vocab)

        self._user_inp = InputLayer((None, ),
                                    input_var=self._user_id,
                                    name='user ids layer')
        self.l_user_emb = EmbeddingLayer(self._user_inp,
                                         num_users,
                                         DssmConfig.USER_EMB_SIZE,
                                         name='user embedding')
        self.l_user_semantic = DenseLayer(self.l_user_emb,
                                          DssmConfig.SEMANTIC_SPACE_SIZE,
                                          name='user representation')
        self.l_user_semantic = dropout(self.l_user_semantic,
                                       p=DssmConfig.DROPOUT_RATE)

        self.l_utt_semantic = DenseLayer(self.l_utt_enc.output,
                                         DssmConfig.SEMANTIC_SPACE_SIZE,
                                         name='utterance representation')
        self.l_utt_semantic = dropout(self.l_utt_semantic,
                                      p=DssmConfig.DROPOUT_RATE)

        self.user_semantic = get_output(self.l_user_semantic)
        self.user_semantic_d = get_output(self.l_user_semantic,
                                          deterministic=True)

        self.good_utt_semantic = get_output(
            self.l_utt_semantic,
            inputs={self.l_utt_enc.l_in: self._good_utterance})
        self.good_utt_semantic_d = get_output(
            self.l_utt_semantic,
            inputs={self.l_utt_enc.l_in: self._good_utterance},
            deterministic=True)

        self.bad_utt_semantic = get_output(
            self.l_utt_semantic,
            inputs={self.l_utt_enc.l_in: self._bad_utterance})
        self.bad_utt_semantic_d = get_output(
            self.l_utt_semantic,
            inputs={self.l_utt_enc.l_in: self._bad_utterance},
            deterministic=True)

        self._build_loss_and_ops()
Example #16
def build_network(input_var=None, input_shape=227):

    nf = 32
    n = lasagne.nonlinearities.tanh
    W_init = lasagne.init.GlorotUniform()

    net = nn.InputLayer((None, 3, None, None), input_var=input_var)

    # Block 1
    net = nn.Conv2DLayer(net, nf, 3, W=W_init, nonlinearity=n, pad='same')
    net = nn.Conv2DLayer(net, nf, 3, W=W_init, nonlinearity=n, pad='same')
    net = nn.MaxPool2DLayer(net, 2)

    # Block 2
    net = nn.Conv2DLayer(net, nf * 2, 3, W=W_init, nonlinearity=n, pad='same')
    net = nn.Conv2DLayer(net, nf * 2, 3, W=W_init, nonlinearity=n, pad='same')
    net = nn.SpatialPyramidPoolingLayer(net, [4, 2, 1],
                                        implementation='kaiming')

    net = nn.DenseLayer(net, 512, W=W_init, nonlinearity=n)
    net = nn.dropout(net, p=0.5)

    net = nn.DenseLayer(net, 128, W=W_init, nonlinearity=n)

    return nn.DenseLayer(net, 1, W=W_init, nonlinearity=T.nnet.sigmoid)
Example #17
def build_computation_graph(input_var, input_shape, dimensions, p_input=0.2, p_weight=0.5):
    # dimensions[-1][-1] is the output size of the last stacked layer, i.e. the size of the image vector
    input = InputLayer(shape=input_shape, input_var=input_var, name='input')
    input = dropout(input, p=p_input, name='input_drop')
    network, classification_branch, features = NecklaceNetwork(input, dimensions, LISTAWithDropout, True, True, True,
                                                               False, p_weight)
    return network, classification_branch, features
Example #18
def make_Nconvpool_1dense_branch(view, input_layer, cpdictlist, nhidden=256, dropoutp=0.5):
    """
    see: http://lasagne.readthedocs.org/en/latest/modules/layers.html
    loop through `cpdictlist` for the set of convolutional filter and pooling
    parameter specifications; when finished, add a dense layer with dropout
    """
    net = {}
    convname = ""
    mpname = ""
    for i, cpdict in enumerate(cpdictlist):
        convname = "conv-{}-{}".format(view, i)
        logger.info("Convpool {} params: {}".format(convname, cpdict))
        # the first time through, use `input`, after use the last layer
        # from the previous iteration - ah loose scoping rules...
        if i == 0:
            layer = input_layer
        else:
            layer = net[mpname]
        net[convname] = Conv2DLayer(
            layer,
            num_filters=cpdict["nfilters"],
            filter_size=cpdict["filter_size"],
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform(),
        )
        mpname = "maxpool-{}-{}".format(view, i)
        net[mpname] = MaxPool2DLayer(net[convname], pool_size=cpdict["pool_size"])
        logger.info("Convpool {}".format(mpname))
    densename = "dense-{}".format(view)
    net[densename] = DenseLayer(
        dropout(net[mpname], p=dropoutp), num_units=nhidden, nonlinearity=lasagne.nonlinearities.rectify
    )
    logger.info("Dense {} with nhidden = {}, dropout = {}".format(densename, nhidden, dropoutp))
    return net
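
A minimal usage sketch for the branch builder above (the filter counts, sizes, and input shape are hypothetical): `cpdictlist` is a list of dicts carrying the keys the loop reads, 'nfilters', 'filter_size', and 'pool_size'.

from lasagne.layers import InputLayer

cpdictlist = [
    {'nfilters': 12, 'filter_size': (8, 3), 'pool_size': (2, 1)},
    {'nfilters': 20, 'filter_size': (7, 3), 'pool_size': (2, 1)},
]
l_in_x = InputLayer(shape=(None, 1, 127, 50))   # hypothetical view shape
branch = make_Nconvpool_1dense_branch('x', l_in_x, cpdictlist,
                                      nhidden=196, dropoutp=0.5)
dense_x = branch['dense-x']                     # the branch's dense output layer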
Example #19
def architecture(input_var, input_shape):
    layer = InputLayer(input_shape, input_var)
    kwargs = dict(nonlinearity=lasagne.nonlinearities.leaky_rectify,
                  W=lasagne.init.Orthogonal())
    layer = Conv2DLayer(layer, 64, 3, **kwargs)
    layer = Conv2DLayer(layer, 32, 3, **kwargs)
    layer = MaxPool2DLayer(layer, 3)
    layer = Conv2DLayer(layer, 128, 3, **kwargs)
    layer = Conv2DLayer(layer, 64, 3, **kwargs)
    layer = MaxPool2DLayer(layer, 3)
    layer = DenseLayer(dropout(layer, 0.5), 256, **kwargs)
    layer = DenseLayer(dropout(layer, 0.5), 64, **kwargs)
    layer = DenseLayer(dropout(layer, 0.5), 1,
                       nonlinearity=lasagne.nonlinearities.sigmoid,
                       W=lasagne.init.Orthogonal())
    return layer
Example #20
def build_model(input_shape, input_var, dense=True):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=input_var)
    net['input'].num_filters = input_shape[1]
    net['conv1'] = ConvLayer(net['input'], num_filters=128, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv2'] = ConvLayer(net['conv1'], num_filters=256, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['pool1'] = ConvLayer(net['conv2'], num_filters=256, filter_size=3, stride=2, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv3'] = ConvLayer(net['pool1'], num_filters=512, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['pool2'] = ConvLayer(net['conv3'], num_filters=512, filter_size=3, stride=2, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    if dense:
        net['dense'] = dropout(DenseLayer(net['pool2'], num_units=1024, nonlinearity=nonlinearities.leaky_rectify), 0.5)
        # Deconv
        net['dense/inverse'] = inverse_dense_layer(net['dense'], net['dense'], net['pool2'].output_shape)
        net['pool2/inverse'] = inverse_convolution_strided_layer(net['dense/inverse'], net['pool2'])
    else:
        net['pool2/inverse'] = inverse_convolution_strided_layer(net['pool2'], net['pool2'])
    net['conv3/inverse'] = inverse_convolution_layer(net['pool2/inverse'], net['conv3'])
    net['pool1/inverse'] = inverse_convolution_strided_layer(net['conv3/inverse'], net['pool1'])
    net['conv2/inverse'] = inverse_convolution_layer(net['pool1/inverse'], net['conv2'])
    net['conv1/inverse'] = inverse_convolution_layer(net['conv2/inverse'], net['conv1'])
    net['conv0/inverse'] = ConvLayer(net['conv1/inverse'], num_filters=input_shape[1], filter_size=1, nonlinearity=nonlinearities.linear, pad='same')

    net['prob'] = net['conv0/inverse']

    for layer in get_all_layers(net['prob']):
        print(layer)
        print(layer.output_shape)
    return net
Example #21
 def make_branch(view, input_layer, cpdictlist, nhidden=256, dropoutp=0.5):
     """
     see: http://lasagne.readthedocs.org/en/latest/modules/layers.html
     convolution only - no pooling
     """
     net = {}
     convname = ""
     prev_layername = ""
     for i, cpdict in enumerate(cpdictlist):
         convname = "conv-{}-{}".format(view, i)
         logger.info("Convpool {} params: {}".format(convname, cpdict))
         # the first time through, use `input`, after use the last layer
         # from the previous iteration - ah loose scoping rules...
         if i == 0:
             layer = input_layer
         else:
             layer = net[prev_layername]
         net[convname] = Conv2DLayer(
             layer,
             num_filters=cpdict["nfilters"],
             filter_size=cpdict["filter_size"],
             nonlinearity=lasagne.nonlinearities.rectify,
             W=lasagne.init.GlorotUniform(),
         )
         prev_layername = convname
     densename = "dense-{}".format(view)
     net[densename] = DenseLayer(
         dropout(net[convname], p=dropoutp), num_units=nhidden, nonlinearity=lasagne.nonlinearities.rectify
     )
     logger.info("Dense {} with nhidden = {}, dropout = {}".format(densename, nhidden, dropoutp))
     return net
Example #22
 def make_branch(view, input_layer, cpdictlist, nhidden=256, dropoutp=0.5):
     """
     see: http://lasagne.readthedocs.org/en/latest/modules/layers.html
     convolution only - no pooling
     """
     net = {}
     convname = ''
     prev_layername = ''
     for i, cpdict in enumerate(cpdictlist):
         convname = 'conv-{}-{}'.format(view, i)
         logger.info("Convpool {} params: {}".format(convname, cpdict))
         # the first time through, use `input`, after use the last layer
         # from the previous iteration - ah loose scoping rules...
         if i == 0:
             layer = input_layer
         else:
             layer = net[prev_layername]
         net[convname] = Conv2DLayer(
             layer, num_filters=cpdict['nfilters'],
             filter_size=cpdict['filter_size'],
             nonlinearity=lasagne.nonlinearities.rectify,
             W=lasagne.init.GlorotUniform())
         prev_layername = convname
     densename = 'dense-{}'.format(view)
     net[densename] = DenseLayer(
         dropout(net[convname], p=dropoutp),
         num_units=nhidden,
         nonlinearity=lasagne.nonlinearities.rectify)
     logger.info("Dense {} with nhidden = {}, dropout = {}".format(
         densename, nhidden, dropoutp))
     return net
Example #23
    def __init__(self, input_layer, output_dim, hidden_sizes,
                 hidden_act=nonlinearities.tanh,
                 output_act=nonlinearities.identity,
                 params=None,
                 batch_norm=False,
                 dropout=False):
        out_layer = input_layer

        param_idx = 0
        for hidden_size in hidden_sizes:
            w_args = {}
            if params is not None:
                w_args = dict(W=params[param_idx], b=params[param_idx+1])
            out_layer = L.DenseLayer(out_layer, hidden_size, nonlinearity=hidden_act,
                                     **w_args)
            if batch_norm:
                out_layer = L.batch_norm(out_layer)
            if dropout:
                out_layer = L.dropout(out_layer)
            param_idx += 2

        w_args = {}
        if params is not None:
            w_args = dict(W=params[param_idx], b=params[param_idx + 1])
        out_layer = L.DenseLayer(out_layer, output_dim, nonlinearity=output_act,
                                 **w_args)

        self.out_layer = out_layer
        self.output = L.get_output(self.out_layer)
Example #24
def build_network_zeta(
    inputlist, imgh=(50, 25, 25), imgw=127, convpooldictlist=None, nhidden=None, dropoutp=None, noutputs=11, depth=1
):
    """
    here, `inputlist` should have img tensors for x, u, v, and for muon_data

    here, `imgh` is a tuple of sizes for `(x, u, v)`. `imgw` is the same
    for all three views.

    also, the `convpooldictlist` here must be a dictionary of dictionaries,
    with the set of convolution and pooling defined independently for 'x', 'u',
    and 'v' - e.g., `convpooldictlist['x']` will be a dictionary similar to
    the dictionaries used by network models like `beta`, etc.
    """
    net = {}
    # Input layer
    input_var_x, input_var_u, input_var_v, input_var_muon = inputlist[0], inputlist[1], inputlist[2], inputlist[3]
    net["input-x"] = InputLayer(shape=(None, depth, imgw, imgh[0]), input_var=input_var_x)
    net["input-u"] = InputLayer(shape=(None, depth, imgw, imgh[1]), input_var=input_var_u)
    net["input-v"] = InputLayer(shape=(None, depth, imgw, imgh[2]), input_var=input_var_v)
    net["input-muon-dat"] = InputLayer(shape=(None, 10), input_var=input_var_muon)

    if convpooldictlist is None:
        raise Exception("Conv-pool dictionaries must be defined!")

    if nhidden is None:
        nhidden = 256

    if dropoutp is None:
        dropoutp = 0.5

    net.update(make_Nconvpool_1dense_branch("x", net["input-x"], convpooldictlist["x"], nhidden, dropoutp))
    net.update(make_Nconvpool_1dense_branch("u", net["input-u"], convpooldictlist["u"], nhidden, dropoutp))
    net.update(make_Nconvpool_1dense_branch("v", net["input-v"], convpooldictlist["v"], nhidden, dropoutp))

    # put a softmax on the muon vars
    net["softed-muon-dat"] = DenseLayer(
        net["input-muon-dat"], num_units=noutputs, nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Softmax on muon dat with n_units = {}".format(noutputs))

    # Concatenate the parallel inputs, include the muon data
    net["concat"] = ConcatLayer((net["dense-x"], net["dense-u"], net["dense-v"], net["softed-muon-dat"]))
    logger.info("Network: concat columns...")

    # One more dense layer
    net["dense-across"] = DenseLayer(
        dropout(net["concat"], p=dropoutp), num_units=(nhidden // 2), nonlinearity=lasagne.nonlinearities.rectify
    )
    logger.info("Dense {} with nhidden = {}, dropout = {}".format("dense-across", nhidden // 2, dropoutp))

    # And, finally, the `noutputs`-unit output layer
    net["output_prob"] = DenseLayer(
        net["dense-across"], num_units=noutputs, nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Softmax output prob with n_units = {}".format(noutputs))

    logger.info("n-parameters: %s" % lasagne.layers.count_params(net["output_prob"]))
    return net["output_prob"]
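
A minimal usage sketch for `build_network_zeta` (input variables and filter settings are hypothetical): `convpooldictlist` is a dict keyed by view, each value a per-view list like the one passed to `make_Nconvpool_1dense_branch`.

import theano.tensor as T

convpooldictlist = {
    view: [{'nfilters': 32, 'filter_size': (3, 3), 'pool_size': (2, 2)},
           {'nfilters': 32, 'filter_size': (3, 3), 'pool_size': (2, 2)}]
    for view in ('x', 'u', 'v')
}
inputlist = [T.tensor4('x'), T.tensor4('u'), T.tensor4('v'), T.matrix('muon')]
output_layer = build_network_zeta(inputlist, imgh=(50, 25, 25), imgw=127,
                                  convpooldictlist=convpooldictlist)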
Example #25
def build_triamese_delta(
    inputlist, imgh=68, imgw=127, convpooldictlist=None, nhidden=None, dropoutp=None, noutputs=67, depth=1
):
    """
    'triamese' (one branch for each view, feeding a fully-connected network),
    model using two layers of convolutions and pooling.

    This model is basically identical to the `beta` model, except we have
    a softmax output of `noutputs` (def 67) for the full set of planecodes.
    """
    net = {}
    # Input layer
    input_var_x, input_var_u, input_var_v = inputlist[0], inputlist[1], inputlist[2]
    tshape = (None, depth, imgw, imgh)
    net["input-x"] = InputLayer(shape=tshape, input_var=input_var_x)
    net["input-u"] = InputLayer(shape=tshape, input_var=input_var_u)
    net["input-v"] = InputLayer(shape=tshape, input_var=input_var_v)

    if convpooldictlist is None:
        convpooldictlist = []
        convpool1dict = {}
        convpool1dict["nfilters"] = 32
        convpool1dict["filter_size"] = (3, 3)
        convpool1dict["pool_size"] = (2, 2)
        convpooldictlist.append(convpool1dict)
        convpool2dict = {}
        convpool2dict["nfilters"] = 32
        convpool2dict["filter_size"] = (3, 3)
        convpool2dict["pool_size"] = (2, 2)
        convpooldictlist.append(convpool2dict)

    if nhidden is None:
        nhidden = 256

    if dropoutp is None:
        dropoutp = 0.5

    net.update(make_Nconvpool_1dense_branch("x", net["input-x"], convpooldictlist, nhidden, dropoutp))
    net.update(make_Nconvpool_1dense_branch("u", net["input-u"], convpooldictlist, nhidden, dropoutp))
    net.update(make_Nconvpool_1dense_branch("v", net["input-v"], convpooldictlist, nhidden, dropoutp))

    # Concatenate the two parallel inputs
    net["concat"] = ConcatLayer((net["dense-x"], net["dense-u"], net["dense-v"]))
    logger.info("Network: concat columns...")

    # One more dense layer
    net["dense-across"] = DenseLayer(
        dropout(net["concat"], p=dropoutp), num_units=(nhidden // 2), nonlinearity=lasagne.nonlinearities.rectify
    )
    logger.info("Dense {} with nhidden = {}, dropout = {}".format("dense-across", nhidden // 2, dropoutp))

    # And, finally, the `noutputs`-unit output layer
    net["output_prob"] = DenseLayer(
        net["dense-across"], num_units=noutputs, nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Softmax output prob with n_units = {}".format(noutputs))

    logger.info("n-parameters: {}".format(lasagne.layers.count_params(net["output_prob"])))
    return net["output_prob"]
Example #26
File: models.py Project: srviest/SoloLa-
    def build_network(self, mspec_input_var):
        print('Building spec dnn with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        mspec_network = layers.InputLayer((None, 130, MC_LENGTH), mspec_input_var)
        mspec_network = layers.BatchNormLayer(mspec_network)
        for n in self.net_opts['layer_list']:
            mspec_network = layers.DenseLayer(layers.dropout(mspec_network, p=self.net_opts['dropout_p']), 
                                            n, 
                                            nonlinearity=lasagne.nonlinearities.rectify)
        mspec_network = layers.DenseLayer(layers.dropout(mspec_network, p=self.net_opts['dropout_p']), 
                                        self.net_opts['num_class'], 
                                        nonlinearity=lasagne.nonlinearities.softmax)
        
        self.network = mspec_network
        return self.network
Example #27
    def build_network(self, mspec_input_var):
        print('Building spec dnn with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        mspec_network = layers.InputLayer((None, 130, MC_LENGTH), mspec_input_var)
        mspec_network = layers.BatchNormLayer(mspec_network)
        for n in self.net_opts['layer_list']:
            mspec_network = layers.DenseLayer(layers.dropout(mspec_network, p=self.net_opts['dropout_p']), 
                                            n, 
                                            nonlinearity=lasagne.nonlinearities.rectify)
        mspec_network = layers.DenseLayer(layers.dropout(mspec_network, p=self.net_opts['dropout_p']), 
                                        self.net_opts['num_class'], 
                                        nonlinearity=lasagne.nonlinearities.softmax)
        
        self.network = mspec_network
        return self.network
Example #28
def output_block(net, config, non_lin, verbose=True):
    """
    """
    # output setting
    out_acts = []
    for out_act in config.hyper_parameters.out_act:
        exec('from lasagne.nonlinearities import {}'.format(out_act))
        out_acts.append(eval(out_act))
    n_outs = config.hyper_parameters.n_out

    # Global Average Pooling
    last_conv_block_name = next(reversed(net))
    net['gap'] = L.GlobalPoolLayer(net[last_conv_block_name], name='gap')
    net['gap.bn'] = L.BatchNormLayer(net['gap'], name='gap.bn')
    n_features = net['gap.bn'].output_shape[-1]

    # feature Layer
    net['fc'] = L.dropout(L.batch_norm(
        L.DenseLayer(net['gap.bn'],
                     num_units=n_features,
                     nonlinearity=non_lin,
                     name='fc')),
                          name='fc.bn.do')

    # output (prediction)
    # check whether the model is for MTL or STL;
    # the target is passed as a list regardless of whether
    # it's MTL or STL (the configuration checker verifies it)
    targets = config.target
    out_layer_names = []
    for target, n_out, out_act in zip(targets, n_outs, out_acts):

        out_layer_names.append('out.{}'.format(target))

        if target == 'self':
            net[out_layer_names[-1]], inputs = build_siamese(net['fc'])
        else:
            net[out_layer_names[-1]] = L.DenseLayer(net['fc'],
                                                    num_units=n_out,
                                                    nonlinearity=out_act,
                                                    name=out_layer_names[-1])
            inputs = [net['input'].input_var]

    # make a concatenation layer just for save/load purposes
    net['IO'] = L.ConcatLayer([
        L.FlattenLayer(net[target_layer_name])
        if target == 'self' else net[target_layer_name]
        for target_layer_name in out_layer_names
    ],
                              name='IO')

    if verbose:
        print(net['gap.bn'].output_shape)
        print(net['fc'].output_shape)
        for target in targets:
            print(net['out.{}'.format(target)].output_shape)

    return net, inputs
Example #29
def build_triamese_inception(inputlist, imgh=50, imgw=50):
    """
    'triamese' (one branch for each view, feeding a fully-connected network),
    model using a slightly modified set of Google inception modules
    """
    input_var_x, input_var_u, input_var_v = \
        inputlist[0], inputlist[1], inputlist[2]
    net = {}
    # Input layer
    tshape = (None, 1, imgw, imgh)
    net['input_x'] = InputLayer(shape=tshape, input_var=input_var_x)
    net['input_u'] = InputLayer(shape=tshape, input_var=input_var_u)
    net['input_v'] = InputLayer(shape=tshape, input_var=input_var_v)

    # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
    nfilters = [32, 64, 96, 128, 16, 32]
    net.update(build_inception_module('inc_x1', net['input_x'], nfilters))
    net.update(build_inception_module('inc_u1', net['input_u'], nfilters))
    net.update(build_inception_module('inc_v1', net['input_v'], nfilters))

    net['dense_x'] = DenseLayer(
        dropout(flatten(net['inc_x1/output']), p=.5),
        num_units=100, nonlinearity=lasagne.nonlinearities.rectify)
    net['dense_u'] = DenseLayer(
        dropout(flatten(net['inc_u1/output']), p=.5),
        num_units=100, nonlinearity=lasagne.nonlinearities.rectify)
    net['dense_v'] = DenseLayer(
        dropout(flatten(net['inc_v1/output']), p=.5),
        num_units=100, nonlinearity=lasagne.nonlinearities.rectify)

    # Concatenate the parallel inputs
    net['concat'] = ConcatLayer((net['dense_x'],
                                 net['dense_u'],
                                 net['dense_v']))

    # And, finally, the 11-unit output layer with 50% dropout on its inputs:
    net['output_prob'] = DenseLayer(
        dropout(net['concat'], p=.5),
        num_units=11,
        nonlinearity=lasagne.nonlinearities.softmax)

    logger.info("n-parameters: {}".format(
        lasagne.layers.count_params(net['output_prob']))
    )
    return net['output_prob']
Example #30
    def build_ae(self):

        input_layer = InputLayer(shape=(None, self.num_vars * self.num_channels), input_var=self.input_var)

        self.hidden = DenseLayer(
            dropout(input_layer, p=self.dropout_rate), num_units=self.nodes, nonlinearity=self.activation
        )

        self.network = DenseLayer(self.hidden, num_units=self.num_vars, W=self.hidden.W.T, nonlinearity=linear)
Example #31
def build_triamese_inception(inputlist, imgh=50, imgw=50):
    """
    'triamese' (one branch for each view, feeding a fully-connected network),
    model using a slightly modified set of Google inception modules
    """
    input_var_x, input_var_u, input_var_v = \
        inputlist[0], inputlist[1], inputlist[2]
    net = {}
    # Input layer
    tshape = (None, 1, imgw, imgh)
    net['input_x'] = InputLayer(shape=tshape, input_var=input_var_x)
    net['input_u'] = InputLayer(shape=tshape, input_var=input_var_u)
    net['input_v'] = InputLayer(shape=tshape, input_var=input_var_v)

    # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
    nfilters = [32, 64, 96, 128, 16, 32]
    net.update(build_inception_module('inc_x1', net['input_x'], nfilters))
    net.update(build_inception_module('inc_u1', net['input_u'], nfilters))
    net.update(build_inception_module('inc_v1', net['input_v'], nfilters))

    net['dense_x'] = DenseLayer(
        dropout(flatten(net['inc_x1/output']), p=.5),
        num_units=100, nonlinearity=lasagne.nonlinearities.rectify)
    net['dense_u'] = DenseLayer(
        dropout(flatten(net['inc_u1/output']), p=.5),
        num_units=100, nonlinearity=lasagne.nonlinearities.rectify)
    net['dense_v'] = DenseLayer(
        dropout(flatten(net['inc_v1/output']), p=.5),
        num_units=100, nonlinearity=lasagne.nonlinearities.rectify)

    # Concatenate the parallel inputs
    net['concat'] = ConcatLayer((net['dense_x'],
                                 net['dense_u'],
                                 net['dense_v']))

    # And, finally, the 11-unit output layer with 50% dropout on its inputs:
    net['output_prob'] = DenseLayer(
        dropout(net['concat'], p=.5),
        num_units=11,
        nonlinearity=lasagne.nonlinearities.softmax)

    print("n-parameters: ", lasagne.layers.count_params(net['output_prob']))
    return net['output_prob']
Example #32
def build_mix(input_var, nb_classes, n_chanels=1, input_size=20, reshaped_input_size=20, activity=softmax):
    """
    Builds the complete network with a 1D convolution layer to integrate time from sequences of EEG images.

    :param input_var: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :return: a pointer to the output of the last layer
    """
    # Input layer
    input = InputLayer(shape=(None, 1, input_size), input_var=input_var)

    input = ReshapeLayer(input, (([0], n_chanels, reshaped_input_size)))

    conv1 = Conv1DLayer(input, 1024, 5)

    pool1 = MaxPool1DLayer(conv1, 2)

    conv2 = Conv1DLayer(pool1, 512, 5)

    pool2 = MaxPool1DLayer(conv2, 2)

    conv3 = Conv1DLayer(pool2, 256, 2)

    conv_layer = FlattenLayer(conv3)

    rnn_input = DimshuffleLayer(conv2, (0, 2, 1))

    rnnpool = LSTMLayer(rnn_input, num_units=256, nonlinearity=tanh)

    rnn_layer = SliceLayer(rnnpool, -1, 1)

    network = ConcatLayer([conv_layer, rnn_layer])

    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    network = DenseLayer(dropout(network, p=.5),
            num_units=64, nonlinearity=rectify)

    # And, finally, the output layer with 50% dropout on its inputs:
    network = DenseLayer(dropout(network, p=.5),
            num_units=nb_classes, nonlinearity=activity)

    return network
Example #33
def build_partitioned(input_var, nb_classes, n_chanels, input_size, reshaped_input_size, activity=softmax):
    """
    Builds the complete network with a 1D convolution layer to integrate time from sequences of EEG images.

    :param input_var: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :return: a pointer to the output of the last layer
    """
    # Input layer
    input_layer = InputLayer(shape=(None, 1, input_size), input_var=input_var)

    input_layer = ReshapeLayer(input_layer, (([0], n_chanels, reshaped_input_size)))

    #slice for partition
    input_layers = []
    for ix in range(n_chanels):
        input_layers.append(SliceLayer(input_layer, indices=slice(ix, ix+1), axis=1))


    #dnn
    networks = []
    for input_layer in input_layers:

        tmp = DenseLayer(dropout(input_layer, p=.2),
            num_units=10, nonlinearity=rectify)
        tmp = DenseLayer(dropout(tmp, p=.5),
                                   num_units=3, nonlinearity=rectify)
        '''
        tmp = Conv1DLayer(input_layer, 8, 5)
        tmp = MaxPool1DLayer(tmp, 2)
        tmp = Conv1DLayer(tmp, 8, 5)
        tmp = MaxPool1DLayer(tmp, 2)
        '''
        networks.append(tmp)

    network = ConcatLayer(networks)

    network = DenseLayer(dropout(network, p=.5),
            num_units=nb_classes, nonlinearity=activity)


    return network
Example #34
 def set_conv_layer(self, network, layer_name, dropout=True, pad=0, bnorm=False):
     opts = self.net_opts[layer_name]
     ll = layers.Conv1DLayer(
             layers.dropout(network, p=self.net_opts['dropout_p']) if dropout else network,
             num_filters=opts['num_filters'],
             filter_size=opts['filter_size'],
             stride=opts['stride'],
             pad=pad,
             name=layer_name
          )
     return layers.batch_norm(ll) if bnorm else ll
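
A minimal sketch of the `net_opts` entries this helper reads (all values hypothetical): each convolution layer name maps to its filter settings, and 'dropout_p' is shared across layers.

net_opts = {
    'dropout_p': 0.25,
    'ra_conv_1': {'num_filters': 16, 'filter_size': 32, 'stride': 4},
    'ra_conv_2': {'num_filters': 32, 'filter_size': 16, 'stride': 2},
}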
Example #35
File: models.py Project: srviest/SoloLa-
 def set_conv_layer(self, network, layer_name, dropout=True, pad=0, bnorm=False):
     opts = self.net_opts[layer_name]
     ll = layers.Conv1DLayer(
             layers.dropout(network, p=self.net_opts['dropout_p']) if dropout else network,
             num_filters=opts['num_filters'],
             filter_size=opts['filter_size'],
             stride=opts['stride'],
             pad=pad,
             name=layer_name
          )
     return layers.batch_norm(ll) if bnorm else ll
Example #36
def build_beta_single_view(
    inputlist, view="x", imgh=68, imgw=127, convpooldictlist=None, nhidden=None, dropoutp=None, noutputs=11, depth=1
):
    """
    This network is modeled after the 'triamese' (tri-columnar) beta model,
    but is meant to operate on one view only.

    This function has a different signature from the rest of the functions
    in this module, so it is really not meant to be used as a `build_cnn`
    function in the runner scripts (although, in Python, that would work).
    """
    net = {}
    # Input layer
    input_var = inputlist[0]
    tshape = (None, depth, imgw, imgh)
    input_name = "input-" + view
    net[input_name] = InputLayer(shape=tshape, input_var=input_var)

    if convpooldictlist is None:
        convpooldictlist = []
        convpool1dict = {}
        convpool1dict["nfilters"] = 32
        convpool1dict["filter_size"] = (3, 3)
        convpool1dict["pool_size"] = (2, 2)
        convpooldictlist.append(convpool1dict)
        convpool2dict = {}
        convpool2dict["nfilters"] = 32
        convpool2dict["filter_size"] = (3, 3)
        convpool2dict["pool_size"] = (2, 2)
        convpooldictlist.append(convpool2dict)

    if nhidden is None:
        nhidden = 256

    if dropoutp is None:
        dropoutp = 0.5

    net.update(make_Nconvpool_1dense_branch(view, net[input_name], convpooldictlist, nhidden, dropoutp))

    # One more dense layer
    dense_name = "dense-" + view
    net["dense-across"] = DenseLayer(
        dropout(net[dense_name], p=dropoutp), num_units=(nhidden // 2), nonlinearity=lasagne.nonlinearities.rectify
    )
    logger.info("Dense {} with nhidden = {}, dropout = {}".format("dense-across", nhidden // 2, dropoutp))

    # And, finally, the `noutputs`-unit output layer
    net["output_prob"] = DenseLayer(
        net["dense-across"], num_units=noutputs, nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Softmax output prob with n_units = {}".format(noutputs))

    logger.info("n-parameters: {}".format(lasagne.layers.count_params(net["output_prob"])))
    return net["output_prob"]
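
A minimal usage sketch for the single-view builder above (the input variable is assumed): unlike the other builders in this module, it takes a one-element `inputlist` plus the view name.

import theano.tensor as T

output_layer = build_beta_single_view([T.tensor4('x_view')], view='x', noutputs=11)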
Example #37
def create_nn():

	'''
	Returns the theano functions - train, test
	Returns the 'KerasNet'

	Using default values of adam - learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08

	Input to the NN is (batch_size,3,32,32) and the corresponding classes it belongs to (batch_size,)
	'''

	l_in = InputLayer((batch_size,3,32,32))
	l_in_bn = BatchNormLayer(l_in)
	
	conv1 = Conv2DLayer(l_in_bn,pad='same',num_filters=64,filter_size=(3,3),nonlinearity=lasagne.nonlinearities.rectify) #Bx64x32x32
	conv1_1 = Conv2DLayer(conv1,pad='same',num_filters=64,filter_size=(3,3),nonlinearity=lasagne.nonlinearities.rectify) #Bx64x32x32
	conv1_mp = MaxPool2DLayer(conv1_1,pool_size=(2,2)) #Bx64x16x16
	conv1_do = dropout(conv1_mp,p=0.25)

	conv2 = Conv2DLayer(conv1_do,pad='same',num_filters=128,filter_size=(3,3),nonlinearity=lasagne.nonlinearities.rectify) #Bx128x16x16
	conv2_1 = Conv2DLayer(conv2,pad='same',num_filters=128,filter_size=(3,3),nonlinearity=lasagne.nonlinearities.rectify) #Bx128x16x16
	conv2_mp = MaxPool2DLayer(conv2_1,pool_size=(2,2)) #Bx128x8x8
	conv2_do = dropout(conv2_mp,p=0.25)

	flat = flatten(conv2_do,2) #Bx8192
	fc = DenseLayer(flat,num_units=512,nonlinearity=lasagne.nonlinearities.rectify) #Bx512
	fc_do = dropout(fc, p=0.5) 
	network = DenseLayer(fc_do, num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax	) #Bxnb_classes

	net_output = lasagne.layers.get_output(network)
	true_output = T.matrix()

	all_params = lasagne.layers.get_all_params(network,trainable=True)
	loss = T.mean(lasagne.objectives.categorical_crossentropy(net_output,true_output))
	updates = lasagne.updates.adam(loss,all_params)

	train = theano.function(inputs= [l_in.input_var,true_output] , outputs=[net_output,loss], updates = updates)
	test = theano.function(inputs= [l_in.input_var], outputs= [net_output])

	return train,test,network
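
A minimal usage sketch for `create_nn` (the batch data here is synthetic; `batch_size` and `nb_classes` are module-level settings in the original file): the train function takes an image batch and one-hot targets, the test function takes images only.

import numpy as np

train_fn, test_fn, network = create_nn()
X_batch = np.random.rand(batch_size, 3, 32, 32).astype('float32')
Y_batch = np.eye(nb_classes, dtype='float32')[np.random.randint(nb_classes, size=batch_size)]
preds, loss = train_fn(X_batch, Y_batch)
(preds_only,) = test_fn(X_batch)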
Example #38
def build_discriminator(input_var=None):
    #D_inp = T.tensor4('Ds')
    D = l.InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    D = l.Conv2DLayer(D,
                      num_filters=20,
                      filter_size=(5, 5),
                      nonlinearity=reLU,
                      W=lasagne.init.GlorotUniform())
    #D = l.Conv2DLayer(D,1,filter_size=(2,2), stride=2, nonlinearity=reLU)
    D = l.DropoutLayer(D, p=0.2)
    D = l.Conv2DLayer(D,
                      num_filters=20,
                      filter_size=(5, 5),
                      nonlinearity=reLU,
                      W=lasagne.init.GlorotUniform())
    #D = l.Conv2DLayer(D,1,filter_size=(2,2), stride=2, nonlinearity=reLU)
    D = l.DropoutLayer(D, p=0.2)
    D = l.DenseLayer(l.dropout(D, p=0.5), num_units=256, nonlinearity=reLU)
    D = l.DenseLayer(l.dropout(D, p=0.5), num_units=1, nonlinearity=softmax)

    #D1.params = D.params
    return D
Example #39
 def __build_24_net__(self):
    
     network = layers.InputLayer((None, 3, 24, 24), input_var=self.__input_var__)
     network = layers.dropout(network, p=0.1)
     network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
     network = layers.batch_norm(network)
     network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
     network = layers.DropoutLayer(network,p=0.5)
     network = layers.batch_norm(network)
     network = layers.DenseLayer(network,num_units = 64,nonlinearity = relu)
     network = layers.DropoutLayer(network,p=0.5)
     network = layers.DenseLayer(network,num_units = 2, nonlinearity = softmax)
     return network
Example #40
    def __init__(self,
                 insize,
                 vocoder,
                 mlpg_wins=[],
                 hiddensize=256,
                 nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
                 nblayers=3,
                 bn_axes=None,
                 dropout_p=-1.0,
                 grad_clipping=50):
        if bn_axes is None:
            bn_axes = []  # Recurrent nets don't like batch norm [ref needed]
        model.Model.__init__(self, insize, vocoder, hiddensize)

        if len(bn_axes) > 0:
            warnings.warn(
                'ModelBLSTM: You are using bn_axes={}, but batch normalisation is supposed to make recurrent neural networks (RNNs) unstable [ref. needed]'
                .format(bn_axes))

        l_hid = ll.InputLayer(shape=(None, None, insize),
                              input_var=self._input_values,
                              name='input_conditional')

        for layi in xrange(nblayers):
            layerstr = 'l' + str(1 + layi) + '_BLSTM{}'.format(hiddensize)

            fwd = layer_LSTM(l_hid,
                             hiddensize,
                             nonlinearity=nonlinearity,
                             backwards=False,
                             grad_clipping=grad_clipping,
                             name=layerstr + '.fwd')
            bck = layer_LSTM(l_hid,
                             hiddensize,
                             nonlinearity=nonlinearity,
                             backwards=True,
                             grad_clipping=grad_clipping,
                             name=layerstr + '.bck')
            l_hid = ll.ConcatLayer((fwd, bck), axis=2)

            # Add batch normalisation
            if len(bn_axes) > 0: l_hid = ll.batch_norm(l_hid, axes=bn_axes)

            # Add dropout (after batchnorm)
            if dropout_p > 0.0: l_hid = ll.dropout(l_hid, p=dropout_p)

        l_out = layer_final(l_hid, vocoder, mlpg_wins)

        self.init_finish(
            l_out
        )  # Has to be called at the end of the __init__ to print out the architecture, get the trainable params, etc.
Example #41
def build_dnn(input_var, nb_classes, n_chanels=1, input_size=20, reshaped_input_size=20, activity=softmax):
    """
    Builds the complete network with 1D-conv1d layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :return: a pointer to the output of last layer
    """
    # Input layer
    network = InputLayer(shape=(None, 1, input_size), input_var=input_var)

    #network = ReshapeLayer(network, (([0], n_chanels, reshaped_input_size)))

    network = DenseLayer(dropout(network, p=.2),
            num_units=160, nonlinearity=rectify)

    network = DenseLayer(dropout(network, p=.5),
            num_units=60, nonlinearity=rectify)

    network = DenseLayer(dropout(network, p=.5),
            num_units=nb_classes, nonlinearity=activity)

    return network
Example #42
def lenet5(inputs_shape,
           nonlinearity=default_nonlinearity,
           use_dropout=False,
           **kwargs):
    logger.warning("Unrecognized options to the model: %s", kwargs)

    l = InputLayer(inputs_shape)
    l = conv(l, 32, 5, nonlinearity=None)
    l = nonlin(l, nonlinearity=nonlinearity)
    l = maxpool(l, 2)

    l = conv(l, 64, 5, nonlinearity=None)
    l = nonlin(l, nonlinearity=nonlinearity)
    l = maxpool(l, 2)
    if use_dropout:
        l = dropout(l, p=0.5)
    l = dense(l, 512, nonlinearity=None)
    l = nonlin(l, nonlinearity=nonlinearity)
    if use_dropout:
        l = dropout(l, p=0.5)
    # output layers
    logits = dense(l, 10, nonlinearity=None)
    return logits
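A brief note on using the returned layer (conv, maxpool, dense and nonlin are assumed to be thin wrapper helpers defined elsewhere in the same module): the final DenseLayer is built without a nonlinearity, so the softmax is applied only when the loss is constructed, roughly as in this sketch:

logits = lenet5((None, 1, 28, 28), use_dropout=True)
probs = lasagne.nonlinearities.softmax(lasagne.layers.get_output(logits))
loss = lasagne.objectives.categorical_crossentropy(probs, targets).mean()  # targets: int vector, assumed given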
Example #43
File: models.py Project: srviest/SoloLa-
    def build_network(self, ra_input_var, mc_input_var):
        print('Building raw network with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        ra_network_1 = layers.InputLayer((None, 1, None), ra_input_var)
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_1', dropout=False, pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_1')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_2', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_2')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_3', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_3')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_4', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_4')
        concat_list = [ra_network_1]
        mc_input = layers.InputLayer((None, 2, None), mc_input_var)
        concat_list.append(mc_input)
        network = layers.ConcatLayer(concat_list, axis=1, cropping=[None, None, 'center'])
        network = self.set_conv_layer(network, 'conv_1')
        network = self.set_pool_layer(network, 'pool_1')
        network = self.set_conv_layer(network, 'conv_2')
        network = self.set_pool_layer(network, 'pool_2')
        network = self.set_conv_layer(network, 'conv_3')
        network = layers.GlobalPoolLayer(network, getattr(T, self.net_opts['global_pool_func']))
        # print(layers.get_output_shape(network))
        # network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']), 
        #                           self.net_opts['dens_1'], 
        #                           nonlinearity=lasagne.nonlinearities.rectify)
        network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']), 
                                    self.net_opts['dens_2'], 
                                    nonlinearity=lasagne.nonlinearities.rectify)
        network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']), 
                                    self.net_opts['num_class'], 
                                    nonlinearity=lasagne.nonlinearities.softmax)
        # print(layers.get_output_shape(network))
        self.network = network
        return self.network
Example #44
def bid_layer(input_layer, rnn_dim, batch_size, rnn_shape, cell,
              add_dense=True, dropout_p=0.2, depth=1, **cell_args):
    """
    batch_size: int or symbolic_var (e.g. input_var.shape[0])
    context: int
    """
    if cell == 'lstm':
        cell = LSTMLayer
    elif cell == 'gru':
        cell = GRULayer
    else:
        raise ValueError('cell must be one of "lstm", "gru"')
    rnn = input_layer
    for n in range(depth):
        fwd = cell(rnn, rnn_dim, only_return_final=False, **cell_args)
        # No need to reverse output of bwd_lstm since backwards is defined:
        # backwards : bool
        #   process the sequence backwards and then reverse the output again
        #   such that the output from the layer is always from x_1 to x_n.
        bwd = cell(rnn, rnn_dim, only_return_final=False, backwards=True,
                   **cell_args)
        if add_dense:
            # reshape for dense
            fwd = ReshapeLayer(fwd, (-1, rnn_dim))
            bwd = ReshapeLayer(bwd, (-1, rnn_dim))
            fwd = DenseLayer(fwd, num_units=rnn_dim, nonlinearity=tanh)
            bwd = DenseLayer(bwd, num_units=rnn_dim, nonlinearity=tanh)
            # dropout
            fwd = dropout(fwd, p=dropout_p)
            bwd = dropout(bwd, p=dropout_p)
            # reshape back to input format
            fwd = ReshapeLayer(fwd, rnn_shape)
            bwd = ReshapeLayer(bwd, rnn_shape)
        # merge over lstm output dim (axis=2)
        rnn = ElemwiseSumLayer(incomings=[fwd, bwd])
    return rnn
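A minimal sketch of wiring up bid_layer (the shapes and names below are illustrative assumptions; the layer classes it uses are expected to be imported at module level as in the snippet): a (batch, time, features) input goes through one bidirectional GRU block whose output keeps the (batch, time, rnn_dim) layout.

import theano.tensor as T
from lasagne.layers import InputLayer

X = T.tensor3('X')                          # (batch, time, features)
rnn_dim, seq_len, n_feats = 64, 20, 100
l_in = InputLayer((None, seq_len, n_feats), input_var=X)
rnn_shape = (-1, seq_len, rnn_dim)          # restores (batch, time, rnn_dim) after the dense layers
l_rnn = bid_layer(l_in, rnn_dim, batch_size=X.shape[0], rnn_shape=rnn_shape,
                  cell='gru', add_dense=True, dropout_p=0.2, depth=1)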
def build_model_audio(modelfile, meanstd_file, input_dim, excerpt_size):
    """
    Builds the CNN architecture defined by Jan et al. @ISMIR2015, then loads the saved model
    and the mean/std file.
    """
    # Build CNN architecture
    net = {}
    net['input'] = InputLayer((None, 1, excerpt_size, input_dim))
    kwargs = dict(nonlinearity=lasagne.nonlinearities.leaky_rectify,
                  W=lasagne.init.Orthogonal())
    net['Conv1_1'] = ConvLayer(net['input'], 64, 3, **kwargs)
    net['Conv1_2'] = ConvLayer(net['Conv1_1'], 32, 3, **kwargs)
    net['pool1'] = MaxPool2DLayer(net['Conv1_2'], 3)
    net['Conv2_1'] = ConvLayer(net['pool1'], 128, 3, **kwargs)
    net['Conv2_2'] = ConvLayer(net['Conv2_1'], 64, 3, **kwargs)
    net['pool2'] = MaxPool2DLayer(net['Conv2_2'], 3)
    net['fc3'] = DenseLayer(dropout(net['pool2'], 0.5), 256, **kwargs)
    net['fc4'] = DenseLayer(dropout(net['fc3'], 0.5), 64, **kwargs)
    net['score'] = DenseLayer(dropout(net['fc4'], 0.5),
                              1,
                              nonlinearity=lasagne.nonlinearities.sigmoid,
                              W=lasagne.init.Orthogonal())

    # load saved weights
    with np.load(modelfile) as f:
        lasagne.layers.set_all_param_values(
            net['score'], [f['param%d' % i] for i in range(len(f.files))])

    # - load mean/std
    with np.load(meanstd_file) as f:
        mean = f['mean']
        std = f['std']
    mean = mean.astype(floatX)
    istd = np.reciprocal(std).astype(floatX)

    return net, mean, istd
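A sketch of how the returned pieces are typically combined (the file names, the input_dim/excerpt_size values, and the placeholder batch are assumptions, and the stored mean/std are assumed to broadcast over the excerpt shape): excerpts are standardised with the stored statistics and then scored through net['score'] with dropout disabled.

import numpy as np
import theano
import lasagne

net, mean, istd = build_model_audio('model.npz', 'meanstd.npz', input_dim=80, excerpt_size=115)
x = net['input'].input_var
score = lasagne.layers.get_output(net['score'], deterministic=True)  # no dropout at test time
predict = theano.function([x], score)

excerpts = np.random.rand(4, 1, 115, 80).astype(np.float32)          # placeholder batch
excerpts = (excerpts - mean) * istd                                   # apply the stored normalisation
print(predict(excerpts))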
Example #46
def mlp(inputs_shape,
        layer_dims,
        nonlinearity=default_nonlinearity,
        use_dropout=False,
        **kwargs):
    logger.warning("Unrecognized options to the model: %s", kwargs)
    l = lasagne.layers.InputLayer(inputs_shape)
    W_init = lasagne.init.GlorotUniform()
    for i, layer_size in enumerate(layer_dims[:-1]):
        assert layer_size >= 0
        l = dense(l, layer_size, W=W_init, nonlinearity=None)
        l = nonlin(l, nonlinearity=nonlinearity)
        if use_dropout:
            l = dropout(l)
    logits = dense(l, layer_dims[-1], W=W_init, nonlinearity=None)
    return logits
Example #47
    def __build_24_net__(self):

        network = layers.InputLayer((None, 3, 24, 24),
                                    input_var=self.__input_var__)
        network = layers.dropout(network, p=0.1)
        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
        network = layers.DropoutLayer(network, p=0.5)
        network = layers.batch_norm(network)
        network = layers.DenseLayer(network, num_units=64, nonlinearity=relu)
        network = layers.DropoutLayer(network, p=0.5)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
Example #48
def build_model_small(input_shape, input_var):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=input_var)
    net['input'].num_filters = input_shape[1]
    net['conv1'] = batch_norm(
        ConvLayer(net['input'],
                  num_filters=256,
                  filter_size=11,
                  nonlinearity=nonlinearities.leaky_rectify,
                  pad='same'))
    net['pool1'] = dropout(PoolLayer(net['conv1'], 2, mode='max'), 0.5)
    net['conv2'] = batch_norm(
        ConvLayer(net['pool1'],
                  num_filters=256,
                  filter_size=7,
                  nonlinearity=nonlinearities.leaky_rectify,
                  pad='same'))
    net['pool2'] = dropout(PoolLayer(net['conv2'], 2, mode='max'), 0.5)
    net['conv3'] = batch_norm(
        ConvLayer(net['pool2'],
                  num_filters=396,
                  filter_size=5,
                  nonlinearity=nonlinearities.leaky_rectify,
                  pad='same'))
    net['pool3'] = dropout(PoolLayer(net['conv3'], 2, mode='max'), 0.5)
    net['conv4'] = dropout(
        batch_norm(
            ConvLayer(net['pool3'],
                      num_filters=512,
                      filter_size=3,
                      nonlinearity=nonlinearities.leaky_rectify,
                      pad='same')), 0.5)
    net['conv5'] = dropout(
        batch_norm(
            ConvLayer(net['conv4'],
                      num_filters=1024,
                      filter_size=1,
                      nonlinearity=nonlinearities.leaky_rectify,
                      pad='same')), 0.5)
    net['dense1'] = dropout(
        batch_norm(
            DenseLayer(net['conv5'],
                       num_units=1024,
                       nonlinearity=nonlinearities.leaky_rectify)), 0.5)
    net['dense2'] = DenseLayer(net['dense1'],
                               num_units=11,
                               nonlinearity=nonlinearities.softmax)
    net['prob'] = net['dense2']
    for layer in get_all_layers(net['prob']):
        print layer
        print layer.output_shape
    return net
Example #49
def build_model_dense(input_shape, input_var):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=input_var)
    net['input'].num_filters = input_shape[1]
    net['conv1'] = ConvLayer(net['input'], num_filters=256, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv2'] = ConvLayer(net['conv1'], num_filters=256, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv2/reshape'] = ReshapeLayer(net['conv2'], (-1, net['conv2'].output_shape[1] * net['conv2'].output_shape[2]))
    net['dense'] = dropout(DenseLayer(net['conv2/reshape'], num_units=1024, nonlinearity=nonlinearities.leaky_rectify), 0.5)

    net['dense/inverse'] = inverse_dense_layer(net['dense'], net['dense'], net['conv2'].output_shape)
    net['conv2/inverse'] = inverse_convolution_layer(net['dense/inverse'], net['conv2'])
    net['conv1/inverse'] = inverse_convolution_layer(net['conv2/inverse'], net['conv1'])
    net['conv0/inverse'] = ConvLayer(net['conv1/inverse'], num_filters=input_shape[1], filter_size=1,nonlinearity=nonlinearities.linear, pad='same')
    net['prob'] = net['conv0/inverse']
    for layer in get_all_layers(net['prob']):
        print layer
        print layer.output_shape
    return net
Example #50
 def make_branch(input_layer,
                 num_filters1, filter_size1, pool_size1,
                 num_filters2, filter_size2, pool_size2):
     """
     see: http://lasagne.readthedocs.org/en/latest/modules/layers.html
     """
     convlayer1 = Conv2DLayer(input_layer, num_filters=num_filters1,
                              filter_size=filter_size1,
                              nonlinearity=lasagne.nonlinearities.rectify,
                              W=lasagne.init.GlorotUniform())
     maxpoollayer1 = MaxPool2DLayer(convlayer1, pool_size=pool_size1)
     convlayer2 = Conv2DLayer(maxpoollayer1, num_filters=num_filters2,
                              filter_size=filter_size2,
                              nonlinearity=lasagne.nonlinearities.rectify,
                              W=lasagne.init.GlorotUniform())
     maxpoollayer2 = MaxPool2DLayer(convlayer2, pool_size=pool_size2)
     dense1 = DenseLayer(
         dropout(maxpoollayer2, p=.5),
         num_units=256,
         nonlinearity=lasagne.nonlinearities.rectify)
     return dense1
Example #51
def build_model_small(input_shape, input_var):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=input_var)
    net['input'].num_filters = input_shape[1]
    net['conv1'] = batch_norm(ConvLayer(net['input'], num_filters=256, filter_size=11, nonlinearity=nonlinearities.leaky_rectify, pad='same'))
    net['pool1'] = dropout(PoolLayer(net['conv1'], 2, mode='max'), 0.5)
    net['conv2'] = batch_norm(ConvLayer(net['pool1'], num_filters=256, filter_size=7, nonlinearity=nonlinearities.leaky_rectify, pad='same'))
    net['pool2'] = dropout(PoolLayer(net['conv2'], 2, mode='max'), 0.5)
    net['conv3'] = batch_norm(ConvLayer(net['pool2'], num_filters=396, filter_size=5, nonlinearity=nonlinearities.leaky_rectify, pad='same'))
    net['pool3'] = dropout(PoolLayer(net['conv3'], 2, mode='max'), 0.5)
    net['conv4'] = dropout(batch_norm(ConvLayer(net['pool3'], num_filters=512, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')), 0.5)
    net['conv5'] = dropout(batch_norm(ConvLayer(net['conv4'], num_filters=1024, filter_size=1, nonlinearity=nonlinearities.leaky_rectify,pad='same')), 0.5)
    net['dense1'] = dropout(batch_norm(DenseLayer(net['conv5'], num_units=1024, nonlinearity=nonlinearities.leaky_rectify)), 0.5)
    net['dense2'] = DenseLayer(net['dense1'], num_units=11, nonlinearity=nonlinearities.softmax)
    net['prob'] = net['dense2']
    for layer in get_all_layers(net['prob']):
        print layer
        print layer.output_shape
    return net
    def __init__(self, train_list_raw, test_list_raw, png_folder, batch_size, dropout, l2, mode, batch_norm, **kwargs):
        
        print "==> not used params in DMN class:", kwargs.keys()
        self.train_list_raw = train_list_raw
        self.test_list_raw = test_list_raw
        self.png_folder = png_folder
        self.batch_size = batch_size
        self.dropout = dropout
        self.l2 = l2
        self.mode = mode
        self.batch_norm = batch_norm
        
        self.input_var = T.tensor4('input_var')
        self.answer_var = T.ivector('answer_var')
        
        print "==> building network"
        example = np.random.uniform(size=(self.batch_size, 1, 256, 858), low=0.0, high=1.0).astype(np.float32) #########
        answer = np.random.randint(low=0, high=176, size=(self.batch_size,)) #########
       
        network = layers.InputLayer(shape=(None, 1, 256, 858), input_var=self.input_var)
        print layers.get_output(network).eval({self.input_var:example}).shape
        
        # CONV-RELU-POOL 1
        network = layers.Conv2DLayer(incoming=network, num_filters=16, filter_size=(7, 7), 
                                     stride=1, nonlinearity=rectify)
        print layers.get_output(network).eval({self.input_var:example}).shape
        network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, ignore_border=False)
        print layers.get_output(network).eval({self.input_var:example}).shape
        if (self.batch_norm):
            network = layers.BatchNormLayer(incoming=network)
        
        # CONV-RELU-POOL 2
        network = layers.Conv2DLayer(incoming=network, num_filters=32, filter_size=(5, 5), 
                                     stride=1, nonlinearity=rectify)
        print layers.get_output(network).eval({self.input_var:example}).shape
        network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, ignore_border=False)
        print layers.get_output(network).eval({self.input_var:example}).shape
        if (self.batch_norm):
            network = layers.BatchNormLayer(incoming=network)

        
        # CONV-RELU-POOL 3
        network = layers.Conv2DLayer(incoming=network, num_filters=64, filter_size=(3, 3), 
                                     stride=1, nonlinearity=rectify)
        print layers.get_output(network).eval({self.input_var:example}).shape
        network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, ignore_border=False)
        print layers.get_output(network).eval({self.input_var:example}).shape
        if (self.batch_norm):
            network = layers.BatchNormLayer(incoming=network)
        
        # CONV-RELU-POOL 4
        network = layers.Conv2DLayer(incoming=network, num_filters=128, filter_size=(3, 3), 
                                     stride=1, nonlinearity=rectify)
        print layers.get_output(network).eval({self.input_var:example}).shape
        network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, ignore_border=False)
        print layers.get_output(network).eval({self.input_var:example}).shape
        if (self.batch_norm):
            network = layers.BatchNormLayer(incoming=network)
        
        # CONV-RELU-POOL 5
        network = layers.Conv2DLayer(incoming=network, num_filters=128, filter_size=(3, 3), 
                                     stride=1, nonlinearity=rectify)
        print layers.get_output(network).eval({self.input_var:example}).shape
        network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, ignore_border=False)
        print layers.get_output(network).eval({self.input_var:example}).shape
        if (self.batch_norm):
            network = layers.BatchNormLayer(incoming=network)
        
        # CONV-RELU-POOL 6
        network = layers.Conv2DLayer(incoming=network, num_filters=256, filter_size=(3, 3), 
                                     stride=1, nonlinearity=rectify)
        print layers.get_output(network).eval({self.input_var:example}).shape
        network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=(3, 2), ignore_border=False)
        print layers.get_output(network).eval({self.input_var:example}).shape
        if (self.batch_norm):
            network = layers.BatchNormLayer(incoming=network)
        
        # DENSE 1
        network = layers.DenseLayer(incoming=network, num_units=1024, nonlinearity=rectify)
        if (self.batch_norm):
            network = layers.BatchNormLayer(incoming=network)
        if (self.dropout > 0):
            network = layers.dropout(network, self.dropout)
        print layers.get_output(network).eval({self.input_var:example}).shape
        
        """
        # DENSE 2
        network = layers.DenseLayer(incoming=network, num_units=1024, nonlinearity=rectify)
        if (self.batch_norm):
            network = layers.BatchNormLayer(incoming=network)
        if (self.dropout > 0):
            network = layers.dropout(network, self.dropout)
        print layers.get_output(network).eval({self.input_var:example}).shape
        """
        
        # Last layer: classification
        network = layers.DenseLayer(incoming=network, num_units=176, nonlinearity=softmax)
        print layers.get_output(network).eval({self.input_var:example}).shape
        
        
        self.params = layers.get_all_params(network, trainable=True)
        self.prediction = layers.get_output(network)
        
        self.loss_ce = lasagne.objectives.categorical_crossentropy(self.prediction, self.answer_var).mean()
        if (self.l2 > 0):
            self.loss_l2 = self.l2 * lasagne.regularization.regularize_network_params(network, 
                                                                    lasagne.regularization.l2)
        else:
            self.loss_l2 = 0
        self.loss = self.loss_ce + self.loss_l2
        
        #updates = lasagne.updates.adadelta(self.loss, self.params)
        updates = lasagne.updates.momentum(self.loss, self.params, learning_rate=0.003)
        
        if self.mode == 'train':
            print "==> compiling train_fn"
            self.train_fn = theano.function(inputs=[self.input_var, self.answer_var], 
                                            outputs=[self.prediction, self.loss],
                                            updates=updates)
        
        print "==> compiling test_fn"
        self.test_fn = theano.function(inputs=[self.input_var, self.answer_var],
                                       outputs=[self.prediction, self.loss])
    def __init__(
        self,
        train_list_raw,
        test_list_raw,
        png_folder,
        batch_size,
        dropout,
        l2,
        mode,
        batch_norm,
        rnn_num_units,
        **kwargs
    ):

        print "==> not used params in DMN class:", kwargs.keys()
        self.train_list_raw = train_list_raw
        self.test_list_raw = test_list_raw
        self.png_folder = png_folder
        self.batch_size = batch_size
        self.dropout = dropout
        self.l2 = l2
        self.mode = mode
        self.batch_norm = batch_norm
        self.num_units = rnn_num_units

        self.input_var = T.tensor4("input_var")
        self.answer_var = T.ivector("answer_var")

        print "==> building network"
        example = np.random.uniform(size=(self.batch_size, 1, 128, 768), low=0.0, high=1.0).astype(
            np.float32
        )  #########
        answer = np.random.randint(low=0, high=176, size=(self.batch_size,))  #########

        network = layers.InputLayer(shape=(None, 1, 128, 768), input_var=self.input_var)
        print layers.get_output(network).eval({self.input_var: example}).shape

        # CONV-RELU-POOL 1
        network = layers.Conv2DLayer(
            incoming=network, num_filters=16, filter_size=(7, 7), stride=1, nonlinearity=rectify
        )
        print layers.get_output(network).eval({self.input_var: example}).shape
        network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, pad=2)
        print layers.get_output(network).eval({self.input_var: example}).shape
        if self.batch_norm:
            network = layers.BatchNormLayer(incoming=network)

        # CONV-RELU-POOL 2
        network = layers.Conv2DLayer(
            incoming=network, num_filters=32, filter_size=(5, 5), stride=1, nonlinearity=rectify
        )
        print layers.get_output(network).eval({self.input_var: example}).shape
        network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, pad=2)
        print layers.get_output(network).eval({self.input_var: example}).shape
        if self.batch_norm:
            network = layers.BatchNormLayer(incoming=network)

        # CONV-RELU-POOL 3
        network = layers.Conv2DLayer(
            incoming=network, num_filters=32, filter_size=(3, 3), stride=1, nonlinearity=rectify
        )
        print layers.get_output(network).eval({self.input_var: example}).shape
        network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, pad=2)
        print layers.get_output(network).eval({self.input_var: example}).shape
        if self.batch_norm:
            network = layers.BatchNormLayer(incoming=network)

        # CONV-RELU-POOL 4
        network = layers.Conv2DLayer(
            incoming=network, num_filters=32, filter_size=(3, 3), stride=1, nonlinearity=rectify
        )
        print layers.get_output(network).eval({self.input_var: example}).shape
        network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, pad=2)
        print layers.get_output(network).eval({self.input_var: example}).shape
        if self.batch_norm:
            network = layers.BatchNormLayer(incoming=network)

        self.params = layers.get_all_params(network, trainable=True)

        output = layers.get_output(network)
        num_channels = 32
        filter_W = 48
        filter_H = 8

        # NOTE: these constants are the shapes of the last pool layer; they could be
        # symbolic, but explicit values are better for optimization.

        channels = []
        for channel_index in range(num_channels):
            channels.append(output[:, channel_index, :, :].transpose((0, 2, 1)))

        rnn_network_outputs = []
        W_in_to_updategate = None
        W_hid_to_updategate = None
        b_updategate = None
        W_in_to_resetgate = None
        W_hid_to_resetgate = None
        b_resetgate = None
        W_in_to_hidden_update = None
        W_hid_to_hidden_update = None
        b_hidden_update = None

        for channel_index in range(num_channels):
            rnn_input_var = channels[channel_index]

            # InputLayer
            network = layers.InputLayer(shape=(None, filter_W, filter_H), input_var=rnn_input_var)

            if channel_index == 0:
                # GRULayer
                network = layers.GRULayer(incoming=network, num_units=self.num_units, only_return_final=True)
                W_in_to_updategate = network.W_in_to_updategate
                W_hid_to_updategate = network.W_hid_to_updategate
                b_updategate = network.b_updategate
                W_in_to_resetgate = network.W_in_to_resetgate
                W_hid_to_resetgate = network.W_hid_to_resetgate
                b_resetgate = network.b_resetgate
                W_in_to_hidden_update = network.W_in_to_hidden_update
                W_hid_to_hidden_update = network.W_hid_to_hidden_update
                b_hidden_update = network.b_hidden_update

                # add params
                self.params += layers.get_all_params(network, trainable=True)

            else:
                # GRULayer, but shared
                network = layers.GRULayer(
                    incoming=network,
                    num_units=self.num_units,
                    only_return_final=True,
                    resetgate=layers.Gate(W_in=W_in_to_resetgate, W_hid=W_hid_to_resetgate, b=b_resetgate),
                    updategate=layers.Gate(W_in=W_in_to_updategate, W_hid=W_hid_to_updategate, b=b_updategate),
                    hidden_update=layers.Gate(
                        W_in=W_in_to_hidden_update, W_hid=W_hid_to_hidden_update, b=b_hidden_update
                    ),
                )

            rnn_network_outputs.append(layers.get_output(network))

        all_output_var = T.concatenate(rnn_network_outputs, axis=1)
        print all_output_var.eval({self.input_var: example}).shape

        # InputLayer
        network = layers.InputLayer(shape=(None, self.num_units * num_channels), input_var=all_output_var)

        # Dropout Layer
        if self.dropout > 0:
            network = layers.dropout(network, self.dropout)

        # BatchNormalization Layer
        if self.batch_norm:
            network = layers.BatchNormLayer(incoming=network)

        # Last layer: classification
        network = layers.DenseLayer(incoming=network, num_units=176, nonlinearity=softmax)
        print layers.get_output(network).eval({self.input_var: example}).shape

        self.params += layers.get_all_params(network, trainable=True)
        self.prediction = layers.get_output(network)

        # print "==> param shapes", [x.eval().shape for x in self.params]

        self.loss_ce = lasagne.objectives.categorical_crossentropy(self.prediction, self.answer_var).mean()
        if self.l2 > 0:
            self.loss_l2 = self.l2 * lasagne.regularization.apply_penalty(self.params, lasagne.regularization.l2)
        else:
            self.loss_l2 = 0
        self.loss = self.loss_ce + self.loss_l2

        # updates = lasagne.updates.adadelta(self.loss, self.params)
        updates = lasagne.updates.momentum(self.loss, self.params, learning_rate=0.003)

        if self.mode == "train":
            print "==> compiling train_fn"
            self.train_fn = theano.function(
                inputs=[self.input_var, self.answer_var], outputs=[self.prediction, self.loss], updates=updates
            )

        print "==> compiling test_fn"
        self.test_fn = theano.function(inputs=[self.input_var, self.answer_var], outputs=[self.prediction, self.loss])
def buildCNNFingerprint(input_atom, input_bonds, input_atom_index, input_bond_index, input_mask, \
    max_atom_len, max_bond_len, input_atom_dim, input_bond_dim, input_index_dim, fingerprint_dim,
    batch_size, output_dim, final_layer_type, fingerprint_network_architecture=[],\
    neural_net=[]):

    dropout_prob = 0.0
    network_vals = {}

    #take in input layers for the atom and the bonds
    l_in_atom = InputLayer(shape=(None,max_atom_len,input_atom_dim), \
        input_var=input_atom)
    l_in_bond = InputLayer(shape=(None,max_bond_len,input_bond_dim), \
        input_var=input_bonds)

    #take in layers for the indexing into the atoms and bonds
    l_index_atom = InputLayer(shape=(None,max_atom_len,input_index_dim), \
        input_var=input_atom_index)
    l_index_bond = InputLayer(shape=(None,max_atom_len,input_index_dim), \
        input_var=input_bond_index)

    #take in input for the mask
    l_mask = InputLayer(shape=(None,max_atom_len), input_var=input_mask)


    #get the number of hidden units for the first layer
    first_hidden_units_num = fingerprint_network_architecture[0]


    #get the embedding of the sequences we are encoding
    network_vals['hiddens_for_atoms'] = FingerprintHiddensLayer([l_index_atom,l_index_bond,\
        l_in_atom,l_in_bond,l_in_atom,l_mask], input_atom_dim, input_atom_dim,
        input_bond_dim,first_hidden_units_num,max_atom_len)

    #sparsify
    network_vals['sparse_for_atoms'] = SparsifyFingerprintLayer([network_vals['hiddens_for_atoms']],\
        first_hidden_units_num, fingerprint_dim)


    network_vals['fingerprints'] = FingerprintGen([network_vals['sparse_for_atoms'],l_mask])

    for i,curr_num_hiddens in enumerate(fingerprint_network_architecture[1:]):

        prev_hidden_units = fingerprint_network_architecture[i]

        network_vals['hiddens_for_atoms'] = FingerprintHiddensLayer([l_index_atom,l_index_bond,\
            l_in_atom,l_in_bond,network_vals['hiddens_for_atoms'],l_mask], prev_hidden_units,
            input_atom_dim, input_bond_dim, curr_num_hiddens, max_atom_len)

        network_vals['sparse_for_atoms'] = SparsifyFingerprintLayer([network_vals['hiddens_for_atoms']],\
            curr_num_hiddens, fingerprint_dim)

        newFingerprints = FingerprintGen([network_vals['sparse_for_atoms'],l_mask])

        network_vals['fingerprints'] =  FingerprintMerge([network_vals['fingerprints'],newFingerprints])

    #then run through the neural net on top of the fingerprint
    if neural_net != []:
        #do dropout
        network_vals['drop0'] = lasagne.layers.dropout(network_vals['fingerprints'],p=dropout_prob)

        #run through the layers I have
        for layerNum,hiddenUnits in enumerate(neural_net):
            oldLayerNum = layerNum
            currLayerNum = layerNum + 1
            network_vals['dense'+str(currLayerNum)] = DenseLayer(network_vals['drop'+str(oldLayerNum)], \
                hiddenUnits,nonlinearity=lasagne.nonlinearities.rectify)
            network_vals['drop'+str(currLayerNum)] = dropout(network_vals['dense'+str(currLayerNum)],p=dropout_prob)

        network_vals['final_out'] = network_vals['drop'+str(currLayerNum)]

    else:
        network_vals['final_out'] = network_vals['fingerprints']


    #finally, project it into the dimensionality we want
    network_vals['output'] = DenseLayer(network_vals['final_out'],num_units=output_dim, nonlinearity=final_layer_type)

    return network_vals
def build_critic(input_var=None, cond_var=None, n_conds=0, arch=0,
                 with_BatchNorm=True, loss_type='wgan'):
    from lasagne.layers import (
        InputLayer, Conv2DLayer, DenseLayer, MaxPool2DLayer, concat,
        dropout, flatten)
    from lasagne.nonlinearities import rectify, LeakyRectify
    from lasagne.init import GlorotUniform  # Normal
    lrelu = LeakyRectify(0.2)
    layer = InputLayer(
        shape=(None, 1, 128, 128), input_var=input_var, name='d_in_data')
    # init = Normal(0.02, 0.0)
    init = GlorotUniform()

    if cond_var:
        # class: from data or from generator input
        layer_cond = InputLayer(
            shape=(None, n_conds), input_var=cond_var, name='d_in_condition')
        layer_cond = BatchNorm(DenseLayer(
            layer_cond, 1024, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
    if arch == 'dcgan':
        # DCGAN inspired
        layer = BatchNorm(Conv2DLayer(
            layer, 32, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 64, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 512, 4, stride=2, pad=1, W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
    elif arch == 'cont-enc':
        # convolution layers
        layer = BatchNorm(Conv2DLayer(
            layer, 64, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 64, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 512, 4, stride=2, pad=1, W=init, nonlinearity=lrelu),
            with_BatchNorm)
    elif arch == 'mnist':
        # Jan Schluechter's MNIST discriminator
        # convolution layers
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 5, stride=2, pad='same', W=init, b=None,
            nonlinearity=lrelu), with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 5, stride=2, pad='same', W=init, b=None,
            nonlinearity=lrelu), with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 128, 5, stride=2, pad='same', W=init, b=None,
            nonlinearity=lrelu), with_BatchNorm)
        # layer = BatchNorm(Conv2DLayer(
        #     layer, 128, 5, stride=2, pad='same', W=init, b=None,
        #      nonlinearity=lrelu), with_BatchNorm)
        # fully-connected layer
        # layer = BatchNorm(DenseLayer(
        #     layer, 1024, W=init, b=None, nonlinearity=lrelu), with_BatchNorm)
    elif arch == 'lsgan':
        layer = batch_norm(Conv2DLayer(
            layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
        layer = batch_norm(Conv2DLayer(
            layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
        layer = batch_norm(Conv2DLayer(
            layer, 256, 5, stride=2, pad='same', nonlinearity=lrelu))
    elif arch == 'crepe':
        # CREPE
        # form words from sequence of characters
        layer = BatchNorm(Conv2DLayer(
            layer, 1024, (128, 7), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = MaxPool2DLayer(layer, (1, 3))
        # temporal convolution, 7-gram
        layer = BatchNorm(Conv2DLayer(
            layer, 512, (1, 7), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = MaxPool2DLayer(layer, (1, 3))
        # temporal convolution, 3-gram
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = BatchNorm(Conv2DLayer(
            layer, 256, (1, 3), W=init, b=None, nonlinearity=lrelu),
            with_BatchNorm)
        layer = flatten(layer)
        # fully-connected layers
        layer = dropout(DenseLayer(
            layer, 1024, W=init, b=None, nonlinearity=rectify))
        layer = dropout(DenseLayer(
            layer, 1024, W=init, b=None, nonlinearity=rectify))
    else:
        raise Exception("Model architecture {} is not supported".format(arch))

    if cond_var is not None:
        layer = DenseLayer(layer, 1024, nonlinearity=lrelu, b=None)
        layer = concat([layer, layer_cond])

    # output layer (linear and without bias)
    layer = DenseLayer(layer, 1, b=None, nonlinearity=None)
    print("Critic output:", layer.output_shape)
    return layer
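A hedged sketch of the WGAN critic objective this builder is aimed at (suggested by loss_type='wgan' in the signature; the generator output, the 'mnist' arch choice, and the RMSProp step size are assumptions, and weight clipping / gradient penalty is omitted): the critic maximises the mean score gap between real and generated samples.

import theano.tensor as T
import lasagne

real = T.tensor4('real')
fake = T.tensor4('fake')                                   # generator output, assumed given
critic = build_critic(input_var=real, arch='mnist')
score_real = lasagne.layers.get_output(critic)
score_fake = lasagne.layers.get_output(critic, inputs=fake)
critic_loss = -(score_real.mean() - score_fake.mean())     # minimise the negative Wasserstein gap
critic_params = lasagne.layers.get_all_params(critic, trainable=True)
critic_updates = lasagne.updates.rmsprop(critic_loss, critic_params, learning_rate=5e-5)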
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
domain_target_var = T.ivector('domain_targets')
num_classes = 2

net = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)

halfway_net = DenseLayer(net, num_units=1024)

net = DenseLayer(halfway_net,
                 num_units=num_classes,
                 nonlinearity=lasagne.nonlinearities.softmax)

# gradient reversal branch
gr_branch = DenseLayer(halfway_net, num_units=512)
gr_branch = DenseLayer(dropout(gr_branch),
                       num_units=2,
                       nonlinearity=lasagne.nonlinearities.softmax)

###################################
# Define and compile Theano

# I.e., you've got one output layer for the source task classification, and another output layer for the domain classification, and both share the same input layer (and a part of the network).
# You'd then define two ordinary loss functions:
pred_sourcetask, pred_domainclass = lasagne.layers.get_output(
    [net, gr_branch])
loss_sourcetask = lasagne.objectives.categorical_crossentropy(
    pred_sourcetask, target_var).mean()
loss_domainclass = lasagne.objectives.categorical_crossentropy(
    pred_domainclass, domain_target_var).mean()
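Continuing the comment above, a hedged sketch of how the two losses might be combined and compiled (the 0.1 weighting and plain SGD are arbitrary choices for illustration, and theano itself is assumed to be imported in this script; true gradient reversal would additionally flip the sign of the domain gradient flowing into the shared layers):

total_loss = loss_sourcetask + 0.1 * loss_domainclass
params = lasagne.layers.get_all_params([net, gr_branch], trainable=True)
updates = lasagne.updates.sgd(total_loss, params, learning_rate=0.01)
train_fn = theano.function([input_var, target_var, domain_target_var],
                           [loss_sourcetask, loss_domainclass],
                           updates=updates)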
Example #57
def build_triamese_gamma(
    inputlist, imgh=50, imgw=50, convpooldictlist=None, nhidden=None, dropoutp=None, noutputs=11, depth=1
):
    """
    'triamese' model (one branch for each view, feeding a fully-connected network),
    using two layers of convolutions and no pooling.
    """
    net = {}
    # Input layer
    input_var_x, input_var_u, input_var_v = inputlist[0], inputlist[1], inputlist[2]
    tshape = (None, depth, imgw, imgh)
    net["input-x"] = InputLayer(shape=tshape, input_var=input_var_x)
    net["input-u"] = InputLayer(shape=tshape, input_var=input_var_u)
    net["input-v"] = InputLayer(shape=tshape, input_var=input_var_v)

    if convpooldictlist is None:
        convpooldictlist = []
        convpool1dict = {}
        convpool1dict["nfilters"] = 32
        convpool1dict["filter_size"] = (3, 3)
        convpooldictlist.append(convpool1dict)
        convpool2dict = {}
        convpool2dict["nfilters"] = 16
        convpool2dict["filter_size"] = (3, 3)
        convpooldictlist.append(convpool2dict)

    if nhidden is None:
        nhidden = 256

    if dropoutp is None:
        dropoutp = 0.5

    def make_branch(view, input_layer, cpdictlist, nhidden=256, dropoutp=0.5):
        """
        see: http://lasagne.readthedocs.org/en/latest/modules/layers.html
        convolution only - no pooling
        """
        net = {}
        convname = ""
        prev_layername = ""
        for i, cpdict in enumerate(cpdictlist):
            convname = "conv-{}-{}".format(view, i)
            logger.info("Convpool {} params: {}".format(convname, cpdict))
            # the first time through, use the input layer; afterwards, use the last
            # layer from the previous iteration
            if i == 0:
                layer = input_layer
            else:
                layer = net[prev_layername]
            net[convname] = Conv2DLayer(
                layer,
                num_filters=cpdict["nfilters"],
                filter_size=cpdict["filter_size"],
                nonlinearity=lasagne.nonlinearities.rectify,
                W=lasagne.init.GlorotUniform(),
            )
            prev_layername = convname
        densename = "dense-{}".format(view)
        net[densename] = DenseLayer(
            dropout(net[convname], p=dropoutp), num_units=nhidden, nonlinearity=lasagne.nonlinearities.rectify
        )
        logger.info("Dense {} with nhidden = {}, dropout = {}".format(densename, nhidden, dropoutp))
        return net

    net.update(make_branch("x", net["input-x"], convpooldictlist, nhidden, dropoutp))
    net.update(make_branch("u", net["input-u"], convpooldictlist, nhidden, dropoutp))
    net.update(make_branch("v", net["input-v"], convpooldictlist, nhidden, dropoutp))

    # Concatenate the three parallel branches
    net["concat"] = ConcatLayer((net["dense-x"], net["dense-u"], net["dense-v"]))
    logger.info("Network: concat columns...")

    # One more dense layer
    net["dense-across"] = DenseLayer(
        dropout(net["concat"], p=dropoutp), num_units=(nhidden // 2), nonlinearity=lasagne.nonlinearities.rectify
    )
    logger.info("Dense {} with nhidden = {}, dropout = {}".format("dense-across", nhidden // 2, dropoutp))

    # And, finally, the `noutputs`-unit output layer
    net["output_prob"] = DenseLayer(
        net["dense-across"], num_units=noutputs, nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Softmax output prob with n_units = {}".format(noutputs))

    logger.info("n-parameters: {}".format(lasagne.layers.count_params(net["output_prob"])))
    return net["output_prob"]
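A minimal sketch of instantiating the triamese network (the tensor names are assumptions): the three view tensors are passed as inputlist, and the softmax output layer is returned, from which a deterministic prediction expression can be built.

import theano.tensor as T
import lasagne

input_x, input_u, input_v = T.tensor4('x'), T.tensor4('u'), T.tensor4('v')
output_layer = build_triamese_gamma([input_x, input_u, input_v], imgh=50, imgw=50, noutputs=11)
probs = lasagne.layers.get_output(output_layer, deterministic=True)  # dropout disabled for inference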