Example #1
def build_model1(input_shape, num_output=2, input_var=None, depth=1, num_units=79, num_rbf=0, nonlin=rect):
    """Each hidden layer concatenates `num_rbf` sigmoid ("rbf") units with rectifier
    (`nonlin`) units; all hidden layers use the same split, only the output layer differs.
    If dropout were enabled, it would be applied at 50% probability (currently commented out).
    """

    assert num_rbf <= num_units
    l = layers.InputLayer(input_shape, input_var=input_var)

    for d in range(depth):
        if num_rbf == 0:
            l = layers.DenseLayer(l, num_units=num_units, nonlinearity=nonlin())
        else:
            l1 = layers.DenseLayer(l, num_units=(num_units - num_rbf), nonlinearity=nonlin())
            l2 = layers.DenseLayer(l, num_units=num_rbf, nonlinearity=sigm)
            l = layers.ConcatLayer([l1, l2])
#        if dropout:
#            l = layers.DropoutLayer(l, p=0.5)

    if True:  # the two-unit split output is always used; the generic branch is kept for reference
        assert num_output == 2
        l2 = layers.DenseLayer(l, num_units=1, nonlinearity=nonlin())
        l1 = layers.DenseLayer(l, num_units=1, nonlinearity=sigm)
        l = layers.ConcatLayer([l1, l2])
    else:
        l = layers.DenseLayer(l, num_units=num_output)
    return l
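The split-activation pattern above (rectifier units and sigmoid "rbf" units feeding a ConcatLayer) can be reproduced in a few self-contained lines. The sketch below is illustrative only and uses plain Lasagne/Theano imports instead of the module-level `layers`, `rect` and `sigm` names assumed by the example.

# Minimal self-contained sketch of the split-activation ConcatLayer pattern
# (illustrative sizes; not part of the original code base).
import theano.tensor as T
from lasagne import layers
from lasagne.nonlinearities import rectify, sigmoid

x = T.matrix('x')
l_in = layers.InputLayer((None, 20), input_var=x)
l_relu = layers.DenseLayer(l_in, num_units=48, nonlinearity=rectify)   # "relu" part
l_sigm = layers.DenseLayer(l_in, num_units=16, nonlinearity=sigmoid)   # "rbf" part
l_hidden = layers.ConcatLayer([l_relu, l_sigm], axis=1)                # 48 + 16 = 64 features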
Example #2
def concat_tcn(_top, _cond, _seed, start=0, num_slices=1):
    _cond1 = create_conditon_slices_from(_cond, _top.output_shape)
    if num_slices > 0:
        _seed1, n = create_slices_from(_seed, _top.output_shape, start=start, num_slices=num_slices)
        return L.ConcatLayer([_top, _cond1, _seed1], axis=1), start + n
    else:
        return L.ConcatLayer([_top, _cond1], axis=1), start
Example #3
    def __init__(self, incomings, num_units_hidden_common, dim_z, beta):
        '''
         params:
             incomings: input layers, [image, label]
             num_units_hidden_common: num_units_hidden for all BasicLayers.
             dim_z: the dimension of z and num_units_output for the encoder's BasicLayer.
        '''

        super(SemiVAE, self).__init__(incomings)
        self.mrg_srng = MRG_RandomStreams()  # random generator

        self.incomings = incomings
        self.num_classes = incomings[1].output_shape[1]
        self.num_units_hidden_common = num_units_hidden_common
        self.dim_z = dim_z
        self.beta = beta

        self.concat_xy = layers.ConcatLayer(self.incomings, axis=1)

        self.encoder = BasicLayer(
            self.concat_xy,
            num_units_hidden=self.num_units_hidden_common,
        )

        self.encoder_mu = layers.DenseLayer(
            self.encoder, self.dim_z, nonlinearity=nonlinearities.identity)

        self.encoder_log_var = layers.DenseLayer(
            self.encoder, self.dim_z, nonlinearity=nonlinearities.identity)

        [image_input, label_input] = self.incomings
        self.dim_image = image_input.output_shape[1]
        print('dim_image: ', self.dim_image)

        # merge encoder_mu and encoder_log_var to get z.
        self.sampler = SamplerLayer([self.encoder_mu, self.encoder_log_var])

        self.concat_yz = layers.ConcatLayer([label_input, self.sampler],
                                            axis=1)
        self.decoder = BasicLayer(
            self.concat_yz, num_units_hidden=self.num_units_hidden_common)

        self.decoder_x = layers.DenseLayer(self.decoder,
                                           num_units=self.dim_image,
                                           nonlinearity=nonlinearities.sigmoid)

        self.classifier_helper = BasicLayer(
            self.incomings[0], num_units_hidden=self.num_units_hidden_common)

        self.classifier = layers.DenseLayer(
            self.classifier_helper,
            num_units=self.num_classes,
            nonlinearity=nonlinearities.softmax,
        )
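SamplerLayer and BasicLayer are defined elsewhere in that code base and are not shown here. A plausible minimal SamplerLayer, sketched below purely for illustration, merges [mu, log_var] and draws z with the usual VAE reparameterization trick; the real implementation may differ.

# Hypothetical SamplerLayer sketch (the original implementation is not shown):
# z = mu + exp(0.5 * log_var) * eps, with eps ~ N(0, 1).
import theano.tensor as T
from lasagne import layers
from theano.sandbox.rng_mrg import MRG_RandomStreams

class SamplerLayer(layers.MergeLayer):
    def __init__(self, incomings, **kwargs):
        super(SamplerLayer, self).__init__(incomings, **kwargs)
        self._srng = MRG_RandomStreams()

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0]  # same shape as mu

    def get_output_for(self, inputs, **kwargs):
        mu, log_var = inputs
        eps = self._srng.normal(mu.shape)
        return mu + T.exp(0.5 * log_var) * eps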
Example #4
 def build_network(self):
     l_char1_in = L.InputLayer(shape=(None, None, self.max_word_len),
                               input_var=self.inps[0])
     l_char2_in = L.InputLayer(shape=(None, None, self.max_word_len),
                               input_var=self.inps[1])
     l_mask1_in = L.InputLayer(shape=(None, None, self.max_word_len),
                               input_var=self.inps[2])
     l_mask2_in = L.InputLayer(shape=(None, None, self.max_word_len),
                               input_var=self.inps[3])
     l_char_in = L.ConcatLayer([l_char1_in, l_char2_in],
                               axis=1)  # B x (ND+NQ) x L
     l_char_mask = L.ConcatLayer([l_mask1_in, l_mask2_in], axis=1)
     shp = (self.inps[0].shape[0],
            self.inps[0].shape[1] + self.inps[1].shape[1],
            self.inps[1].shape[2])
     l_index_reshaped = L.ReshapeLayer(l_char_in,
                                       (shp[0] * shp[1], shp[2]))  # BN x L
     l_mask_reshaped = L.ReshapeLayer(l_char_mask,
                                      (shp[0] * shp[1], shp[2]))  # BN x L
     l_lookup = L.EmbeddingLayer(l_index_reshaped, self.num_chars,
                                 self.char_dim)  # BN x L x D
     l_fgru = L.GRULayer(l_lookup,
                         2 * self.char_dim,
                         grad_clipping=10,
                         gradient_steps=-1,
                         precompute_input=True,
                         only_return_final=True,
                         mask_input=l_mask_reshaped)
     l_bgru = L.GRULayer(l_lookup,
                         2 * self.char_dim,
                         grad_clipping=10,
                         gradient_steps=-1,
                         precompute_input=True,
                         backwards=True,
                         only_return_final=True,
                         mask_input=l_mask_reshaped)  # BN x 2D
     l_fwdembed = L.DenseLayer(l_fgru,
                               self.embed_dim / 2,
                               nonlinearity=None)  # BN x DE
     l_bckembed = L.DenseLayer(l_bgru,
                               self.embed_dim / 2,
                               nonlinearity=None)  # BN x DE
     l_embed = L.ElemwiseSumLayer([l_fwdembed, l_bckembed], coeffs=1)
     l_char_embed = L.ReshapeLayer(l_embed,
                                   (shp[0], shp[1], self.embed_dim / 2))
     l_embed1 = L.SliceLayer(l_char_embed,
                             slice(0, self.inps[0].shape[1]),
                             axis=1)
     l_embed2 = L.SliceLayer(l_char_embed,
                             slice(-self.inps[1].shape[1], None),
                             axis=1)
     return l_embed1, l_embed2
Example #5
def init_cnn(model_file, hidden_units, num_filters, filter_hs, dropout_rate,
             n_words, n_dim):
    """
    Initializes the CNN by loading the weights of a previously trained model. Note that the
    previously trained model and this one must be built with the same parameters so that the
    weight shapes match. See trainCNN.py for an explanation of the network architecture.
    :param model_file:
    :param hidden_units:
    :param num_filters:
    :param filter_hs:
    :param dropout_rate:
    :param n_words:
    :param n_dim:
    :return:
    """
    assert len(num_filters) == len(filter_hs)
    filter_shapes = []
    pool_sizes = []
    for filter_h in filter_hs:
        filter_shapes.append((filter_h, n_dim))
        pool_sizes.append((n_words - filter_h + 1, 1))

    l_in = LL.InputLayer(shape=(None, 1, n_words, n_dim))

    layer_list = []
    for i in range(len(filter_hs)):
        l_conv = LL.Conv2DLayer(l_in,
                                num_filters=num_filters[i],
                                filter_size=filter_shapes[i],
                                nonlinearity=L.nonlinearities.rectify,
                                W=L.init.HeNormal(gain='relu'))
        l_pool = LL.MaxPool2DLayer(l_conv, pool_size=pool_sizes[i])
        layer_list.append(l_pool)

    mergedLayer = LL.ConcatLayer(layer_list)

    l_hidden1 = LL.DenseLayer(mergedLayer,
                              num_units=hidden_units[0],
                              nonlinearity=L.nonlinearities.tanh,
                              W=L.init.HeNormal(gain='relu'))
    l_hidden1_dropout = LL.DropoutLayer(l_hidden1, p=dropout_rate[0])

    l_hidden2 = LL.DenseLayer(l_hidden1_dropout,
                              num_units=hidden_units[1],
                              nonlinearity=L.nonlinearities.tanh,
                              W=L.init.HeNormal(gain='relu'))
    l_hidden2_dropout = LL.DropoutLayer(l_hidden2, p=dropout_rate[1])

    l_output = LL.DenseLayer(l_hidden2_dropout,
                             num_units=hidden_units[2],
                             nonlinearity=L.nonlinearities.tanh)

    net_output = theano.function([l_in.input_var],
                                 LL.get_output(l_output, deterministic=True))

    with np.load(model_file) as f:
        param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    LL.set_all_param_values(l_output, param_values)

    return net_output
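The .npz file read above must contain the parameter values in the same order that get_all_param_values produces them. A hedged sketch of the matching save side (the actual trainCNN.py is not shown) could look like this:

# Sketch of how model_file could be written after training
# (mirrors the loading code above; not the original trainCNN.py).
import numpy as np
import lasagne.layers as LL

def save_cnn_weights(output_layer, model_file):
    np.savez(model_file, *LL.get_all_param_values(output_layer))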
Example #6
def pons_cnn(params):
    """"""
    layers = L.InputLayer((None, 1, params['dur'], 128))
    print layers.output_shape

    sclr = joblib.load(params['scaler'])
    layers = L.standardize(layers,
                           sclr.mean_.astype(np.float32),
                           sclr.scale_.astype(np.float32),
                           shared_axes=(0, 1, 2))
    print layers.output_shape

    layers_timbre = L.GlobalPoolLayer(
        L.batch_norm(L.Conv2DLayer(layers, 64, (1, 96))))

    layers_rhythm = L.GlobalPoolLayer(
        L.batch_norm(L.Conv2DLayer(layers, 64, (params['dur'] - 10, 1))))

    layers = L.ConcatLayer([layers_rhythm, layers_timbre], axis=-1)

    layers = L.DenseLayer(layers, 64, nonlinearity=nl.rectify)
    print layers.output_shape

    layers = L.DenseLayer(layers, 16, nonlinearity=nl.softmax)
    print layers.output_shape

    return layers
Example #7
def build_network():
    l_in = L.InputLayer((None, SEQUENCE_LEN, 256))
    l_forward = L.RecurrentLayer(l_in, num_units=16)
    l_backward = L.RecurrentLayer(l_in, num_units=16, backwards=True)
    l_concat = L.ConcatLayer([l_forward, l_backward])
    l_out = L.DenseLayer(l_concat, num_units=2, nonlinearity=T.nnet.softmax)
    return l_out
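The layer returned by build_network is typically consumed by compiling a Theano function over its symbolic output. A minimal, illustrative sketch (assuming the same L/T aliases as in the example):

# Illustrative compilation of the bidirectional-RNN classifier above.
import theano
import lasagne.layers as L

l_out = build_network()
l_in = L.get_all_layers(l_out)[0]                   # the InputLayer created inside
probs = L.get_output(l_out, deterministic=True)
predict_fn = theano.function([l_in.input_var], probs)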
Example #8
    def __build_48_net__(self):

        model24 = self.subnet
        network = layers.InputLayer((None, 3, 48, 48),
                                    input_var=self.__input_var__)
        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.batch_norm(
            layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2))
        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.BatchNormLayer(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
        network = layers.DenseLayer(network, num_units=256, nonlinearity=relu)
        #network = layers.Conv2DLayer(network,num_filters=256,filter_size=(1,1),stride=1,nonlinearity=relu)
        denselayer24 = model24.net.input_layer
        network = layers.ConcatLayer([network, denselayer24])
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
Example #9
    def build_network(self, ra_input_var, mc_input_var):
        print('Building raw dnn with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        ra_network_1 = layers.InputLayer((None, 1, 3969), ra_input_var)
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_1', dropout=False, pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_1')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_2', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_2')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_3', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_3')
        ra_network_1 = self.set_conv_layer(ra_network_1, 'ra_conv_4', pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_4')
        concat_list = [ra_network_1]
        mc_input = layers.InputLayer((None, 2, MC_LENGTH), mc_input_var)
        concat_list.append(mc_input)
        network = layers.ConcatLayer(concat_list, axis=1, cropping=[None, None, 'center'])
        network = layers.BatchNormLayer(network)
        for n in self.net_opts['layer_list']:
            network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']), 
                                            n, 
                                            nonlinearity=lasagne.nonlinearities.rectify)
        network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']), 
                                        self.net_opts['num_class'], 
                                        nonlinearity=lasagne.nonlinearities.softmax)
        
        # print(layers.get_output_shape(network))
        self.network = network
        return self.network
Example #10
def load_check_point(train_id, path=None):
    """
    """
    if path is None:
        path = os.getcwd()

    param_fn = os.path.join(path, str(train_id) + '.param.npz')
    config_fn = os.path.join(path, str(train_id) + '.nnconfig.gz')
    params = joblib.load(config_fn)
    mdl, net = build(network(params), params)
    layers = L.get_all_layers(L.ConcatLayer(net.values(), axis=1))

    if os.path.exists(param_fn):
        try:
            print('Loading pre-trained weights...')
            with np.load(param_fn) as f:
                param_values = [f['arr_%d' % i] for i in range(len(f.files))]
            L.set_all_param_values(layers, param_values)
        except Exception as e:
            print(e)
            print('Cannot load parameters!')
    else:
        print('Cannot find parameters!')

    return net, mdl, params
Example #11
def concat_tn(_top, _seed, start=0, num_slices=1):
    if _top is None:
        return L.SliceLayer(_seed, indices=slice(start, start + num_slices), axis=1), start + num_slices
    elif num_slices > 0:
        _seed1, n = create_slices_from(_seed, _top.output_shape, start=start, num_slices=num_slices)
        return L.ConcatLayer([_top, _seed1], axis=1), start + n
    else:
        return _top, start
Example #12
def output_block(net, config, non_lin, verbose=True):
    """
    """
    # output setting
    out_acts = []
    for out_act in config.hyper_parameters.out_act:
        exec('from lasagne.nonlinearities import {}'.format(out_act))
        out_acts.append(eval(out_act))
    n_outs = config.hyper_parameters.n_out

    # Global Average Pooling
    last_conv_block_name = next(reversed(net))
    net['gap'] = L.GlobalPoolLayer(net[last_conv_block_name], name='gap')
    net['gap.bn'] = L.BatchNormLayer(net['gap'], name='gap.bn')
    n_features = net['gap.bn'].output_shape[-1]

    # feature Layer
    net['fc'] = L.dropout(L.batch_norm(
        L.DenseLayer(net['gap.bn'],
                     num_units=n_features,
                     nonlinearity=non_lin,
                     name='fc')),
                          name='fc.bn.do')

    # output (prediction)
    # check whether the model is for MTL or STL;
    # targets are passed as a list regardless of whether it's MTL or STL
    # (the configuration checker verifies this)
    targets = config.target
    out_layer_names = []
    for target, n_out, out_act in zip(targets, n_outs, out_acts):

        out_layer_names.append('out.{}'.format(target))

        if target == 'self':
            net[out_layer_names[-1]], inputs = build_siamese(net['fc'])
        else:
            net[out_layer_names[-1]] = L.DenseLayer(net['fc'],
                                                    num_units=n_out,
                                                    nonlinearity=out_act,
                                                    name=out_layer_names[-1])
            inputs = [net['input'].input_var]

    # make a concatenation layer just for save/load purposes
    net['IO'] = L.ConcatLayer([
        L.FlattenLayer(net[target_layer_name])
        if target == 'self' else net[target_layer_name]
        for target_layer_name in out_layer_names
    ],
                              name='IO')

    if verbose:
        print(net['gap.bn'].output_shape)
        print(net['fc'].output_shape)
        for target in targets:
            print(net['out.{}'.format(target)].output_shape)

    return net, inputs
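The 'IO' ConcatLayer noted above exists only so that every output head hangs off a single root layer, which makes whole-network save/load one-liners, in the same spirit as save_check_point/load_check_point elsewhere in this listing. A hedged sketch:

# Sketch of saving/restoring all parameters through the single 'IO' root
# (names follow the net dict built above; not part of the original module).
import numpy as np
import lasagne.layers as L

def save_all(net, path):
    np.savez(path, *L.get_all_param_values(net['IO']))

def load_all(net, path):
    with np.load(path) as f:
        values = [f['arr_%d' % i] for i in range(len(f.files))]
    L.set_all_param_values(net['IO'], values)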
Example #13
    def __init__(self, output_size, meta_size, depth=2):

        encoder_sizes = [64, 64, 64]

        input_var = TT.matrix()
        meta_var = TT.matrix()
        target_var = TT.matrix()
        mask_var = TT.matrix()

        input_layer = layers.InputLayer((None, output_size), input_var=input_var)
        meta_layer = layers.InputLayer((None, meta_size), input_var=meta_var)
        concat_input_layer = layers.ConcatLayer([input_layer, meta_layer])
        dense = concat_input_layer

        for idx in xrange(depth):
            dense = layers.DenseLayer(dense, encoder_sizes[idx])
            dense = layers.batch_norm(dense)

        mu_and_logvar = layers.DenseLayer(dense, 2 * output_size, nonlinearity=nonlinearities.linear)
        mu = layers.SliceLayer(mu_and_logvar, slice(0, output_size), axis=1)
        log_var = layers.SliceLayer(mu_and_logvar, slice(output_size, None), axis=1)

        loss = neg_log_likelihood2(
            target_var,
            layers.get_output(mu),
            layers.get_output(log_var),
            mask_var
        ).mean()

        test_loss = neg_log_likelihood2(
            target_var,
            layers.get_output(mu, deterministic=True),
            layers.get_output(log_var, deterministic=True),
            mask_var
        ).mean()

        params = layers.get_all_params(mu_and_logvar, trainable=True)
        param_updates = updates.adadelta(loss, params)

        self._train_fn = theano.function(
            [input_var, meta_var, target_var],
            updates=param_updates,
            outputs=loss
        )

        self._loss_fn = theano.function(
            [input_var, meta_var, target_var],
            outputs=test_loss
        )

        self._predict_fn = theano.function(
            [input_var, meta_var],
            outputs=[
                layers.get_output(mu, deterministic=True),
                layers.get_output(log_var, deterministic=True)
            ]
        )
Example #14
    def get_generator(self, meanx, z0, y_1hot):
        ''' specify generator G0, gen_x = G0(z0, h1) '''
        """
        #z0 = theano_rng.uniform(size=(self.args.batch_size, 16)) # uniform noise
        gen0_layers = [LL.InputLayer(shape=(self.args.batch_size, 50), input_var=z0)] # Input layer for z0
        gen0_layers.append(nn.batch_norm(LL.DenseLayer(nn.batch_norm(LL.DenseLayer(gen0_layers[0], num_units=128, W=Normal(0.02), nonlinearity=nn.relu)),
                          num_units=128, W=Normal(0.02), nonlinearity=nn.relu))) # embedding, 50 -> 128
        gen0_layer_z_embed = gen0_layers[-1] 

        #gen0_layers.append(LL.InputLayer(shape=(self.args.batch_size, 256), input_var=real_fc3)) # Input layer for real_fc3 in independent training, gen_fc3 in joint training
        gen0_layers.append(LL.InputLayer(shape=(self.args.batch_size, 10), input_var=y_1hot)) # Input layer for real_fc3 in independent training, gen_fc3 in joint training
        gen0_layer_fc3 = gen0_layers[-1]

        gen0_layers.append(LL.ConcatLayer([gen0_layer_fc3,gen0_layer_z_embed], axis=1)) # concatenate noise and fc3 features
        gen0_layers.append(LL.ReshapeLayer(nn.batch_norm(LL.DenseLayer(gen0_layers[-1], num_units=256*5*5, W=Normal(0.02), nonlinearity=T.nnet.relu)),
                         (self.args.batch_size,256,5,5))) # fc
        gen0_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen0_layers[-1], (self.args.batch_size,256,10,10), (5,5), stride=(2, 2), padding = 'half',
                         W=Normal(0.02),  nonlinearity=nn.relu))) # deconv
        gen0_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen0_layers[-1], (self.args.batch_size,128,14,14), (5,5), stride=(1, 1), padding = 'valid',
                         W=Normal(0.02),  nonlinearity=nn.relu))) # deconv

        gen0_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen0_layers[-1], (self.args.batch_size,128,28,28), (5,5), stride=(2, 2), padding = 'half',
                         W=Normal(0.02),  nonlinearity=nn.relu))) # deconv
        gen0_layers.append(nn.Deconv2DLayer(gen0_layers[-1], (self.args.batch_size,3,32,32), (5,5), stride=(1, 1), padding = 'valid',
                         W=Normal(0.02),  nonlinearity=T.nnet.sigmoid)) # deconv

        gen_x_pre = LL.get_output(gen0_layers[-1], deterministic=False)
        gen_x = gen_x_pre - meanx
        # gen_x_joint = LL.get_output(gen0_layers[-1], {gen0_layer_fc3: gen_fc3}, deterministic=False) - meanx

        return gen0_layers, gen_x 
        """
        gen_x_layer_z = LL.InputLayer(shape=(self.args.batch_size, self.args.z0dim), input_var=z0) # z, 20
        # gen_x_layer_z_embed = nn.batch_norm(LL.DenseLayer(gen_x_layer_z, num_units=128), g=None) # 20 -> 64

        gen_x_layer_y = LL.InputLayer(shape=(self.args.batch_size, 10), input_var=y_1hot) # conditioned on real fc3 activations
        gen_x_layer_y_z = LL.ConcatLayer([gen_x_layer_y,gen_x_layer_z],axis=1) #512+256 = 768
        gen_x_layer_pool2 = LL.ReshapeLayer(nn.batch_norm(LL.DenseLayer(gen_x_layer_y_z, num_units=256*5*5)), (self.args.batch_size,256,5,5))
        gen_x_layer_dconv2_1 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_pool2, (self.args.batch_size,256,10,10), (5,5), stride=(2, 2), padding = 'half',
                         W=Normal(0.02),  nonlinearity=nn.relu))
        gen_x_layer_dconv2_2 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_dconv2_1, (self.args.batch_size,128,14,14), (5,5), stride=(1, 1), padding = 'valid',
                         W=Normal(0.02),  nonlinearity=nn.relu))

        gen_x_layer_dconv1_1 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_dconv2_2, (self.args.batch_size,128,28,28), (5,5), stride=(2, 2), padding = 'half',
                         W=Normal(0.02),  nonlinearity=nn.relu))
        gen_x_layer_x = nn.Deconv2DLayer(gen_x_layer_dconv1_1, (self.args.batch_size,3,32,32), (5,5), stride=(1, 1), padding = 'valid',
                         W=Normal(0.02),  nonlinearity=T.nnet.sigmoid)
        # gen_x_layer_x = dnn.Conv2DDNNLayer(gen_x_layer_dconv1_2, 3, (1,1), pad=0, stride=1, 
        #                  W=Normal(0.02), nonlinearity=T.nnet.sigmoid)

        gen_x_layers = [gen_x_layer_z, gen_x_layer_y, gen_x_layer_y_z, gen_x_layer_pool2, gen_x_layer_dconv2_1, 
            gen_x_layer_dconv2_2, gen_x_layer_dconv1_1, gen_x_layer_x]

        gen_x_pre = LL.get_output(gen_x_layer_x, deterministic=False)
        gen_x = gen_x_pre - meanx

        return gen_x_layers, gen_x 
Example #15
def get_model(inp, patch_op):

    # NOTE: the original snippet used `icnn` without defining it; it is defined
    # here as in the fuller get_model variant later in this listing.
    icnn = LL.DenseLayer(inp, 16)
    icnn1 = batch_norm(utils_lasagne.GCNNLayer([icnn, patch_op], 16, nrings=5, nrays=16))
    ffn1 = icnn1

    ffn4 = LL.ConcatLayer([inp, ffn1], axis=1, cropping=None)

    ffn = LL.DenseLayer(ffn4, nclasses, nonlinearity=utils_lasagne.log_softmax)
    return ffn
Example #16
def build_cnn():
    data_size = (None, 10, 100)  # batch size x words x characters per word

    input_var = T.tensor3(name="input", dtype='int64')

    values = np.array(np.random.randint(0, 1, (5, 10, 100)))
    input_var.tag.test_value = values
    input_layer = L.InputLayer(data_size, input_var=input_var)

    W = create_char_embedding_matrix()

    embed_layer = L.EmbeddingLayer(input_layer,
                                   input_size=102,
                                   output_size=101,
                                   W=W)

    reshape = L.reshape(embed_layer, (-1, 100, 101))
    dim_shuffle = L.dimshuffle(reshape, (0, 2, 1))
    #conv_layer_1 = L.Conv2DLayer(embed_layer, 4, (1), 1, 0)
    #pool_layer_1 = L.MaxPool1DLayer(conv_layer_1, pool_size=1)
    print L.get_output(dim_shuffle).tag.test_value.shape

    conv_layer_1 = L.Conv1DLayer(dim_shuffle, 50, 2, 1)

    print L.get_output(conv_layer_1).tag.test_value.shape
    print "TEST"
    pool_layer_1 = L.MaxPool1DLayer(conv_layer_1, pool_size=99)
    print L.get_output(pool_layer_1).tag.test_value.shape
    reshape_conv_1 = L.reshape(pool_layer_1, (-1, 50))

    conv_layer_2 = L.Conv1DLayer(dim_shuffle, 50, 3, 1)
    pool_layer_2 = L.MaxPool1DLayer(conv_layer_2, pool_size=98)
    reshape_conv_2 = L.reshape(pool_layer_2, (-1, 50))

    merge_layer = L.ConcatLayer([reshape_conv_1, reshape_conv_2], 1)
    print L.get_output(merge_layer).tag.test_value.shape
    reshape_output = L.reshape(merge_layer, (-1, 10, 100))
    print L.get_output(reshape_output).tag.test_value.shape

    x = T.tensor3(name="testname", dtype='int32')
    #x = T.imatrix()
    #output = L.get_output(conv_layer_1,x)

    #f = theano.function([x],output)

    word = unicode("Tat")
    word_index = np.array([])

    #print word_index

    #x_test = np.array([word_index]).astype('int32')
    #print f(x_test)

    return reshape_output
Example #17
def save_check_point(network, params, train_id, path=None):
    """"""
    layers = L.get_all_layers(L.ConcatLayer(network.values(), axis=1))

    if path is None:
        path = os.getcwd()
    param_fn = os.path.join(path, str(train_id) + '.param')
    config_fn = os.path.join(path, str(train_id) + '.nnconfig.gz')

    np.savez(param_fn, *lasagne.layers.get_all_param_values(layers))
    joblib.dump(params, config_fn)
Example #18
def build_block(
    incoming,
    num_layers,
    num_filters,
    use_linear_skip=True,
    filter_size=3,
    p=0.1,
    W_init=lasagne.init.GlorotUniform(),
    b_init=None,
    nonlinearity=lasagne.nonlinearities.rectify,
):
    """Builds a block in the DenseNet model."""

    feature_maps = [incoming]

    for i in xrange(num_layers):

        if len(feature_maps) == 1:
            network = incoming
        else:
            network = nn.ConcatLayer(feature_maps, axis=1)

        network = nn.BatchNormLayer(network)
        network = nn.NonlinearityLayer(network, nonlinearity)
        network = nn.Conv2DLayer(network,
                                 num_filters,
                                 filter_size,
                                 pad='same',
                                 W=W_init,
                                 b=b_init)
        if p > 0:
            network = nn.DropoutLayer(network, p=p)
        feature_maps.append(network)

    # Whether to return all connections (vanilla DenseNet), or to return only
    # those feature maps created in the current block used in upscale path for
    # semantic segmentation (100 layer tiramisu)
    if use_linear_skip:
        return nn.ConcatLayer(feature_maps, axis=1)
    return nn.ConcatLayer(feature_maps[1:], axis=1)
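As a rough check of the concatenation bookkeeping: with use_linear_skip=True the block's output stacks the input channels plus num_layers * num_filters new feature maps. An illustrative call (assuming nn refers to lasagne.layers as in the example):

# Illustrative use of build_block; shapes are only a sanity check.
import lasagne
import lasagne.layers as nn

l_in = nn.InputLayer((None, 16, 32, 32))
block = build_block(l_in, num_layers=4, num_filters=12)
print(block.output_shape)  # (None, 16 + 4*12, 32, 32) == (None, 64, 32, 32)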
Example #19
def build_cnn(input):
    #data_size = (None,103,130)  # Batch size x Img Channels x Height x Width

    #input_var = T.tensor3(name = "input",dtype='int64')
    input_var = input

    #values = np.array(np.random.randint(0,102,(1,9,50)))

    #input_var.tag.test_value = values
    #number sentences x words x characters
    input_layer = L.InputLayer((None,9,50), input_var=input)

    W = create_char_embedding_matrix()

    embed_layer = L.EmbeddingLayer(input_layer, input_size=103,output_size=101, W=W)
    #print "EMBED", L.get_output(embed_layer).tag.test_value.shape
    reshape_embed = L.reshape(embed_layer,(-1,50,101))
    #print "reshap embed", L.get_output(reshape_embed).tag.test_value.shape
    conv_layer_1 = L.Conv1DLayer(reshape_embed, 55, 2)
    conv_layer_2 = L.Conv1DLayer(reshape_embed, 55, 3)
    #print "TEST"
    #print "Convolution Layer 1", L.get_output(conv_layer_1).tag.test_value.shape
    #print "Convolution Layer 2", L.get_output(conv_layer_2).tag.test_value.shape

    #flatten_conv_1 = L.flatten(conv_layer_1,3)
    #flatten_conv_2 = L.flatten(conv_layer_2,3)

    #reshape_max_1 = L.reshape(flatten_conv_1,(-1,49))
    #reshape_max_2 = L.reshape(flatten_conv_2, (-1,48))

    #print "OUTPUT Flatten1", L.get_output(flatten_conv_1).tag.test_value.shape
    #print "OUTPUT Flatten2", L.get_output(flatten_conv_2).tag.test_value.shape

    #print "OUTPUT reshape_max_1", L.get_output(reshape_max_1).tag.test_value.shape
    #print "OUTPUT reshape_max_2", L.get_output(reshape_max_2).tag.test_value.shape

    pool_layer_1 = L.MaxPool1DLayer(conv_layer_1, pool_size=54)
    pool_layer_2 = L.MaxPool1DLayer(conv_layer_2, pool_size=53)


    #print "OUTPUT POOL1", L.get_output(pool_layer_1).tag.test_value.shape
    #print "OUTPUT POOL2",L.get_output(pool_layer_2).tag.test_value.shape

    merge_layer = L.ConcatLayer([pool_layer_1, pool_layer_2], 1)

    flatten_merge = L.flatten(merge_layer, 2)
    reshape_merge = L.reshape(flatten_merge, (1,9,110))
    print L.get_output(reshape_embed).shape
    #print L.get_output(reshape_merge).tag.test_value.shape

    return reshape_merge, char_index_lookup
Example #20
    def build_network(self, ra_input_var, mc_input_var):
        print('Building raw network with parameters:')
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.net_opts)

        ra_network_1 = layers.InputLayer((None, 1, None), ra_input_var)
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_1',
                                           dropout=False,
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_1')
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_2',
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_2')
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_3',
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_3')
        ra_network_1 = self.set_conv_layer(ra_network_1,
                                           'ra_conv_4',
                                           pad='same')
        ra_network_1 = self.set_pool_layer(ra_network_1, 'ra_pool_4')
        concat_list = [ra_network_1]
        mc_input = layers.InputLayer((None, 2, None), mc_input_var)
        concat_list.append(mc_input)
        network = layers.ConcatLayer(concat_list,
                                     axis=1,
                                     cropping=[None, None, 'center'])
        network = self.set_conv_layer(network, 'conv_1')
        network = self.set_pool_layer(network, 'pool_1')
        network = self.set_conv_layer(network, 'conv_2')
        network = self.set_pool_layer(network, 'pool_2')
        network = self.set_conv_layer(network, 'conv_3')
        network = layers.GlobalPoolLayer(
            network, getattr(T, self.net_opts['global_pool_func']))
        # print(layers.get_output_shape(network))
        # network = layers.DenseLayer(layers.dropout(network, p=self.net_opts['dropout_p']),
        #                           self.net_opts['dens_1'],
        #                           nonlinearity=lasagne.nonlinearities.rectify)
        network = layers.DenseLayer(
            layers.dropout(network, p=self.net_opts['dropout_p']),
            self.net_opts['dens_2'],
            nonlinearity=lasagne.nonlinearities.rectify)
        network = layers.DenseLayer(
            layers.dropout(network, p=self.net_opts['dropout_p']),
            self.net_opts['num_class'],
            nonlinearity=lasagne.nonlinearities.softmax)
        # print(layers.get_output_shape(network))
        self.network = network
        return self.network
Example #21
    def __init__(self,
                 insize,
                 vocoder,
                 mlpg_wins=[],
                 hiddensize=256,
                 nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
                 nblayers=3,
                 bn_axes=None,
                 dropout_p=-1.0,
                 grad_clipping=50):
        if bn_axes is None:
            bn_axes = []  # Recurrent nets don't like batch norm [ref needed]
        model.Model.__init__(self, insize, vocoder, hiddensize)

        if len(bn_axes) > 0:
            warnings.warn(
                'ModelBLSTM: You are using bn_axes={}, but batch normalisation is supposed to make Recurrent Neural Networks (RNNs) unstable [ref. needed]'
                .format(bn_axes))

        l_hid = ll.InputLayer(shape=(None, None, insize),
                              input_var=self._input_values,
                              name='input_conditional')

        for layi in xrange(nblayers):
            layerstr = 'l' + str(1 + layi) + '_BLSTM{}'.format(hiddensize)

            fwd = layer_LSTM(l_hid,
                             hiddensize,
                             nonlinearity=nonlinearity,
                             backwards=False,
                             grad_clipping=grad_clipping,
                             name=layerstr + '.fwd')
            bck = layer_LSTM(l_hid,
                             hiddensize,
                             nonlinearity=nonlinearity,
                             backwards=True,
                             grad_clipping=grad_clipping,
                             name=layerstr + '.bck')
            l_hid = ll.ConcatLayer((fwd, bck), axis=2)

            # Add batch normalisation
            if len(bn_axes) > 0: l_hid = ll.batch_norm(l_hid, axes=bn_axes)

            # Add dropout (after batchnorm)
            if dropout_p > 0.0: l_hid = ll.dropout(l_hid, p=dropout_p)

        l_out = layer_final(l_hid, vocoder, mlpg_wins)

        self.init_finish(
            l_out
        )  # Has to be called at the end of the __init__ to print out the architecture, get the trainable params, etc.
Example #22
    def conv_cond_concat(self, aLayer, aY, aYSize):
        if True:
            '''
            aY: theano var.
            '''
            x_shape = aLayer.output_shape
            oned = T.ones((x_shape[0], aYSize, x_shape[2], x_shape[3]))
            y_oned = aY * oned  # [Ymin, Ymax]
            l_aY = ll.InputLayer(input_var=y_oned,
                                 shape=(x_shape[0], aYSize, x_shape[2],
                                        x_shape[3]))
            layer = ll.ConcatLayer([aLayer, l_aY], axis=1)
            return layer

        else:
            return aLayer
Example #23
def build_dist_feat_fnc(net,
                        target,
                        conv_feat_locs=[5, 10, 12, 17, 19],
                        fc_feat_locs=[24, 28]):
    """"""
    layers = L.get_all_layers(net[target])
    assert len(layers) == 30  # only works for standard deep conv2d
    feat = [L.GlobalPoolLayer(layers[l]) for l in conv_feat_locs]
    feat += [layers[l] for l in fc_feat_locs]
    feat = L.ConcatLayer(feat, axis=1)
    f = L.get_output(feat, deterministic=True)

    f_feat = {target: {}}
    f_feat[target]['transform'] = theano.function([layers[0].input_var],
                                                  f,
                                                  allow_input_downcast=True)
    return f_feat
Example #24
 def __build_24_net__(self):
     model12 = self.subnet
     network = layers.InputLayer((None, 3, 24, 24),
                                 input_var=self.__input_var__)
     network = layers.Conv2DLayer(network,
                                  num_filters=16,
                                  filter_size=(5, 5),
                                  stride=1,
                                  nonlinearity=relu)
     network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
     network = layers.DropoutLayer(network)
     network = layers.DenseLayer(network, num_units=128, nonlinearity=relu)
     #network = layers.Conv2DLayer(network,num_filters=128,filter_size=(1,1),stride=1,nonlinearity=relu)
     denselayer12 = model12.net.input_layer  # i.e., one layer before the output layer of model12
     network = layers.ConcatLayer(
         [network,
          denselayer12])  # concatenate with dense layer of this model
     network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
     return network
Example #25
def get_model(inp, patch_op):
    icnn = LL.DenseLayer(inp, 16)

    icnn1 = batch_norm(
        utils_lasagne.GCNNLayer([icnn, patch_op], 16, nrings=5, nrays=16))
    ffn1 = icnn1

    icnn2 = batch_norm(
        utils_lasagne.GCNNLayer([icnn1, patch_op], 32, nrings=5, nrays=16))
    ffn2 = icnn2

    icnn3 = batch_norm(
        utils_lasagne.GCNNLayer([icnn2, patch_op], 64, nrings=5, nrays=16))
    ffn3 = LL.DenseLayer(icnn3, 512)

    ffn4 = LL.ConcatLayer([inp, ffn1, ffn2, ffn3], axis=1, cropping=None)

    ffn = LL.DenseLayer(ffn4, nclasses, nonlinearity=utils_lasagne.log_softmax)
    return ffn
Example #26
def inceptionModule(input_layer, nfilters):
    inception_net = []
    inception_net.append(
        dnn.MaxPool2DDNNLayer(input_layer, pool_size=3, stride=1, pad=1))  #0
    inception_net.append(
        dnn.Conv2DDNNLayer(inception_net[-1],
                           nfilters[0],
                           1,
                           flip_filters=False))  #1

    inception_net.append(
        dnn.Conv2DDNNLayer(input_layer, nfilters[1], 1,
                           flip_filters=False))  #2

    inception_net.append(
        dnn.Conv2DDNNLayer(input_layer, nfilters[2], 1,
                           flip_filters=False))  #3
    inception_net.append(
        dnn.Conv2DDNNLayer(inception_net[-1],
                           nfilters[3],
                           3,
                           pad=1,
                           flip_filters=False))  #4

    inception_net.append(
        dnn.Conv2DDNNLayer(input_layer, nfilters[4], 1,
                           flip_filters=False))  #5
    inception_net.append(
        dnn.Conv2DDNNLayer(inception_net[-1],
                           nfilters[5],
                           5,
                           pad=2,
                           flip_filters=False))  #6

    inception_net.append(
        ll.ConcatLayer([
            inception_net[2],
            inception_net[4],
            inception_net[6],
            inception_net[1],
        ]))  #7

    return inception_net
Example #27
def build_transition_up(incoming,
                        incoming_skip,
                        layers_per_block,
                        growth_rate,
                        W_init=lasagne.init.GlorotUniform(),
                        b_init=None):
    """"Builds a transition in the DenseNet model. 

    Transitions consist of the sequence: Batch Normalization, 1x1 Convolution,
    2x2 Average Pooling. The channels can be compressed by specifying 
    0 < m <= 1, where num_channels = channels * m.
    """
    network = nn.TransposedConv2DLayer(incoming,
                                       growth_rate * layers_per_block,
                                       filter_size=3,
                                       stride=2,
                                       crop='valid',
                                       W=W_init,
                                       b=b_init)
    cropping = [None, None, 'center', 'center']
    return nn.ConcatLayer([network, incoming_skip], cropping=cropping)
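The cropping=[None, None, 'center', 'center'] argument is what lets the upsampled maps be concatenated with a skip connection whose spatial size is slightly different: non-concatenation axes are center-cropped to the smallest common size. A small illustration (assuming nn = lasagne.layers as above):

# Illustration of ConcatLayer's 'center' cropping.
import lasagne.layers as nn

a = nn.InputLayer((None, 8, 17, 17))
b = nn.InputLayer((None, 4, 16, 16))
c = nn.ConcatLayer([a, b], cropping=[None, None, 'center', 'center'])
print(c.output_shape)  # (None, 12, 16, 16): spatial dims cropped, channels summed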
Example #28
    def rnn_encoder(x_sym, x_mask):
        name = "Encoder"
        n_layers = 1
        n_units = 128
        emb_size = 128
        rnn = DropoutLSTMLayer

        l_in = L.InputLayer((None, None), input_var=x_sym)
        l_mask = L.InputLayer((None, None), input_var=x_mask)
        l_emb = DropoutEmbeddingLayer(l_in,
                                      dict_size,
                                      emb_size,
                                      name=name + '.Embedding',
                                      dropout=0.25)
        l_onehot = L.EmbeddingLayer(l_in,
                                    dict_size,
                                    dict_size,
                                    W=np.eye(dict_size, dtype='float32'),
                                    name=name + '.OneHot')
        l_onehot.params[l_onehot.W].remove('trainable')

        l_enc_forwards = rnn(l_emb,
                             num_units=n_units,
                             mask_input=l_mask,
                             name=name + '.0.Forward')
        l_enc_backwards = rnn(l_emb,
                              num_units=n_units,
                              mask_input=l_mask,
                              backwards=True,
                              name=name + '.0.Backward')
        l_enc = L.ConcatLayer([l_enc_forwards, l_enc_backwards], axis=2)

        for i in range(n_layers - 1):
            l_enc = rnn(l_enc,
                        num_units=n_units,
                        mask_input=l_mask,
                        name="%s.%d.Forward" % (name, i + 1),
                        dropout=0.25)

        return l_onehot, l_enc
Example #29
def build_network(W,
                  number_unique_tags,
                  longest_word,
                  longest_sentence,
                  input_var=None):
    print("Building network ...")

    input_layer = L.InputLayer((None, longest_sentence, longest_word),
                               input_var=input_var)

    embed_layer = L.EmbeddingLayer(input_layer,
                                   input_size=103,
                                   output_size=101,
                                   W=W)

    reshape_embed = L.reshape(embed_layer, (-1, longest_word, 101))

    conv_layer_1 = L.Conv1DLayer(reshape_embed, longest_word, 2)
    conv_layer_2 = L.Conv1DLayer(reshape_embed, longest_word, 3)

    pool_layer_1 = L.MaxPool1DLayer(conv_layer_1, pool_size=longest_word - 1)
    pool_layer_2 = L.MaxPool1DLayer(conv_layer_2, pool_size=longest_word - 2)

    merge_layer = L.ConcatLayer([pool_layer_1, pool_layer_2], 1)
    flatten_merge = L.flatten(merge_layer, 2)
    reshape_merge = L.reshape(flatten_merge,
                              (-1, longest_sentence, int(longest_word * 2)))

    l_re = lasagne.layers.RecurrentLayer(
        reshape_merge,
        N_HIDDEN,
        nonlinearity=lasagne.nonlinearities.sigmoid,
        mask_input=None)
    l_out = lasagne.layers.DenseLayer(
        l_re, number_unique_tags, nonlinearity=lasagne.nonlinearities.softmax)

    print "DONE BUILDING NETWORK"
    return l_out
Example #30
def build_expand_level(incoming,
                       incoming_skip,
                       num_filters,
                       nonlin,
                       W_init=lasagne.init.GlorotUniform(),
                       b_init=lasagne.init.Constant(0.01),
                       filter_size=3):
    """Builds a Conv-Conv-Deconv-Concat block of U-Net."""

    network = nn.Conv2DLayer(incoming,
                             num_filters,
                             filter_size,
                             pad='same',
                             W=W_init,
                             b=b_init,
                             nonlinearity=nonlin)
    network = nn.batch_norm(network)
    network = nn.Conv2DLayer(network,
                             num_filters,
                             filter_size,
                             pad='same',
                             W=W_init,
                             b=b_init,
                             nonlinearity=nonlin)
    network = nn.batch_norm(network)
    network = nn.TransposedConv2DLayer(network,
                                       num_filters // 2,
                                       filter_size,
                                       stride=2,
                                       crop='valid',
                                       W=W_init,
                                       b=b_init,
                                       nonlinearity=nonlin)
    network = nn.batch_norm(network)

    crop_mode = [None, None, 'center', 'center']
    return nn.ConcatLayer([network, incoming_skip], cropping=crop_mode)
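A minimal wiring sketch for the block above: one contracting step produces the skip connection, the pooled maps go through build_expand_level, and the transposed convolution's slightly larger output is center-cropped onto the skip before concatenation. Sizes below are illustrative only (assuming nn = lasagne.layers as in the example).

# Illustrative U-Net-style wiring of build_expand_level (not the original model).
import lasagne
import lasagne.layers as nn
from lasagne.nonlinearities import rectify

l_in = nn.InputLayer((None, 1, 64, 64))
skip = nn.Conv2DLayer(l_in, 32, 3, pad='same', nonlinearity=rectify)   # (None, 32, 64, 64)
down = nn.MaxPool2DLayer(skip, 2)                                      # (None, 32, 32, 32)
up = build_expand_level(down, skip, num_filters=64, nonlin=rectify)
print(up.output_shape)  # (None, 64, 64, 64): 32 upsampled + 32 skip channels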