Example 1
import lasagne
import lasagne.layers as ll


def discriminator_3D(input_var=None, num_units=512, seq_length=4):
    discriminator = []
    lrelu = lasagne.nonlinearities.LeakyRectify(0.2)

    discriminator.append(
        ll.InputLayer(shape=(None, seq_length, 3, 80, 160),
                      input_var=input_var))

    # Lasagne's Conv3DLayer expects the input shape
    # (batch_size, num_input_channels, input_depth, input_rows, input_columns),
    # so we need to change the dimension ordering (see the shape check after this function).

    discriminator.append(ll.DimshuffleLayer(discriminator[-1],
                                            (0, 2, 1, 3, 4)))

    discriminator.append(
        ll.Conv3DLayer(discriminator[-1],
                       num_filters=num_units // 8,
                       filter_size=5,
                       stride=2,
                       pad=2,
                       nonlinearity=lrelu))

    discriminator.append(
        ll.batch_norm(
            ll.Conv3DLayer(discriminator[-1],
                           num_filters=num_units // 4,
                           filter_size=5,
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    discriminator.append(
        ll.batch_norm(
            ll.Conv3DLayer(discriminator[-1],
                           num_filters=num_units // 2,
                           filter_size=5,
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    discriminator.append(
        ll.batch_norm(
            ll.Conv3DLayer(discriminator[-1],
                           num_filters=num_units,
                           filter_size=5,
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    discriminator.append(ll.FlattenLayer(discriminator[-1]))

    discriminator.append(
        ll.DenseLayer(discriminator[-1], num_units=1, nonlinearity=None))

    for layer in discriminator:
        print(layer.output_shape)
    print()

    return discriminator
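
Since Lasagne propagates symbolic shapes, the dimension swap described in the comment above can be checked without compiling anything. A minimal sketch, using the shapes implied by the function's defaults:

import lasagne.layers as ll

l_in = ll.InputLayer(shape=(None, 4, 3, 80, 160))    # (batch, time, channels, rows, cols)
l_shuf = ll.DimshuffleLayer(l_in, (0, 2, 1, 3, 4))   # (batch, channels, time, rows, cols) for Conv3DLayer
print(l_shuf.output_shape)                           # (None, 3, 4, 80, 160)
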
Example 2
def conv4_net(data, ndim, pad='same'):
    res = conv_nonl(data, 6, '1', pad=pad)
    res = conv_nonl(res, 12, '2', pad=pad)
    res = conv_nonl(res, 24, '3', pad=pad)
    res = conv(res, ndim - 1, '4', pad=pad)
    res = L.DimshuffleLayer(res, (0, 2, 3, 1), name='transpose')
    res = L2NormLayer(res, 1e-8, name='l2norm')
    return res
Example 3
def conv4_net_dense_color(data, ndim, pad='same'):
    res = conv_nonl(data, 6, '1', pad=pad)
    res = conv_nonl(res, 12, '2', pad=pad)
    res = conv_nonl(res, 24, '3', pad=pad)
    res = L.concat([data, res], axis=1, name='concat')
    res = L.DimshuffleLayer(res, (0, 2, 3, 1), name='transpose')
    res = L2NormLayer(res, 1e-8, name='l2norm')
    res = NormedDense(res, ndim, name='normed_dense')
    return res
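
conv_nonl, conv and L2NormLayer above are project-specific helpers, but the final (0, 2, 3, 1) transpose is plain Lasagne: it moves the channel axis last so that each pixel's feature vector can be normalised on its own. A rough shape check, with a stock Conv2DLayer standing in for the custom helpers and an assumed 64x64 input:

import lasagne.layers as L

data = L.InputLayer(shape=(None, 3, 64, 64))
res = L.Conv2DLayer(data, num_filters=2, filter_size=3, pad='same',
                    nonlinearity=None)
res = L.DimshuffleLayer(res, (0, 2, 3, 1), name='transpose')
print(res.output_shape)   # (None, 64, 64, 2): channels last, one feature vector per pixel
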
Example 4
def cls_net(_incoming):
    _drop1 = L.DropoutLayer(_incoming, p=0.2, rescale=True)
    _conv1 = batch_norm(
        conv(_drop1,
             num_filters=64,
             filter_size=7,
             stride=3,
             pad=0,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.rectify))
    _drop2 = L.DropoutLayer(_conv1, p=0.2, rescale=True)
    _conv2 = batch_norm(
        conv(_drop2,
             num_filters=128,
             filter_size=3,
             stride=1,
             pad=0,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.rectify))
    _pool2 = L.MaxPool2DLayer(_conv2, pool_size=2)
    _fc1 = batch_norm(
        L.DenseLayer(L.FlattenLayer(_pool2, outdim=2),
                     256,
                     W=I.Normal(0.02),
                     b=None,
                     nonlinearity=NL.rectify))

    _fc2 = L.DenseLayer(_fc1,
                        ny,
                        W=I.Normal(0.02),
                        b=None,
                        nonlinearity=NL.sigmoid)
    _aux = [
        tanh(_conv1),
        tanh(_conv2),
        tanh(L.DimshuffleLayer(_fc1, (0, 1, 'x', 'x'))),
        L.DimshuffleLayer(_fc2, (0, 1, 'x', 'x'))
    ]
    return _aux, _fc2
Example 5
def build_1Dregression_v1(input_var=None,
                          input_width=None,
                          nin_units=12,
                          h_num_units=[64, 64],
                          h_grad_clip=1.0,
                          output_width=1):
    """
    A stacked bidirectional RNN for regression: a linear dimension-reduction
    (NIN) layer at the start, then stacked recurrent layers alternating with
    dense layers, with the forward and backward directions merged at each
    stack, followed by mean pooling over the time dimension.
    
    Args:
        input_var (theano 3-tensor): minibatch of input sequence vectors
        input_width (int): length of input sequences
        nin_units (int): number of NIN features
        h_num_units (int list): number of units in the hidden layer of each
                                stack, from bottom to top
        h_grad_clip (float): gradient clipping maximum value 
        output_width (int): size of output layer (e.g. =1 for 1D regression)
    Returns:
        output layer (Lasagne layer object)
    """

    # Non-linearity hyperparameter
    nonlin = lasagne.nonlinearities.LeakyRectify(leakiness=0.15)

    # Input layer
    l_in = LL.InputLayer(shape=(None, 22, input_width), input_var=input_var)
    batchsize = l_in.input_var.shape[0]

    # NIN-layer
    l_in = LL.NINLayer(l_in,
                       num_units=nin_units,
                       nonlinearity=lasagne.nonlinearities.linear)

    l_in_1 = LL.DimshuffleLayer(l_in, (0, 2, 1))

    # RNN layers
    for h in h_num_units:
        # Forward layers
        l_forward_0 = LL.RecurrentLayer(l_in_1,
                                        nonlinearity=nonlin,
                                        num_units=h,
                                        backwards=False,
                                        learn_init=True,
                                        grad_clipping=h_grad_clip,
                                        unroll_scan=True,
                                        precompute_input=True)

        l_forward_0a = LL.ReshapeLayer(l_forward_0, (-1, h))
        l_forward_0b = LL.DenseLayer(l_forward_0a,
                                     num_units=h,
                                     nonlinearity=nonlin)
        l_forward_0c = LL.ReshapeLayer(l_forward_0b,
                                       (batchsize, input_width, h))

        # Backward layers
        l_backward_0 = LL.RecurrentLayer(l_in_1,
                                         nonlinearity=nonlin,
                                         num_units=h,
                                         backwards=True,
                                         learn_init=True,
                                         grad_clipping=h_grad_clip,
                                         unroll_scan=True,
                                         precompute_input=True)

        l_backward_0a = LL.ReshapeLayer(l_backward_0, (-1, h))
        l_backward_0b = LL.DenseLayer(l_backward_0a,
                                      num_units=h,
                                      nonlinearity=nonlin)
        l_backward_0c = LL.ReshapeLayer(l_backward_0b,
                                        (batchsize, input_width, h))

        l_in_1 = LL.ElemwiseSumLayer([l_forward_0c, l_backward_0c])

    # Output layers
    network_0a = LL.ReshapeLayer(l_in_1, (-1, h_num_units[-1]))
    network_0b = LL.DenseLayer(network_0a,
                               num_units=output_width,
                               nonlinearity=nonlin)
    network_0c = LL.ReshapeLayer(network_0b,
                                 (batchsize, input_width, output_width))

    output_net_1 = LL.FlattenLayer(network_0c, outdim=2)
    output_net_2 = LL.FeaturePoolLayer(output_net_1,
                                       pool_size=input_width,
                                       pool_function=T.mean)

    return output_net_2
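
The NIN layer followed by the (0, 2, 1) dimshuffle is what turns the (batch, channels, time) input into the (batch, time, features) layout that RecurrentLayer expects. A minimal sketch, assuming input_width=100 and the default nin_units=12:

import lasagne
import lasagne.layers as LL

l_in = LL.InputLayer(shape=(None, 22, 100))                      # (batch, channels, time)
l_nin = LL.NINLayer(l_in, num_units=12,
                    nonlinearity=lasagne.nonlinearities.linear)  # per-timestep 22 -> 12 projection
l_seq = LL.DimshuffleLayer(l_nin, (0, 2, 1))                     # (batch, time, features)
print(l_nin.output_shape)   # (None, 12, 100)
print(l_seq.output_shape)   # (None, 100, 12)
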
Example 6
def cls_net(_incoming,
            noise_sigma=0.05,
            drop_ratio_conv=0.3,
            drop_ratio_fc=0.5):
    _noise = L.GaussianNoiseLayer(_incoming, sigma=noise_sigma)

    _conv1 = conv(_noise,
                  num_filters=128,
                  filter_size=4,
                  stride=2,
                  pad=1,
                  W=I.Normal(0.02),
                  b=I.Constant(0.),
                  nonlinearity=NL.rectify)
    _lrn1 = L.LocalResponseNormalization2DLayer(_conv1,
                                                alpha=0.0001,
                                                k=2,
                                                beta=0.75,
                                                n=5)

    _drop2 = L.DropoutLayer(_lrn1, p=drop_ratio_conv, rescale=True)
    _conv2 = batch_norm_n(
        conv(_drop2,
             num_filters=256,
             filter_size=4,
             stride=2,
             pad=1,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.rectify))

    _drop3 = L.DropoutLayer(_conv2, p=drop_ratio_conv, rescale=True)
    _conv3 = batch_norm_n(
        conv(_drop3,
             num_filters=384,
             filter_size=3,
             stride=1,
             pad=0,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.rectify))

    _drop4 = L.DropoutLayer(_conv3, p=drop_ratio_conv, rescale=True)
    _conv4 = batch_norm_n(
        conv(_drop4,
             num_filters=512,
             filter_size=3,
             stride=1,
             pad=0,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.rectify))

    _drop5 = L.DropoutLayer(_conv4, p=drop_ratio_fc, rescale=True)
    _conv5 = batch_norm_n(
        conv(_drop5,
             num_filters=1024,
             filter_size=1,
             stride=1,
             pad=0,
             W=I.Normal(0.02),
             b=None,
             nonlinearity=NL.rectify))

    _pool6 = L.GlobalPoolLayer(_conv5, pool_function=theano.tensor.max)  #mean
    _drop6 = L.DropoutLayer(_pool6, p=drop_ratio_fc, rescale=True)
    _fc6 = L.DenseLayer(_drop6,
                        ny,
                        W=I.Normal(0.02),
                        b=I.Constant(0),
                        nonlinearity=NL.softmax)

    _aux = [
        _conv1, _conv2, _conv3, _conv4, _conv5,
        L.DimshuffleLayer(_fc6, (0, 1, 'x', 'x'))
    ]  ### used to have a tanh() around everything except last
    return _aux, _fc6
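
In both cls_net variants the final DenseLayer output is lifted to 4D with DimshuffleLayer(..., (0, 1, 'x', 'x')) so it can sit alongside the convolutional feature maps in _aux; each 'x' inserts a broadcastable axis of size 1. A small sketch of that pattern (the width of 10 is just a placeholder for ny):

import lasagne.layers as L

l_fc = L.InputLayer(shape=(None, 10))              # stand-in for the ny-way dense output
l_4d = L.DimshuffleLayer(l_fc, (0, 1, 'x', 'x'))   # 'x' adds singleton, broadcastable axes
print(l_4d.output_shape)                           # (None, 10, 1, 1)
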
Example 7
    def __init__(self,
                 incomings,
                 vocab_size,
                 emb_size,
                 A=lasagne.init.Normal(std=0.1),
                 C=lasagne.init.Normal(std=0.1),
                 AT=lasagne.init.Normal(std=0.1),
                 CT=lasagne.init.Normal(std=0.1),
                 nonlin=lasagne.nonlinearities.softmax,
                 RN=0.,
                 **kwargs):
        super(MemoryLayer, self).__init__(incomings, **kwargs)

        self.vocab_size, self.emb_size = vocab_size, emb_size
        self.nonlin = nonlin
        self.RN = RN
        #        self.A, self.C, self.AT, self.CT = A, C, AT, CT

        batch_size, c_count, c_length = self.input_shapes[0]
        _, q_count, _ = self.input_shapes[2]

        self.l_c_in = LL.InputLayer(shape=(batch_size, c_count, c_length))
        self.l_c_in_pe = LL.InputLayer(shape=(batch_size, c_count, c_length,
                                              self.emb_size))
        self.l_u_in = LL.InputLayer(shape=(batch_size, q_count, self.emb_size))

        self.l_c_A_enc = EncodingFullLayer((self.l_c_in, self.l_c_in_pe),
                                           self.vocab_size, self.emb_size, A,
                                           AT)
        self.l_c_C_enc = EncodingFullLayer((self.l_c_in, self.l_c_in_pe),
                                           self.vocab_size, self.emb_size, C,
                                           CT)
        self.A, self.C = self.l_c_A_enc.W, self.l_c_C_enc.W
        self.AT, self.CT = self.l_c_A_enc.WT, self.l_c_C_enc.WT
        if len(incomings) == 4:  # if probabilities over sentences are also given
            self.l_in_ac_prob = LL.InputLayer(shape=(batch_size, c_count,
                                                     emb_size))
            self.l_c_A_enc_ = LL.ElemwiseMergeLayer(
                (self.l_c_A_enc, self.l_in_ac_prob), merge_function=T.mul)
            self.l_c_C_enc_ = LL.ElemwiseMergeLayer(
                (self.l_c_C_enc, self.l_in_ac_prob), merge_function=T.mul)

        self.l_u_in_tr = LL.DimshuffleLayer(self.l_u_in, pattern=(0, 2, 1))
        if len(incomings) == 4:
            self.l_p = BatchedDotLayer((self.l_c_A_enc_, self.l_u_in_tr))
        else:
            self.l_p = BatchedDotLayer((self.l_c_A_enc, self.l_u_in_tr))

        if self.l_p.output_shape[2] == 1:
            self.l_p = LL.FlattenLayer(self.l_p, outdim=2)
            # self.l_p = LL.DimshuffleLayer(self.l_p, (0, 1))

        if self.nonlin == 'MaxOut':
            raise NotImplementedError
        self.l_p = LL.NonlinearityLayer(self.l_p, nonlinearity=nonlin)
        self.l_p = LL.DimshuffleLayer(self.l_p, (0, 1, 'x'))
        #        self.l_p = LL.ReshapeLayer(self.l_p, self.l_p.output_shape + (1,))
        self.l_p = LL.ExpressionLayer(self.l_p,
                                      lambda X: X.repeat(emb_size, 2),
                                      output_shape='auto')
        ##        self.l_p = RepeatDimLayer(self.l_p, emb_size, axis=2)
        if len(incomings) == 4:
            self.l_pc = LL.ElemwiseMergeLayer((self.l_p, self.l_c_C_enc_),
                                              merge_function=T.mul)
        else:
            self.l_pc = LL.ElemwiseMergeLayer((self.l_p, self.l_c_C_enc),
                                              merge_function=T.mul)
        self.l_o = LL.ExpressionLayer(self.l_pc,
                                      lambda X: X.sum(1),
                                      output_shape='auto')
        #        self.l_o = SumLayer(self.l_pc, axis=1)
        self.l_o = LL.DimshuffleLayer(self.l_o, pattern=(0, 'x', 1))
        self.l_o_u = LL.ElemwiseMergeLayer((self.l_o, self.l_u_in),
                                           merge_function=T.add)

        params = LL.helper.get_all_params(self.l_o_u, trainable=True)
        values = LL.helper.get_all_param_values(self.l_o_u, trainable=True)
        for p, v in zip(params, values):
            self.add_param(p, v.shape, name=p.name)
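
The DimshuffleLayer/ExpressionLayer pair above is how the (batch, slots) attention weights get broadcast against the (batch, slots, emb_size) memory encodings: 'x' appends a trailing singleton axis and the expression repeats it emb_size times. A standalone sketch with placeholder sizes (10 slots, emb_size=20):

import lasagne.layers as LL

emb_size = 20
l_p = LL.InputLayer(shape=(None, 10))                     # one attention weight per memory slot
l_p = LL.DimshuffleLayer(l_p, (0, 1, 'x'))                # (batch, slots, 1)
l_p = LL.ExpressionLayer(l_p, lambda X: X.repeat(emb_size, 2),
                         output_shape='auto')             # (batch, slots, emb_size)
print(l_p.output_shape)                                   # (None, 10, 20)
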
Example 8
def UNet(data, ndim, pad='same'):
    res = uNet.build(data)
    res = L.Conv2DLayer(res, ndim - 1, 1, nonlinearity=None)
    res = L.DimshuffleLayer(res, (0, 2, 3, 1), name='transpose')
    res = L2NormLayer(res, 1e-8, name='l2norm')
    return res
Example 9
def baseline(data, ndim, pad='same'):
    assert (ndim == 3)
    res = L.DimshuffleLayer(data, (0, 2, 3, 1), name='transpose')
    return res
Example 10
def baseline_norm(data, ndim, pad='same'):
    assert (ndim == 4)
    res = L.DimshuffleLayer(data, (0, 2, 3, 1), name='transpose')
    res = L2NormLayer(res, 1e-8, name='l2norm')
    return res
Example 11
def fcrnn(
        input_var_list,
        early_conv_dict_list,
        late_conv_dict,
        dense_filter_size,
        final_pool_function=T.max,
        input_size_list=[128],
        output_size=10,
        last_late_conv_size=128,
        p_dropout=0.5,
        num_feat_type=1,
        num_lstm_unit=512,
        gradient_steps=10):
    assert(len(early_conv_dict_list) == len(input_var_list) ==
           len(input_size_list))

    # early conv layers
    conv_network_list = list()
    total_stride_list = list()
    for jj, [early_conv_dict, input_var, input_size] in enumerate(zip(
            early_conv_dict_list, input_var_list, input_size_list)):
        input_network = lasagne.layers.InputLayer(
            shape=(None, num_feat_type, None, input_size), input_var=input_var)

        total_stride = 1
        network, total_stride = conv_layers(input_network, early_conv_dict,
                                            total_stride,
                                            init_input_size=input_size,
                                            p_dropout=0,
                                            base_name='early{}'.format(jj))
        total_stride_list.append(total_stride)
        conv_network_list.append(network)

    '''
    # upsampling
    conv_network_list = [cl.LocalExtend(net, axis=2, extend_size=ts)
                         for net, ts in zip(conv_network_list,
                                            total_stride_list)]
    '''
    network = layers.ConcatLayer(conv_network_list,
                                 axis=1,
                                 cropping=[None, None, 'lower', None],
                                 name='MultisourceConcatenate')

    # late conv layers (dense layers)
    network, total_stride = conv_layers(network, late_conv_dict,
                                        total_stride,
                                        init_input_size=1,
                                        p_dropout=p_dropout,
                                        base_name='late')

    # frame output layer. every frame has a value
    network = cl.Conv2DXLayer(
        lasagne.layers.dropout(network, p=p_dropout),
        num_filters=last_late_conv_size, filter_size=(dense_filter_size, 1),
        nonlinearity=lasagne.nonlinearities.sigmoid,
        W=lasagne.init.GlorotUniform()
    )
    network = layers.ReshapeLayer(network, ([0], [1], -1))
    network = layers.DimshuffleLayer(network, (0, 2, 1))

    # lstm layers
    l_forward = layers.LSTMLayer(network, output_size,
                                 grad_clipping=100,
                                 gradient_steps=gradient_steps,
                                 nonlinearity=lasagne.nonlinearities.sigmoid)

    # l_backward = layers.LSTMLayer(l_forward, output_size,
    #                                     grad_clipping=100,
    #                                     gradient_steps=gradient_steps,
    #                                     nonlinearity=lasagne.nonlinearities.sigmoid,
    #                                     backwards=True)

    network = layers.DimshuffleLayer(l_forward, (0, 2, 1))

    # pool
    network = layers.GlobalPoolLayer(network,
                                     pool_function=final_pool_function)
    network = layers.ReshapeLayer(network, ([0], -1))

    return network
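
LSTMLayer consumes sequences as (batch, time, features), so the frame-wise convolutional output, which is (batch, features, time) after the reshape, is transposed before the recurrence and transposed back before the global pooling over time. A reduced sketch of those two transposes (the feature and sequence sizes are made up):

import theano.tensor as T
import lasagne.layers as layers

net = layers.InputLayer(shape=(None, 128, 50))           # (batch, features, time)
net = layers.DimshuffleLayer(net, (0, 2, 1))             # (batch, time, features) for the LSTM
net = layers.LSTMLayer(net, num_units=10, grad_clipping=100)
net = layers.DimshuffleLayer(net, (0, 2, 1))             # (batch, units, time)
net = layers.GlobalPoolLayer(net, pool_function=T.max)   # pool over time
print(net.output_shape)                                  # (None, 10)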