Code Example #1
def build_fft_scale(x, y, size):
    W = []
    pnet = ll.InputLayer((None, 3, 101, 101), input_var=None)
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(ll.BatchNormLayer(pnet))
    pnet = ll.Pool2DLayer(pnet, (3, 3), (2, 2))
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(
        ll.BatchNormLayer(pnet),
        nonlinearity=l.nonlinearities.LeakyRectify(0.1))
    pnet = ll.Conv2DLayer(pnet, 32, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.BatchNormLayer(pnet)
    x_p, y_p = ll.get_output(pnet, x), ll.get_output(pnet, y)
    z_p = Customfftlayer(x_p, y_p)
    net = ll.InputLayer((None, 64, 50, 50), input_var=z_p)
    net = ll.BatchNormLayer(net)
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.BatchNormLayer(ll.Conv2DLayer(net, 10, (1, 1), nonlinearity=None))

    # return scale difference: x_new/x_old - 1
    p_scale = ll.get_output(net)
    #p_scale = theano.gradient.disconnected_grad(p_scale)
    net_scale = ll.InputLayer((None, 10, 25, 25), p_scale)
    net_scale = ll.DenseLayer(net_scale,
                              100,
                              b=None,
                              nonlinearity=l.nonlinearities.tanh)
    W.append(net_scale.get_params(regularizable=True)[0])
    net_scale = ll.DenseLayer(net_scale, 2, b=None, nonlinearity=None)
    # return heatmap with 2 times upsample of size
    net_heat = ll.DenseLayer(net,
                             500,
                             b=None,
                             nonlinearity=l.nonlinearities.tanh)
    W.append(net_heat.get_params(regularizable=True)[0])
    net_heat = ll.DenseLayer(net_heat, size**2, b=None, nonlinearity=None)  # chain on the 500-unit layer above
    W.append(net_heat.get_params(regularizable=True)[0])
    net_heat = ll.BatchNormLayer(net_heat)
    net_heat = ll.ReshapeLayer(net_heat, ([0], 1, size, size))
    net_heat = ll.Deconv2DLayer(net_heat,
                                64, (5, 5), (2, 2),
                                b=None,
                                crop='same',
                                nonlinearity=None)
    net_heat = ll.BatchNormLayer(net_heat)
    net_heat = ll.Conv2DLayer(net_heat,
                              1, (3, 3),
                              b=None,
                              pad='same',
                              nonlinearity=None)
    W.append(net_heat.get_params(regularizable=True)[0])
    return pnet, net_scale, net_heat, W
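
A hypothetical usage sketch (the tensor names are illustrative; size=25 matches the (None, 10, 25, 25) shape declared above, and `ll` aliases lasagne.layers as in the snippet):

import theano.tensor as T
import lasagne.layers as ll

x, y = T.tensor4('x'), T.tensor4('y')           # two (batch, 3, 101, 101) crops
pnet, net_scale, net_heat, W = build_fft_scale(x, y, size=25)
scale_out = ll.get_output(net_scale)            # (batch, 2) relative scale change
heat_out = ll.get_output(net_heat)              # single-channel, roughly 2x-upsampled response map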
Code Example #2
 def _build(self):
     layer = layers.InputLayer(shape=(None, 3, 224, 224), input_var=self.X)
     layer = Conv2DDNNLayer(layer, num_filters=64, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = Conv2DDNNLayer(layer, num_filters=64, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
     layer = Conv2DDNNLayer(layer, num_filters=128, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = Conv2DDNNLayer(layer, num_filters=128, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
     layer = Conv2DDNNLayer(layer, num_filters=256, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = Conv2DDNNLayer(layer, num_filters=256, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = Conv2DDNNLayer(layer, num_filters=256, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
     layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
     layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
     layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
     layer = layers.DenseLayer(layer, num_units=4096)
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer, num_units=4096)
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer, num_units=1000)
     layer = layers.NonlinearityLayer(layer, nonlinearity=nonlinearities.softmax)
     return layer
Code Example #3
def build_siamese(layer):
    """"""
    smx = nonlinearities.softmax
    lnr = nonlinearities.linear
    layers = L.get_all_layers(layer)
    nl = filter(
        lambda l: hasattr(l, 'nonlinearity') and (
            (l.nonlinearity != smx) and (l.nonlinearity != lnr)),
        layers)[0].nonlinearity

    if len(layers[0].output_shape) == 3:
        Xl = T.tensor3('left')
        Xr = T.tensor3('right')
    elif len(layers[0].output_shape) == 4:
        Xl = T.tensor4('left')
        Xr = T.tensor4('right')

    Ol = L.get_output(layer, inputs=Xl)
    # Ol_vl = L.get_output(layer, inputs=Xl, deterministic=True)
    Or = L.get_output(layer, inputs=Xr)
    O = T.concatenate([Ol, Or], axis=-1)

    layer = L.InputLayer((None, layer.output_shape[-1] * 2), input_var=O)
    layer = L.DenseLayer(layer, 128, nonlinearity=None, name='hc1')
    layer = L.BatchNormLayer(layer)
    layer = L.NonlinearityLayer(layer, nonlinearity=nl)
    layer = L.DenseLayer(layer, 2, nonlinearity=smx)

    return layer, (Xl, Xr)
Code Example #4
def ptb_lstm(input_var, vocabulary_size, hidden_size, seq_len, num_layers,
             dropout, batch_size):
    l_input = L.InputLayer(shape=(batch_size, seq_len), input_var=input_var)
    l_embed = L.EmbeddingLayer(l_input,
                               vocabulary_size,
                               hidden_size,
                               W=init.Uniform(1.0))
    l_lstms = []
    for i in range(num_layers):
        l_lstm = L.LSTMLayer(l_embed if i == 0 else l_lstms[-1],
                             hidden_size,
                             ingate=L.Gate(W_in=init.GlorotUniform(),
                                           W_hid=init.Orthogonal()),
                             forgetgate=L.Gate(W_in=init.GlorotUniform(),
                                               W_hid=init.Orthogonal(),
                                               b=init.Constant(1.0)),
                             cell=L.Gate(
                                 W_in=init.GlorotUniform(),
                                 W_hid=init.Orthogonal(),
                                 W_cell=None,
                                 nonlinearity=lasagne.nonlinearities.tanh),
                             outgate=L.Gate(W_in=init.GlorotUniform(),
                                            W_hid=init.Orthogonal()))
        l_lstms.append(l_lstm)
    l_drop = L.DropoutLayer(l_lstms[-1], dropout)
    l_out = L.DenseLayer(l_drop, num_units=vocabulary_size, num_leading_axes=2)
    l_out = L.ReshapeLayer(
        l_out,
        (l_out.output_shape[0] * l_out.output_shape[1], l_out.output_shape[2]))
    l_out = L.NonlinearityLayer(l_out,
                                nonlinearity=lasagne.nonlinearities.softmax)
    return l_out
Code Example #5
def build_TOY(x, y):
    z_p = T.concatenate((x, y), axis=1)

    net = ll.InputLayer((None, 2, 100, 100), input_var=z_p)
    net = ll.BatchNormLayer(net)
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
    net = ll.BatchNormLayer(ll.Conv2DLayer(net, 10, (1, 1), nonlinearity=None))
    net = ll.DenseLayer(net, 625, b=None, nonlinearity=None)
    net = ll.ReshapeLayer(net, ([0], 1, 25, 25))
    return net
Code Example #6
def build_correlation_fft(x, y, size):
    pnet = ll.InputLayer((None, 3, 101, 101), input_var=None)
    pnet = ll.BatchNormLayer(pnet)
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(
        ll.BatchNormLayer(pnet),
        nonlinearity=l.nonlinearities.LeakyRectify(0.1))
    pnet = ll.Pool2DLayer(pnet, (3, 3), stride=(2, 2))
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(
        ll.BatchNormLayer(pnet),
        nonlinearity=l.nonlinearities.LeakyRectify(0.1))
    pnet = ll.Conv2DLayer(pnet, 32, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.BatchNormLayer(pnet)
    x_p, y_p = ll.get_output(pnet, x), ll.get_output(pnet, y)
    x_p, y_p = fft.rfft(x_p, 'ortho'), fft.rfft(y_p, 'ortho')

    XX, XY = T.zeros_like(x_p), T.zeros_like(y_p)
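    # Fill XX with |X|^2 (the power spectrum of x_p; its imaginary part stays
    # zero) and XY with conj(X) * Y, so the irfft calls below recover the
    # circular autocorrelation of x_p and the circular cross-correlation of
    # x_p with y_p.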
    XX = T.set_subtensor(
        XX[:, :, :, :, 0], x_p[:, :, :, :, 0] * x_p[:, :, :, :, 0] +
        x_p[:, :, :, :, 1] * x_p[:, :, :, :, 1])
    XY = T.set_subtensor(
        XY[:, :, :, :, 0], x_p[:, :, :, :, 0] * y_p[:, :, :, :, 0] +
        x_p[:, :, :, :, 1] * y_p[:, :, :, :, 1])
    XY = T.set_subtensor(
        XY[:, :, :, :, 1], x_p[:, :, :, :, 0] * y_p[:, :, :, :, 1] -
        x_p[:, :, :, :, 1] * y_p[:, :, :, :, 0])
    xx = fft.irfft(XX, 'ortho')
    xy = fft.irfft(XY, 'ortho')

    z_p = T.concatenate((xx, xy), axis=1)
    z_p *= T.constant(hanningwindow(50))
    net = ll.InputLayer((None, 64, 50, 50), input_var=z_p)
    net = ll.BatchNormLayer(net)
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.BatchNormLayer(ll.Conv2DLayer(net, 10, (1, 1), nonlinearity=None))
    net = ll.DenseLayer(net, size**2, b=None, nonlinearity=None)
    net = ll.ReshapeLayer(net, ([0], 1, size, size))
    return pnet, net
Code Example #7
def conv_bn_rectify(net, num_filters):
    net = ConvLayer(net,
                    int(num_filters),
                    3,
                    W=init.Normal(),
                    pad=1,
                    nonlinearity=None)
    net = BatchNormLayer(net, epsilon=1e-3)
    net = ll.NonlinearityLayer(net)

    return net
Code Example #8
def conv_bn_rectify(net, num_filters):
    net = layers.Conv2DVarDropOutARD(net,
                                     int(num_filters),
                                     3,
                                     W=init.Normal(),
                                     pad=1,
                                     nonlinearity=nl.linear)
    net = BatchNormLayer(net, epsilon=1e-3)
    net = ll.NonlinearityLayer(net)

    return net
Code Example #9
def batch_norm(layer, **kwargs):
    """
    Apply batch normalization to an existing layer. This is a convenience
    function modifying an existing layer to include batch normalization: It
    will steal the layer's nonlinearity if there is one (effectively
    introducing the normalization right before the nonlinearity), remove
    the layer's bias if there is one (because it would be redundant), and add
    a :class:`BatchNormLayer` and :class:`NonlinearityLayer` on top.

    Parameters
    ----------
    layer : A :class:`Layer` instance
        The layer to apply the normalization to; note that it will be
        irreversibly modified as specified above
    **kwargs
        Any additional keyword arguments are passed on to the
        :class:`BatchNormLayer` constructor.

    Returns
    -------
    BatchNormLayer or NonlinearityLayer instance
        A batch normalization layer stacked on the given modified `layer`, or
        a nonlinearity layer stacked on top of both if `layer` was nonlinear.

    Examples
    --------
    Just wrap any layer into a :func:`batch_norm` call on creating it:

    >>> from lasagne.layers import InputLayer, DenseLayer, batch_norm
    >>> from lasagne.nonlinearities import tanh
    >>> l1 = InputLayer((64, 768))
    >>> l2 = batch_norm(DenseLayer(l1, num_units=500, nonlinearity=tanh))

    This introduces batch normalization right before its nonlinearity:

    >>> from lasagne.layers import get_all_layers
    >>> [l.__class__.__name__ for l in get_all_layers(l2)]
    ['InputLayer', 'DenseLayer', 'BatchNormLayer', 'NonlinearityLayer']
    """
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        layer.nonlinearity = lasagne.nonlinearities.identity
    if hasattr(layer, 'b') and layer.b is not None:
        del layer.params[layer.b]
        layer.b = None
    layer = BatchNormLayer(layer, **kwargs)
    if nonlinearity is not None:
        layer = L.NonlinearityLayer(layer, nonlinearity)
    return layer
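
A small check of the side effects described in the docstring (a sketch; `L` aliases lasagne.layers as in the snippet above):

import lasagne
import lasagne.layers as L

conv = L.Conv2DLayer(L.InputLayer((None, 3, 32, 32)), 16, 3,
                     nonlinearity=lasagne.nonlinearities.rectify)
top = batch_norm(conv)
assert conv.nonlinearity is lasagne.nonlinearities.identity  # nonlinearity was "stolen"
assert conv.b is None                                        # redundant bias removed
assert isinstance(top, L.NonlinearityLayer)                  # rectify re-applied on top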
Code Example #10
def net_vgglike(k, input_shape, nclass):
    input_x = T.tensor4("input")
    target_y = T.vector("target", dtype='int32')
    Winit = init.Normal()

    net = ll.InputLayer(input_shape, input_x)
    net = conv_bn_rectify(net, 64 * k)
    net = ll.DropoutLayer(net, 0.3)
    net = conv_bn_rectify(net, 64 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 128 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 128 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 256 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 256 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 256 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = ll.DenseLayer(net,
                        int(512 * k),
                        W=init.Normal(),
                        nonlinearity=nl.rectify)
    net = BatchNormLayer(net, epsilon=1e-3)
    net = ll.NonlinearityLayer(net)
    net = ll.DropoutLayer(net, 0.5)
    net = ll.DenseLayer(net, nclass, W=init.Normal(), nonlinearity=nl.softmax)

    return net, input_x, target_y, k
Code Example #11
def build_transition_down(incoming,
                          reduction,
                          p=0.1,
                          W_init=lasagne.init.GlorotUniform(),
                          b_init=None):
    """"Builds a transition in the DenseNet model. 

    Transitions consist of the sequence: Batch Normalization, 1x1 Convolution,
    2x2 Average Pooling. The channels can be compressed by specifying 
    0 < m <= 1, where num_channels = channels * m.
    """
    num_filters = int(incoming.output_shape[1] * reduction)

    network = nn.BatchNormLayer(incoming)
    network = nn.NonlinearityLayer(network, lasagne.nonlinearities.rectify)
    network = nn.Conv2DLayer(network, num_filters, 1, W=W_init, b=b_init)
    if p > 0:
        network = nn.DropoutLayer(network, p=p)
    return nn.Pool2DLayer(network, 2, 2, mode='max')
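
A minimal usage sketch (assuming `nn` aliases lasagne.layers, as in the snippet): with reduction=0.5 the 64 input channels are compressed to 32 and the spatial resolution is halved.

import lasagne.layers as nn

net = nn.InputLayer((None, 64, 32, 32))
net = build_transition_down(net, reduction=0.5, p=0.1)
print(net.output_shape)  # (None, 32, 16, 16)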
Code Example #12
 def _build(self):
     layer = layers.InputLayer(shape=(None, 3, 32, 32), input_var=self.X)
     layer = nin(layer,
                 conv_filters=192,
                 filter_size=(5, 5),
                 pad=2,
                 cccp1_filters=160,
                 cccp2_filters=96)
     layer = layers.Pool2DLayer(layer,
                                pool_size=(3, 3),
                                stride=2,
                                pad=(0, 0),
                                ignore_border=False,
                                mode='max')
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = nin(layer,
                 conv_filters=192,
                 filter_size=(5, 5),
                 pad=2,
                 cccp1_filters=192,
                 cccp2_filters=192)
     layer = layers.Pool2DLayer(layer,
                                pool_size=(3, 3),
                                stride=2,
                                ignore_border=False,
                                mode='average_exc_pad')
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = nin(layer,
                 conv_filters=192,
                 filter_size=(3, 3),
                 pad=1,
                 cccp1_filters=192,
                 cccp2_filters=10)
     layer = layers.Pool2DLayer(layer,
                                pool_size=(8, 8),
                                stride=1,
                                ignore_border=False,
                                mode='average_exc_pad')
     layer = layers.flatten(layer, outdim=2)
     layer = layers.NonlinearityLayer(layer,
                                      nonlinearity=nonlinearities.softmax)
     return layer
Code Example #13
    def _build_network(self, state_var, action_var):
        """Builds critic network:; inputs: (state, action), outputs: Q-val."""

        # States -> Hidden
        state_in = nn.InputLayer((None, ) + self.state_shape, state_var)
        states = nn.DenseLayer(state_in, 30, W_init, b_init, relu)
        states = nn.DenseLayer(states, 30, W_init, b_init, nonlinearity=None)

        # Actions -> Hidden
        action_in = nn.InputLayer((None, self.num_actions), action_var)
        actions = nn.DenseLayer(action_in,
                                30,
                                W_init,
                                b=None,
                                nonlinearity=None)

        # States_h + Actions_h -> Output
        net = nn.ElemwiseSumLayer([states, actions])
        net = nn.NonlinearityLayer(net, relu)
        return nn.DenseLayer(net, 1, W_out, b_out, nonlinearity=None)
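
A hypothetical wiring sketch (`critic` is an assumed instance exposing state_shape and num_actions; `nn`, W_init, b_init, relu, W_out and b_out are the module-level names used above):

import theano.tensor as T
import lasagne.layers as nn

state_var, action_var = T.matrix('states'), T.matrix('actions')
q_layer = critic._build_network(state_var, action_var)  # a 1-unit DenseLayer
q_values = nn.get_output(q_layer)                        # symbolic Q(s, a), shape (batch, 1)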
Code Example #14
def conv2d(incoming, n_filters, filter_size, stride, pool_size, nonlinearity,
           batch_norm, name, verbose, *args, **kwargs):
    """"""
    if stride is None:
        stride = (1, 1)

    layer = L.Conv2DLayer(incoming,
                          num_filters=n_filters,
                          filter_size=filter_size,
                          stride=stride,
                          pad='same',
                          nonlinearity=None,
                          name=name)
    if batch_norm:
        name += '.bn'
        layer = L.BatchNormLayer(layer, name=name)

    name += '.nonlin'
    layer = L.NonlinearityLayer(layer, nonlinearity=nonlinearity)
    return layer
Code Example #15
def build_block(
    incoming,
    num_layers,
    num_filters,
    use_linear_skip=True,
    filter_size=3,
    p=0.1,
    W_init=lasagne.init.GlorotUniform(),
    b_init=None,
    nonlinearity=lasagne.nonlinearities.rectify,
):
    """Builds a block in the DenseNet model."""

    feature_maps = [incoming]

    for i in xrange(num_layers):

        if len(feature_maps) == 1:
            network = incoming
        else:
            network = nn.ConcatLayer(feature_maps, axis=1)

        network = nn.BatchNormLayer(network)
        network = nn.NonlinearityLayer(network, nonlinearity)
        network = nn.Conv2DLayer(network,
                                 num_filters,
                                 filter_size,
                                 pad='same',
                                 W=W_init,
                                 b=b_init)
        if p > 0:
            network = nn.DropoutLayer(network, p=p)
        feature_maps.append(network)

    # Either return all connections, including the input (vanilla DenseNet), or
    # return only the feature maps created in this block, as used in the
    # upscaling path for semantic segmentation (100-layer Tiramisu).
    if use_linear_skip:
        return nn.ConcatLayer(feature_maps, axis=1)
    return nn.ConcatLayer(feature_maps[1:], axis=1)
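
A short usage sketch (assuming `nn` aliases lasagne.layers, as above): with 4 layers of 12 filters each, a 16-channel input grows to 16 + 4*12 = 64 channels when use_linear_skip=True, and to 4*12 = 48 channels when only the newly created maps are returned.

import lasagne.layers as nn

net = nn.InputLayer((None, 16, 32, 32))
block = build_block(net, num_layers=4, num_filters=12)
print(block.output_shape)  # (None, 64, 32, 32)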
Code Example #16
File: word2vec.py  Project: julianser/word2vec
    def model(self, query_input, batch_size, query_vocab_size,
              context_vocab_size, emb_dim_size):
        l_input = L.InputLayer(shape=(batch_size, ), input_var=query_input)
        l_embed_continuous = L.EmbeddingLayer(l_input,
                                              input_size=query_vocab_size,
                                              output_size=emb_dim_size)
        l_values_discrete = L.EmbeddingLayer(l_input,
                                             input_size=query_vocab_size,
                                             output_size=emb_dim_size)
        l_probabilities_discrete = L.NonlinearityLayer(
            l_values_discrete, nonlinearity=lasagne.nonlinearities.softmax)
        l_embed_discrete = StochasticLayer(l_probabilities_discrete,
                                           estimator='MF')
        l_merge = L.ElemwiseSumLayer([l_embed_continuous, l_embed_discrete])
        l_out = L.DenseLayer(l_merge,
                             num_units=emb_dim_size,
                             nonlinearity=lasagne.nonlinearities.softmax)

        l_merge_2 = L.ElemwiseMergeLayer([l_out, l_embed_discrete],
                                         merge_function=T.mul)
        l_final_out = L.DenseLayer(l_merge_2, num_units=context_vocab_size)
        return l_values_discrete, l_final_out
Code Example #17
def build_segmenter_simple_absurd_res():
    sys.setrecursionlimit(1500)
    inp = ll.InputLayer(shape=(None, 1, None, None), name='input')
    n_layers = 64  # should get a 128 x 128 receptive field
    layers = [inp]
    for i in range(n_layers):
        # every 2 layers, add a skip connection
        layers.append(
            ll.Conv2DLayer(layers[-1],
                           num_filters=8,
                           filter_size=(3, 3),
                           pad='same',
                           W=Orthogonal(),
                           nonlinearity=linear,
                           name='conv%d' % (i + 1)))
        layers.append(ll.BatchNormLayer(layers[-1], name='bn%i' % (i + 1)))
        if (i % 2 == 0) and (i != 0):
            layers.append(
                ll.ElemwiseSumLayer([
                    layers[-1],  # prev layer
                    layers[-6],  # 3 actual layers per block, skip the previous block
                ]))
        layers.append(ll.NonlinearityLayer(layers[-1], nonlinearity=rectify))

    # our output layer is also convolutional; remember that our Y is going to be the same exact size as the input
    conv_final = ll.Conv2DLayer(layers[-1],
                                num_filters=2,
                                filter_size=(3, 3),
                                pad='same',
                                W=Orthogonal(),
                                name='conv_final',
                                nonlinearity=linear)
    # Softmax4D applies the softmax across the channel dimension at every spatial location, so no reshape/unroll is needed
    softmax = Softmax4D(conv_final, name='4dsoftmax')

    return [softmax]
Code Example #18
def sigmoid(_in, **kwargs):
    return L.NonlinearityLayer(_in, nonlinearity=NL.sigmoid)
Code Example #19
gen_layers = [
    LL.InputLayer(shape=(args.batch_size, 10**2 + y_dim), input_var=Gen_input)
]
gen_layers.append(
    LL.DenseLayer(gen_layers[-1],
                  num_units=128,
                  nonlinearity=lasagne.nonlinearities.rectify,
                  W=xavier_init([100 + y_dim, 128])))
gen_layers.append(
    LL.DenseLayer(gen_layers[-1],
                  num_units=784,
                  W=xavier_init([128, 784]),
                  nonlinearity=lasagne.nonlinearities.identity))
gen_layers.append(
    LL.NonlinearityLayer(gen_layers[-1],
                         nonlinearity=lasagne.nonlinearities.sigmoid))

####Discriminator D(X)######
'''
    Composed of only two layers: the input is an image of dim=784 + number_of_classes,
    and the last layer returns a logit value (not a softmax probability).
'''
dis_layers = [
    LL.InputLayer(shape=(args.batch_size, 28**2 + y_dim), input_var=Dis_input)
]
dis_layers.append(
    LL.DenseLayer(dis_layers[-1],
                  num_units=128,
                  nonlinearity=lasagne.nonlinearities.rectify,
                  W=xavier_init([784 + y_dim, 128])))
dis_layers.append(
    LL.DenseLayer(dis_layers[-1],
                  num_units=1,
                  W=xavier_init([128, 1]),
                  nonlinearity=lasagne.nonlinearities.identity))  # final single-unit logit layer, inferred from the note above
Code Example #20
File: model.py  Project: tibroc/BirdNET
def resblock(net_in, filters, kernel_size, stride=1, preactivated=True, block_id=1, name=''):

    # Show input shape
    #log.p(("\t\t" + name + " IN SHAPE:", l.get_output_shape(net_in)), new_line=False)

    # Pre-activation
    if block_id > 1:
        net_pre = l.NonlinearityLayer(net_in, nonlinearity=nl.rectify)
    else:
        net_pre = net_in

    # Pre-activated shortcut?
    if preactivated:
        net_in = net_pre

    # Bottleneck Convolution
    if stride > 1:
        net_pre = l.batch_norm(l.Conv2DLayer(net_pre,
                                            num_filters=l.get_output_shape(net_pre)[1],
                                            filter_size=1,
                                            pad='same',
                                            stride=1,
                                            nonlinearity=nl.rectify))
    
    # First Convolution     
    net = l.batch_norm(l.Conv2DLayer(net_pre,
                                   num_filters=l.get_output_shape(net_pre)[1],
                                   filter_size=kernel_size,
                                   pad='same',
                                   stride=1,
                                   nonlinearity=nl.rectify))

    # Pooling layer
    if stride > 1:
        net = l.MaxPool2DLayer(net, pool_size=(stride, stride))

    # Dropout Layer
    net = l.DropoutLayer(net)        

    # Second Convolution
    net = l.batch_norm(l.Conv2DLayer(net,
                        num_filters=filters,
                        filter_size=kernel_size,
                        pad='same',
                        stride=1,
                        nonlinearity=None))

    # Shortcut Layer
    if not l.get_output_shape(net) == l.get_output_shape(net_in):

        # Average pooling
        shortcut = l.Pool2DLayer(net_in, pool_size=(stride, stride), stride=stride, mode='average_exc_pad')

        # Shortcut convolution
        shortcut = l.batch_norm(l.Conv2DLayer(shortcut,
                                 num_filters=filters,
                                 filter_size=1,
                                 pad='same',
                                 stride=1,
                                 nonlinearity=None))        
        
    else:

        # Shortcut = input
        shortcut = net_in
    
    # Merge Layer
    out = l.ElemwiseSumLayer([net, shortcut])

    # Show output shape
    #log.p(("OUT SHAPE:", l.get_output_shape(out), "LAYER:", len(l.get_all_layers(out)) - 1))

    return out
Code Example #21
File: model.py  Project: tibroc/BirdNET
def buildNet():

    log.p('BUILDING BirdNET MODEL...', new_line=False)

    # Input layer for images
    net = l.InputLayer((None, cfg.IM_DIM, cfg.IM_SIZE[1], cfg.IM_SIZE[0]))    

    # Pre-processing stage
    #log.p(("\tPRE-PROCESSING STAGE:"))
    net = l.batch_norm(l.Conv2DLayer(net,
                    num_filters=int(FILTERS[0] * RESNET_K),
                    filter_size=(5, 5),
                    pad='same',
                    nonlinearity=nl.rectify))
    
    #log.p(("\t\tFIRST  CONV OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))

    # Max pooling
    net = l.MaxPool2DLayer(net, pool_size=(1, 2))
    #log.p(("\t\tPRE-MAXPOOL OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))
    
    # Residual Stacks
    for i in range(1, len(FILTERS)):
        #log.p(("\tRES STACK", i, ':'))
        net = resblock(net,
                       filters=int(FILTERS[i] * RESNET_K),
                       kernel_size=KERNEL_SIZES[i],
                       stride=2,
                       preactivated=True,
                       block_id=i,
                       name='BLOCK ' + str(i) + '-1')
        
        for j in range(1, RESNET_N):
            net = resblock(net,
                           filters=int(FILTERS[i] * RESNET_K),
                           kernel_size=KERNEL_SIZES[i],
                           preactivated=False,
                           block_id=i+j,
                           name='BLOCK ' + str(i) + '-' + str(j + 1))
        
    # Post Activation
    net = l.batch_norm(net)
    net = l.NonlinearityLayer(net, nonlinearity=nl.rectify)
    
    # Classification branch
    #log.p(("\tCLASS BRANCH:"))
    net = classificationBranch(net,  (4, 10)) 
    #log.p(("\t\tBRANCH OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))

    # Pooling
    net = l.GlobalPoolLayer(net, pool_function=logmeanexp)
    #log.p(("\tGLOBAL POOLING SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))

    # Sigmoid output
    net = l.NonlinearityLayer(net, nonlinearity=nl.sigmoid)

    #log.p(("\tFINAL NET OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net))))
    log.p("DONE!")

    # Model stats
    #log.p(("MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"))
    #log.p(("MODEL HAS", l.count_params(net), "PARAMS"))

    return net
Code Example #22
def build_resnet_model():

    log.i('BUILDING RESNET MODEL...')

    # Random Seed
    lasagne_random.set_rng(cfg.getRandomState())

    # Input layer for images
    net = l.InputLayer((None, cfg.IM_DIM, cfg.IM_SIZE[1], cfg.IM_SIZE[0]))

    # First Convolution
    net = l.Conv2DLayer(net,
                        num_filters=cfg.FILTERS[0],
                        filter_size=cfg.KERNEL_SIZES[0],
                        pad='same',
                        W=initialization(cfg.NONLINEARITY),
                        nonlinearity=None)

    log.i(("\tFIRST CONV OUT SHAPE:", l.get_output_shape(net), "LAYER:",
           len(l.get_all_layers(net)) - 1))

    # Residual Stacks
    for i in range(0, len(cfg.FILTERS)):
        net = resblock(net,
                       filters=cfg.FILTERS[i] * cfg.RESNET_K,
                       kernel_size=cfg.KERNEL_SIZES[i],
                       stride=2,
                       num_groups=cfg.NUM_OF_GROUPS[i])
        for _ in range(1, cfg.RESNET_N):
            net = resblock(net,
                           filters=cfg.FILTERS[i] * cfg.RESNET_K,
                           kernel_size=cfg.KERNEL_SIZES[i],
                           num_groups=cfg.NUM_OF_GROUPS[i],
                           preactivated=False)
        log.i(("\tRES STACK", i + 1, "OUT SHAPE:", l.get_output_shape(net),
               "LAYER:", len(l.get_all_layers(net)) - 1))

    # Post Activation
    net = batch_norm(net)
    net = l.NonlinearityLayer(net, nonlinearity=nonlinearity(cfg.NONLINEARITY))

    # Pooling
    net = l.GlobalPoolLayer(net)
    log.i(("\tFINAL POOLING SHAPE:", l.get_output_shape(net), "LAYER:",
           len(l.get_all_layers(net)) - 1))

    # Classification Layer
    net = l.DenseLayer(net,
                       len(cfg.CLASSES),
                       nonlinearity=nonlinearity('identity'),
                       W=initialization('identity'))
    net = l.NonlinearityLayer(net, nonlinearity=nonlinearity('softmax'))

    log.i(("\tFINAL NET OUT SHAPE:", l.get_output_shape(net), "LAYER:",
           len(l.get_all_layers(net))))
    log.i("...DONE!")

    # Model stats
    log.i(("MODEL HAS",
           (sum(hasattr(layer, 'W')
                for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"))
    log.i(("MODEL HAS", l.count_params(net), "PARAMS"))

    return net
Code Example #23
def resblock(net_in,
             filters,
             kernel_size,
             stride=1,
             num_groups=1,
             preactivated=True):

    # Preactivation
    net_pre = batch_norm(net_in)
    net_pre = l.NonlinearityLayer(net_pre,
                                  nonlinearity=nonlinearity(cfg.NONLINEARITY))

    # Preactivated shortcut?
    if preactivated:
        net_sc = net_pre
    else:
        net_sc = net_in

    # Stride size
    if cfg.MAX_POOLING:
        s = 1
    else:
        s = stride

    # First Convolution (always has preactivated input)
    net = batch_norm(
        l.Conv2DLayer(net_pre,
                      num_filters=filters,
                      filter_size=kernel_size,
                      pad='same',
                      stride=s,
                      num_groups=num_groups,
                      W=initialization(cfg.NONLINEARITY),
                      nonlinearity=nonlinearity(cfg.NONLINEARITY)))

    # Optional pooling layer
    if cfg.MAX_POOLING and stride > 1:
        net = l.MaxPool2DLayer(net, pool_size=stride)

    # Dropout Layer (we support different types of dropout)
    if cfg.DROPOUT_TYPE == 'channels' and cfg.DROPOUT > 0.0:
        net = l.dropout_channels(net, p=cfg.DROPOUT)
    elif cfg.DROPOUT_TYPE == 'location' and cfg.DROPOUT > 0.0:
        net = l.dropout_location(net, p=cfg.DROPOUT)
    elif cfg.DROPOUT > 0.0:
        net = l.DropoutLayer(net, p=cfg.DROPOUT)

    # Second Convolution
    net = l.Conv2DLayer(net,
                        num_filters=filters,
                        filter_size=kernel_size,
                        pad='same',
                        stride=1,
                        num_groups=num_groups,
                        W=initialization(cfg.NONLINEARITY),
                        nonlinearity=None)

    # Shortcut Layer
    if not l.get_output_shape(net) == l.get_output_shape(net_sc):
        shortcut = l.Conv2DLayer(net_sc,
                                 num_filters=filters,
                                 filter_size=1,
                                 pad='same',
                                 stride=s,
                                 W=initialization(cfg.NONLINEARITY),
                                 nonlinearity=None,
                                 b=None)

        # Optional pooling layer
        if cfg.MAX_POOLING and stride > 1:
            shortcut = l.MaxPool2DLayer(shortcut, pool_size=stride)
    else:
        shortcut = net_sc

    # Merge Layer
    out = l.ElemwiseSumLayer([net, shortcut])

    return out
Code Example #24
    def __init__(self,
                 incomings,
                 vocab_size,
                 emb_size,
                 A=lasagne.init.Normal(std=0.1),
                 C=lasagne.init.Normal(std=0.1),
                 AT=lasagne.init.Normal(std=0.1),
                 CT=lasagne.init.Normal(std=0.1),
                 nonlin=lasagne.nonlinearities.softmax,
                 RN=0.,
                 **kwargs):
        super(MemoryLayer, self).__init__(incomings, **kwargs)

        self.vocab_size, self.emb_size = vocab_size, emb_size
        self.nonlin = nonlin
        self.RN = RN
        #        self.A, self.C, self.AT, self.CT = A, C, AT, CT

        batch_size, c_count, c_length = self.input_shapes[0]
        _, q_count, _ = self.input_shapes[2]

        self.l_c_in = LL.InputLayer(shape=(batch_size, c_count, c_length))
        self.l_c_in_pe = LL.InputLayer(shape=(batch_size, c_count, c_length,
                                              self.emb_size))
        self.l_u_in = LL.InputLayer(shape=(batch_size, q_count, self.emb_size))

        self.l_c_A_enc = EncodingFullLayer((self.l_c_in, self.l_c_in_pe),
                                           self.vocab_size, self.emb_size, A,
                                           AT)
        self.l_c_C_enc = EncodingFullLayer((self.l_c_in, self.l_c_in_pe),
                                           self.vocab_size, self.emb_size, C,
                                           CT)
        self.A, self.C = self.l_c_A_enc.W, self.l_c_C_enc.W
        self.AT, self.CT = self.l_c_A_enc.WT, self.l_c_C_enc.WT
        if len(incomings) == 4:  # if there are also probabilities over the sentences
            self.l_in_ac_prob = LL.InputLayer(shape=(batch_size, c_count,
                                                     emb_size))
            self.l_c_A_enc_ = LL.ElemwiseMergeLayer(
                (self.l_c_A_enc, self.l_in_ac_prob), merge_function=T.mul)
            self.l_c_C_enc_ = LL.ElemwiseMergeLayer(
                (self.l_c_C_enc, self.l_in_ac_prob), merge_function=T.mul)

        self.l_u_in_tr = LL.DimshuffleLayer(self.l_u_in, pattern=(0, 2, 1))
        if len(incomings) == 4:
            self.l_p = BatchedDotLayer((self.l_c_A_enc_, self.l_u_in_tr))
        else:
            self.l_p = BatchedDotLayer((self.l_c_A_enc, self.l_u_in_tr))

        if self.l_p.output_shape[2] == 1:
            self.l_p = LL.FlattenLayer(self.l_p, outdim=2)
        # self.l_p = LL.DimshuffleLayer(self.l_p, (0, 1))

        if self.nonlin == 'MaxOut':
            raise NotImplementedError
        self.l_p = LL.NonlinearityLayer(self.l_p, nonlinearity=nonlin)
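        # l_p is now the attention distribution over the memory slots: the
        # batched dot above scores each A-encoded sentence against the query,
        # and the nonlinearity (softmax by default) normalizes those scores.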
        self.l_p = LL.DimshuffleLayer(self.l_p, (0, 1, 'x'))
        #        self.l_p = LL.ReshapeLayer(self.l_p, self.l_p.output_shape + (1,))
        self.l_p = LL.ExpressionLayer(self.l_p,
                                      lambda X: X.repeat(emb_size, 2),
                                      output_shape='auto')
        ##        self.l_p = RepeatDimLayer(self.l_p, emb_size, axis=2)
        if len(incomings) == 4:
            self.l_pc = LL.ElemwiseMergeLayer((self.l_p, self.l_c_C_enc_),
                                              merge_function=T.mul)
        else:
            self.l_pc = LL.ElemwiseMergeLayer((self.l_p, self.l_c_C_enc),
                                              merge_function=T.mul)
        self.l_o = LL.ExpressionLayer(self.l_pc,
                                      lambda X: X.sum(1),
                                      output_shape='auto')
        #        self.l_o = SumLayer(self.l_pc, axis=1)
        self.l_o = LL.DimshuffleLayer(self.l_o, pattern=(0, 'x', 1))
        self.l_o_u = LL.ElemwiseMergeLayer((self.l_o, self.l_u_in),
                                           merge_function=T.add)

        params = LL.helper.get_all_params(self.l_o_u, trainable=True)
        values = LL.helper.get_all_param_values(self.l_o_u, trainable=True)
        for p, v in zip(params, values):
            self.add_param(p, v.shape, name=p.name)
Code Example #25
File: common.py  Project: mborisyak/craynn
dropout = lambda p=0.1, rescale=True: lambda incoming: \
  layers.DropoutLayer(incoming, p=p, rescale=rescale) if p is not None else incoming

batch_norm = lambda axes='auto': lambda incoming: layers.BatchNormLayer(
    incoming, axes=axes)


class Select(object):
    def __getitem__(self, item):
        return lambda incomings: incomings[item]


select = Select()
take = select

nonlinearity = lambda f=None: lambda incoming: layers.NonlinearityLayer(
    incoming, (nonlinearities.LeakyRectify(0.05) if f is None else f))

elementwise = lambda f=T.add: lambda incomings: layers.ElemwiseMergeLayer(
    incomings, f)
elementwise_sum = lambda: lambda incomings: layers.ElemwiseMergeLayer(
    incomings, T.add)
elementwise_mean = lambda: lambda incomings: \
  nonlinearity(f=lambda x: x / len(incomings))(layers.ElemwiseMergeLayer(incomings, T.add))

flatten = lambda outdim=2: lambda incoming: layers.FlattenLayer(incoming,
                                                                outdim=outdim)

feature_pool = lambda pool_size=4, axis=1, f=T.max: lambda incoming: \
  layers.FeaturePoolLayer(incoming, pool_size=pool_size, axis=axis, pool_function=f)
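
These factories return functions from layer(s) to layer, so stages can be chained without naming intermediates. A small sketch, assuming the definitions above are in scope:

from lasagne import layers

net = layers.InputLayer((None, 32))
net = layers.DenseLayer(net, 64, nonlinearity=None)
net = batch_norm()(net)                  # BatchNormLayer
net = nonlinearity()(net)                # NonlinearityLayer with LeakyRectify(0.05)
net = dropout(0.2)(net)                  # DropoutLayer(p=0.2)
merged = elementwise_sum()([net, net])   # ElemwiseMergeLayer over T.add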
Code Example #26
def conv_nonl(data, num_filters, name, pad, use_bn=True):
    res = conv(data, num_filters, name, pad=pad)
    if (use_bn):
        res = L.BatchNormLayer(res, name='bn_' + name)
    res = L.NonlinearityLayer(res, rectify, name='relu_' + name)
    return res
Code Example #27
def build_deconv_network():
    input_var = theano.tensor.tensor4('input_var')

    net = {}
    net['input'] = layers.InputLayer(shape=(None, 3, PS, PS),
                                     input_var=input_var)

    # Encoding part
    net['conv1_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['input'],
                               64,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv1_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv1_1'],
                               64,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['pool1'] = layers.Pool2DLayer(net['conv1_2'],
                                      pool_size=(2, 2),
                                      stride=2,
                                      mode='max')

    net['conv2_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['pool1'],
                               128,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv2_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv2_1'],
                               128,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['pool2'] = layers.Pool2DLayer(net['conv2_2'],
                                      pool_size=(2, 2),
                                      stride=2,
                                      mode='max')

    net['conv3_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['pool2'],
                               256,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv3_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv3_1'],
                               256,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv3_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv3_2'],
                               256,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['pool3'] = layers.Pool2DLayer(net['conv3_3'],
                                      pool_size=(2, 2),
                                      stride=2,
                                      mode='max')

    net['conv4_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['pool3'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv4_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv4_1'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv4_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv4_2'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['pool4'] = layers.Pool2DLayer(net['conv4_3'],
                                      pool_size=(2, 2),
                                      stride=2,
                                      mode='max')

    net['conv5_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['pool4'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv5_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv5_1'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv5_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv5_2'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['pool5'] = layers.Pool2DLayer(net['conv5_3'],
                                      pool_size=(2, 2),
                                      stride=2,
                                      mode='max')

    net['fc6'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['pool5'],
                               4096,
                               filter_size=(7, 7),
                               stride=1,
                               pad='same')))

    # fc7 is the encoding layer
    net['fc7'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['fc6'],
                               4096,
                               filter_size=(1, 1),
                               stride=1,
                               pad='same')))

    # Decoding part
    net['fc6_deconv'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['fc7'],
                                 512,
                                 filter_size=(7, 7),
                                 stride=1,
                                 crop='same')))
    net['unpool5'] = layers.InverseLayer(net['fc6_deconv'], net['pool5'])

    net['deconv5_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['unpool5'],
                                 512,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv5_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv5_1'],
                                 512,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv5_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv5_2'],
                                 512,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['unpool4'] = layers.InverseLayer(net['deconv5_3'], net['pool4'])

    net['deconv4_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['unpool4'],
                                 512,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv4_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv4_1'],
                                 512,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv4_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv4_2'],
                                 256,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['unpool3'] = layers.InverseLayer(net['deconv4_3'], net['pool3'])

    net['deconv3_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['unpool3'],
                                 256,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv3_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv3_1'],
                                 256,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv3_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv3_2'],
                                 128,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['unpool2'] = layers.InverseLayer(net['deconv3_3'], net['pool2'])

    net['deconv2_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['unpool2'],
                                 128,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv2_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv2_1'],
                                 64,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['unpool1'] = layers.InverseLayer(net['deconv2_2'], net['pool1'])

    net['deconv1_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['unpool1'],
                                 64,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv1_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv1_1'],
                                 64,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))

    # Segmentation layer
    net['seg_score'] = layers.Deconv2DLayer(
        net['deconv1_2'],
        1,
        filter_size=(1, 1),
        stride=1,
        crop='same',
        nonlinearity=lasagne.nonlinearities.sigmoid)

    network = layers.ReshapeLayer(net['seg_score'], ([0], -1))
    output_var = lasagne.layers.get_output(network)
    all_param = lasagne.layers.get_all_params(network, trainable=True)

    return network, input_var, output_var, all_param
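
A minimal compile-and-predict sketch (PS and the unqualified batch_norm helper are assumed to be defined in the surrounding module, as in the snippet):

import numpy as np
import theano

network, input_var, output_var, all_param = build_deconv_network()
predict_fn = theano.function([input_var], output_var)
batch = np.zeros((2, 3, PS, PS), dtype=theano.config.floatX)
masks = predict_fn(batch)  # flattened per-pixel sigmoid segmentation scores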
Code Example #28
disc_layers.extend(
    inceptionModule(disc_layers[-1], [64, 112, 144, 288, 32, 64]))
disc_layers.extend(
    inceptionModule(disc_layers[-1], [128, 256, 160, 320, 32, 128]))
disc_layers.append(
    ll.MaxPool2DLayer(disc_layers[-1],
                      pool_size=3,
                      stride=2,
                      ignore_border=False))
disc_layers.extend(
    inceptionModule(disc_layers[-1], [128, 256, 160, 320, 32, 128]))
disc_layers.extend(
    inceptionModule(disc_layers[-1], [128, 384, 192, 384, 48, 128]))
disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
disc_layers.append(
    ll.DenseLayer(disc_layers[-1], num_units=1000, nonlinearity=linear))
disc_layers.append(ll.NonlinearityLayer(disc_layers[-1], nonlinearity=softmax))
disc_params = ll.get_all_params(disc_layers, trainable=True)

print("DISCRIMINATOR CREATED")

# costs
labels = T.ivector()
x_lab = T.tensor4()
temp = ll.get_output(disc_layers[-1], x_lab, deterministic=False, init=True)
init_updates = [u for l in disc_layers for u in getattr(l, 'init_updates', [])]

output_before_softmax_lab = ll.get_output(disc_layers[-1],
                                          x_lab,
                                          deterministic=False)

l_lab = output_before_softmax_lab[T.arange(args.batch_size), labels]
Code Example #29
    def __init__(
        self,
        image_shape,
        filter_shape,
        num_class,
        conv_type,
        kernel_size,
        kernel_pool_size,
        dropout_rate,
    ):
        """

        """
        self.filter_shape = filter_shape
        self.n_visible = numpy.prod(image_shape)
        self.n_layers = len(filter_shape)
        self.rng = RandomStreams(123)
        self.x = T.matrix()
        self.y = T.ivector()

        self.conv_layers = []

        NoiseLayer = layers.DropoutLayer

        dropout_rate = float(dropout_rate)

        self.l_input = layers.InputLayer((None, self.n_visible), self.x)
        this_layer = layers.ReshapeLayer(self.l_input, ([0], ) + image_shape)

        for l in range(self.n_layers):
            activation = lasagne.nonlinearities.rectify
            if len(filter_shape[l]) == 3:
                if conv_type == 'double' and filter_shape[l][1] > kernel_size:
                    this_layer = DoubleConvLayer(
                        this_layer,
                        filter_shape[l][0],
                        filter_shape[l][1:],
                        pad='same',
                        nonlinearity=activation,
                        kernel_size=kernel_size,
                        kernel_pool_size=kernel_pool_size)
                    this_layer = layers.batch_norm(this_layer)
                elif conv_type == 'maxout':
                    this_layer = layers.Conv2DLayer(this_layer,
                                                    filter_shape[l][0],
                                                    filter_shape[l][1:],
                                                    b=None,
                                                    pad='same',
                                                    nonlinearity=None)
                    this_layer = layers.FeaturePoolLayer(
                        this_layer, pool_size=kernel_pool_size**2)
                    this_layer = layers.BatchNormLayer(this_layer)
                    this_layer = layers.NonlinearityLayer(
                        this_layer, activation)

                elif conv_type == 'cyclic':
                    this_layers = []
                    this_layers.append(
                        layers.Conv2DLayer(this_layer,
                                           filter_shape[l][0],
                                           filter_shape[l][1:],
                                           b=None,
                                           pad='same',
                                           nonlinearity=None))
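                    # The remaining three branches share (tie) the first
                    # branch's weights, each rotated a further 90 degrees (swap
                    # the spatial axes, then flip one), so the merge below
                    # takes an elementwise max over four filter orientations.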
                    for _ in range(3):
                        W = this_layers[-1].W.dimshuffle(0, 1, 3, 2)[:, :, :, ::-1]
                        this_layers.append(
                            layers.Conv2DLayer(this_layer,
                                               filter_shape[l][0],
                                               filter_shape[l][1:],
                                               W=W,
                                               b=None,
                                               pad='same',
                                               nonlinearity=None))
                    this_layer = layers.ElemwiseMergeLayer(
                        this_layers, T.maximum)
                    this_layer = layers.BatchNormLayer(this_layer)
                    this_layer = layers.NonlinearityLayer(
                        this_layer, activation)

                elif conv_type == 'standard' \
                     or (conv_type == 'double' and filter_shape[l][1] <= kernel_size):
                    this_layer = layers.Conv2DLayer(this_layer,
                                                    filter_shape[l][0],
                                                    filter_shape[l][1:],
                                                    pad='same',
                                                    nonlinearity=activation)
                    this_layer = layers.batch_norm(this_layer)
                else:
                    raise NotImplementedError

                self.conv_layers.append(this_layer)

            elif len(filter_shape[l]) == 2:
                this_layer = layers.MaxPool2DLayer(this_layer, filter_shape[l])
                this_layer = NoiseLayer(this_layer, dropout_rate)
            elif len(filter_shape[l]) == 1:
                raise NotImplementedError

        self.top_conv_layer = this_layer
        this_layer = layers.GlobalPoolLayer(this_layer, T.mean)
        self.clf_layer = layers.DenseLayer(this_layer,
                                           num_class,
                                           W=lasagne.init.Constant(0.),
                                           nonlinearity=T.nnet.softmax)

        self.params = layers.get_all_params(self.clf_layer, trainable=True)

        self.params_all = layers.get_all_params(self.clf_layer)
Code Example #30
def tanh(_in, **kwargs):
    return L.NonlinearityLayer(_in, nonlinearity=NL.tanh)