Example 1
    def test_wrong_num_leading_axes(self, DenseLayer, dummy_input_layer):
        with pytest.raises(ValueError) as exc:
            DenseLayer(dummy_input_layer, 5, num_leading_axes=3)
        assert "leaving no trailing axes" in exc.value.args[0]
        with pytest.raises(ValueError) as exc:
            DenseLayer(dummy_input_layer, 5, num_leading_axes=-4)
        assert "requesting more trailing axes" in exc.value.args[0]
Example 2
    def test_variable_shape(self, DenseLayer):
        # should work:
        assert DenseLayer((None, 10), 20).output_shape == (None, 20)
        assert DenseLayer((10, None, 10), 20,
                          num_leading_axes=2).output_shape == (10, None, 20)
        # should fail:
        for shape, num_leading_axes in ((10, None), 1), ((10, None, 10), 1):
            with pytest.raises(ValueError) as exc:
                DenseLayer(shape, 20, num_leading_axes=num_leading_axes)
            assert "requires a fixed input shape" in exc.value.args[0]
Example 3
    def test_init_none_nonlinearity(self, DenseLayer, dummy_input_layer):
        layer = DenseLayer(
            dummy_input_layer,
            num_units=3,
            nonlinearity=None,
        )
        assert layer.nonlinearity == lasagne.nonlinearities.identity
Example 4
    def get_layers(self):
        # If a single nonlinearity was given, repeat it once per network
        if not hasattr(self.nonlin_before_merge, '__len__'):
            nonlins_before_merge = ((self.nonlin_before_merge,) *
                                    len(self.networks))
        else:
            nonlins_before_merge = self.nonlin_before_merge
        layers_per_net = [net.get_layers() for net in self.networks]
        # Check that all networks produce the same number of sample predictions
        n_sample_preds = get_n_sample_preds(layers_per_net[0][-1])
        for layers in layers_per_net:
            assert get_n_sample_preds(layers[-1]) == n_sample_preds
        # Replace each final dense softmax layer by a dense linear layer
        reduced_layers = [replace_dense_softmax_by_dense_linear(
            all_l, n_f,
            nonlin_before_merge=nonlin,
            batch_norm_before_merge=self.batch_norm_before_merge)
            for all_l, n_f, nonlin in zip(layers_per_net,
                                          self.n_features_per_net,
                                          nonlins_before_merge)]
        # Make all reduced networks share the same input layer
        use_same_input_layer(reduced_layers)

        final_layers = [layers[-1] for layers in reduced_layers]
        l_merged = ConcatLayer(final_layers)

        l_merged = DenseLayer(l_merged, num_units=self.n_classes,
                              nonlinearity=softmax)
        return lasagne.layers.get_all_layers(l_merged)
Example 5
    def layer_vars(self, request, dummy_input_layer, DenseLayer):
        input_shape = dummy_input_layer.shape
        num_units = 5
        num_leading_axes = request.param
        W_shape = (np.prod(input_shape[num_leading_axes:]), num_units)
        b_shape = (num_units, )

        W = Mock()
        b = Mock()
        nonlinearity = Mock()
        W.return_value = np.arange(np.prod(W_shape)).reshape(W_shape)
        b.return_value = np.arange(np.prod(b_shape)).reshape(b_shape) * 3
        layer = DenseLayer(
            dummy_input_layer,
            num_units=num_units,
            num_leading_axes=num_leading_axes,
            W=W,
            b=b,
            nonlinearity=nonlinearity,
        )

        return {
            'input_shape': input_shape,
            'num_units': num_units,
            'num_leading_axes': num_leading_axes,
            'W_shape': W_shape,
            'b_shape': b_shape,
            'W': W,
            'b': b,
            'nonlinearity': nonlinearity,
            'layer': layer,
        }
Example 6
    def test_named_layer_param_names(self, DenseLayer, dummy_input_layer):
        layer = DenseLayer(
            dummy_input_layer,
            num_units=3,
            name = "foo"
            )

        assert layer.W.name == "foo.W"
        assert layer.b.name == "foo.b"
Example 7
def create_lstm(input_vars, num_inputs, depth, hidden_layer_size, num_outputs):
    network = lasagne.layers.InputLayer(shape=(None, 1, 1, num_inputs),
                                        input_var=input_vars)
    #network = GaussianNoiseLayer(network, sigma=0.01)
    nonlin = lasagne.nonlinearities.rectify  # leaky_rectify
    for i in range(depth):
        network = LSTMLayer(network,
                            hidden_layer_size,
                            learn_init=True,
                            nonlinearity=nonlin)

    network = ReshapeLayer(network, (-1, hidden_layer_size))
    network = DenseLayer(network, num_outputs, nonlinearity=softmax)
    return network
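
A minimal usage sketch for create_lstm (the imports, variable names, and sizes below are assumptions for illustration, not part of the original example):

import theano.tensor as T
import lasagne

# Hypothetical call: build the network and get its symbolic softmax output
input_vars = T.tensor4('inputs')  # matches the (None, 1, 1, num_inputs) InputLayer
network = create_lstm(input_vars, num_inputs=64, depth=2,
                      hidden_layer_size=128, num_outputs=10)
prediction = lasagne.layers.get_output(network)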
Example 8
def example_network(dropout=True):
    model = InputLayer((None, 1, 28, 28))
    model = Pool2DLayer(model, 4, mode='average_inc_pad')

    def conv_layer(incoming, num_filters):
        tmp = Conv2DLayer(incoming, num_filters, 3, pad='valid')
        tmp = BatchNormLayer(tmp)
        if dropout:
            tmp = DropoutLayer(tmp, 0.3)
        return NonlinearityLayer(tmp)

    model = conv_layer(model, 64)
    model = conv_layer(model, 32)
    model = conv_layer(model, 16)

    model = GlobalPoolLayer(model)
    model = DenseLayer(model, 10, nonlinearity=softmax)
    return model
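
One way to use example_network would be to compile a deterministic prediction function; the variable names below are assumed for illustration:

import theano
import theano.tensor as T
import lasagne

model = example_network(dropout=True)
X = T.tensor4('X')
# deterministic=True disables the DropoutLayers at prediction time
probs = lasagne.layers.get_output(model, X, deterministic=True)
predict_fn = theano.function([X], probs)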
Example 9
def create_rnn(input_vars, num_inputs, depth, hidden_layer_size, num_outputs):
    # network = InputLayer((None, None, num_inputs), input_vars)
    network = lasagne.layers.InputLayer(shape=(None, 1, 1, num_inputs),
                                        input_var=input_vars)
    batch_size_theano, _, _, seqlen = network.input_var.shape

    network = GaussianNoiseLayer(network, sigma=0.05)
    for i in range(depth):
        network = RecurrentLayer(network,
                                 hidden_layer_size,
                                 W_hid_to_hid=GlorotUniform(),
                                 W_in_to_hid=GlorotUniform(),
                                 b=Constant(1.0),
                                 nonlinearity=lasagne.nonlinearities.tanh,
                                 learn_init=True)
    network = ReshapeLayer(network, (-1, hidden_layer_size))
    network = DenseLayer(network, num_outputs, nonlinearity=softmax)

    return network
Example 10
def create_blstm(input_vars, mask_vars, num_inputs, depth, hidden_layer_size,
                 num_outputs):
    network = lasagne.layers.InputLayer(shape=(None, 1, 1, num_inputs),
                                        input_var=input_vars)
    mask = InputLayer((None, None), mask_vars)
    network = GaussianNoiseLayer(network, sigma=0.01)
    for i in range(depth):
        forward = LSTMLayer(network,
                            hidden_layer_size,
                            mask_input=mask,
                            learn_init=True)
        backward = LSTMLayer(network,
                             hidden_layer_size,
                             mask_input=mask,
                             learn_init=True,
                             backwards=True)
        network = ElemwiseSumLayer([forward, backward])
    network = ReshapeLayer(network, (-1, hidden_layer_size))
    network = DenseLayer(network, num_outputs, nonlinearity=softmax)
    return network
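
Calling create_blstm also requires a mask variable for the LSTM layers; a hedged sketch with assumed shapes and names:

import theano.tensor as T

inputs = T.tensor4('inputs')
masks = T.matrix('masks')  # (batch_size, seq_len): 1 for real steps, 0 for padding
net = create_blstm(inputs, masks, num_inputs=40, depth=1,
                   hidden_layer_size=64, num_outputs=5)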
Example 11
    def invlayer_vars(self):
        from lasagne.layers.dense import DenseLayer
        from lasagne.layers.input import InputLayer
        from lasagne.layers.special import InverseLayer
        from lasagne.nonlinearities import identity

        l_in = InputLayer(shape=(10, 12))

        layer = DenseLayer(
            l_in,
            num_units=3,
            b=None,
            nonlinearity=identity,
        )

        invlayer = InverseLayer(incoming=layer, layer=layer)

        return {
            'layer': layer,
            'invlayer': invlayer,
        }
Example 12
    def layer_vars(self, dummy_input_layer):
        from lasagne.layers.dense import DenseLayer
        W = Mock()
        b = Mock()
        nonlinearity = Mock()

        W.return_value = np.ones((12, 3))
        b.return_value = np.ones((3, )) * 3
        layer = DenseLayer(
            dummy_input_layer,
            num_units=3,
            W=W,
            b=b,
            nonlinearity=nonlinearity,
        )

        return {
            'W': W,
            'b': b,
            'nonlinearity': nonlinearity,
            'layer': layer,
        }
Example 13
    def test_named_layer_param_names(self, DenseLayer, dummy_input_layer):
        layer = DenseLayer(dummy_input_layer, num_units=3, name="foo")

        assert layer.W.name == "foo" + utils.SCOPE_DELIMITER + "W"
        assert layer.b.name == "foo" + utils.SCOPE_DELIMITER + "b"
Example 14
    def __init__(self,
                 input_layer,
                 num_units,
                 W_in_to_hid=init.Uniform(),
                 W_hid_to_hid=init.Uniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 hid_init=init.Constant(0.),
                 backwards=False,
                 learn_init=False,
                 gradient_steps=-1):
        '''
        Create a recurrent layer.

        :parameters:
            - input_layer : nntools.layers.Layer
                Input to the recurrent layer
            - num_units : int
                Number of hidden units in the layer
            - W_in_to_hid : function or np.ndarray or theano.shared
                Initializer for input-to-hidden weight matrix
            - W_hid_to_hid : function or np.ndarray or theano.shared
                Initializer for hidden-to-hidden weight matrix
            - b : function or np.ndarray or theano.shared
                Initializer for bias vector
            - nonlinearity : function or theano.tensor.elemwise.Elemwise
                Nonlinearity to apply when computing new state
            - hid_init : function or np.ndarray or theano.shared
                Initial hidden state
            - backwards : boolean
                If True, process the sequence backwards
            - learn_init : boolean
                If True, initial hidden values are learned
            - gradient_steps : int
                Number of timesteps to include in backpropagated gradient
                If -1, backpropagate through the entire sequence
        '''

        input_shape = input_layer.get_output_shape()
        # We will be passing the input at each time step to the dense layer,
        # so we drop the time dimension (axis 1) from the input shape
        in_to_hid = DenseLayer(InputLayer((input_shape[0], ) +
                                          input_shape[2:]),
                               num_units,
                               W=W_in_to_hid,
                               b=b,
                               nonlinearity=nonlinearity)
        # The hidden-to-hidden layer expects its inputs to have num_units
        # features because it recycles the previous hidden state
        hid_to_hid = DenseLayer(InputLayer((input_shape[0], num_units)),
                                num_units,
                                W=W_hid_to_hid,
                                b=None,
                                nonlinearity=nonlinearity)

        super(RecurrentLayer, self).__init__(input_layer,
                                             in_to_hid,
                                             hid_to_hid,
                                             nonlinearity=nonlinearity,
                                             hid_init=hid_init,
                                             backwards=backwards,
                                             learn_init=learn_init,
                                             gradient_steps=gradient_steps)
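
Given the signature above, an instantiation might look like the following sketch (the input shape and unit count are assumptions; InputLayer is the input layer class from the same code base):

# Hypothetical: a (batch_size, seq_len, num_features) input fed to the recurrent layer
l_in = InputLayer((32, 20, 40))
l_rec = RecurrentLayer(l_in, num_units=100,
                       backwards=False, learn_init=True,
                       gradient_steps=-1)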