def test_init_untie_biases(self, NINLayer_c01b, dummy_input_layer):
    """With untie_biases=True the bias spans units and both spatial dims."""
    layer = NINLayer_c01b(dummy_input_layer, num_units=5, untie_biases=True)
    # Untied biases: one bias per (unit, row, column) position.
    assert (layer.b.shape.eval() == (5, 4, 5)).all()
def test_get_output_for(self, dummy_input_layer, NINLayer_c01b, extra_kwargs):
    """The layer feeds W.x (+ bias) into the nonlinearity, c01b layout."""
    nonlinearity = Mock()
    layer = NINLayer_c01b(
        dummy_input_layer,
        num_units=6,
        nonlinearity=nonlinearity,
        **extra_kwargs
    )

    inp = theano.shared(np.random.uniform(-1, 1, (3, 4, 5, 2)))
    result = layer.get_output_for(inp)
    # The layer must hand back exactly what the (mocked) nonlinearity made.
    assert result is nonlinearity.return_value

    # Recompute the pre-activation independently and compare it against the
    # symbolic expression that was passed into the nonlinearity.
    preact = nonlinearity.call_args[0][0]
    x_val = inp.get_value()
    w_val = layer.W.get_value()
    expected = np.dot(w_val, x_val.reshape(x_val.shape[0], -1))
    expected = expected.reshape(
        w_val.shape[0], x_val.shape[1], x_val.shape[2], x_val.shape[3])
    if layer.b is not None:
        if layer.untie_biases:
            # Untied bias is (units, rows, cols); broadcast over batch axis.
            expected = expected + layer.b.get_value()[..., None]
        else:
            # Tied bias is (units,); broadcast over all remaining axes.
            expected = expected + layer.b.get_value()[:, None, None, None]
    assert np.allclose(preact.eval(), expected)
def test_init_none_nonlinearity_bias(self, NINLayer_c01b, dummy_input_layer):
    """nonlinearity=None falls back to identity; b=None disables the bias."""
    layer = NINLayer_c01b(
        dummy_input_layer, num_units=3, nonlinearity=None, b=None)
    assert layer.nonlinearity == lasagne.nonlinearities.identity
    assert layer.b is None
def layer_vars(self, NINLayer_c01b, dummy_input_layer):
    """Build a layer whose W, b and nonlinearity are mocks; return all four."""
    w_init = Mock()
    b_init = Mock()
    nonlinearity = Mock()
    # The layer calls the initializers with a shape; return concrete arrays.
    w_init.return_value = np.ones((5, 3))
    b_init.return_value = np.ones((5, ))
    layer = NINLayer_c01b(
        dummy_input_layer,
        num_units=5,
        W=w_init,
        b=b_init,
        nonlinearity=nonlinearity,
    )
    return dict(W=w_init, b=b_init, nonlinearity=nonlinearity, layer=layer)