def test_dimshuffle_false_get_output_for(self, DummyInputLayer):
    """Check Conv2DCCLayer output values in c01b mode (dimshuffle=False).

    Compares ``get_output_for`` against pylearn2's ``FilterActs`` on a
    random input/kernel pair; both operate directly in c01b layout.
    """
    try:
        from lasagne.layers.cuda_convnet import Conv2DCCLayer
        # pylearn2 is just as optional as cuda_convnet here, so it is
        # guarded by the same skip instead of erroring the test run.
        from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
    except ImportError:
        pytest.skip("cuda_convnet not available")

    # This implementation is tested against FilterActs instead of
    # theano.tensor.nnet.conv.conv2d because using the latter leads to
    # numerical precision errors.
    filter_acts = FilterActs(stride=1, pad=0, partial_sum=1)

    # c01b layout: (channels, rows, cols, batch). 'input_var' avoids
    # shadowing the builtin 'input'.
    input_var = theano.shared(floatX(np.random.random((4, 5, 5, 8))))
    kernel = theano.shared(floatX(np.random.random((4, 3, 3, 16))))

    input_layer = DummyInputLayer((4, 5, 5, 8))  # c01b instead of bc01
    layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
                          dimshuffle=False, W=kernel, b=None,
                          nonlinearity=None)

    output = np.array(filter_acts(input_var, kernel).eval())

    actual = layer.get_output_for(input_var).eval()
    actual = np.array(actual)

    assert actual.shape == output.shape
    assert actual.shape == layer.output_shape
    assert np.allclose(actual, output)
def test_dimshuffle_false_shapes(self, DummyInputLayer):
    """Check W and b parameter shapes in c01b mode (dimshuffle=False).

    With dimshuffle=False the filters are stored as
    (channels, rows, cols, num_filters) rather than bc01 order, and
    untied biases are (num_filters, out_rows, out_cols).
    """
    try:
        from lasagne.layers.cuda_convnet import Conv2DCCLayer
    except ImportError:
        pytest.skip("cuda_convnet not available")

    input_layer = DummyInputLayer((4, 32, 32, 128))  # c01b instead of bc01

    # Tied biases: one bias per filter.
    layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
                          dimshuffle=False)
    assert layer.W.get_value().shape == (4, 3, 3, 16)
    assert layer.b.get_value().shape == (16,)

    # Untied biases: one bias per filter and output position
    # (32 - 3 + 1 = 30 along each spatial dimension).
    layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
                          dimshuffle=False, untie_biases=True)
    assert layer.W.get_value().shape == (4, 3, 3, 16)
    assert layer.b.get_value().shape == (16, 30, 30)
def test_unsupported_settings(self, DummyInputLayer):
    """Check that Conv2DCCLayer rejects configurations cuda_convnet
    cannot handle, each with a descriptive RuntimeError.
    """
    try:
        from lasagne.layers.cuda_convnet import Conv2DCCLayer
    except ImportError:
        pytest.skip("cuda_convnet not available")

    input_layer = DummyInputLayer((128, 3, 32, 32))

    # Non-square filters are unsupported.
    with pytest.raises(RuntimeError) as exc:
        layer = Conv2DCCLayer(input_layer, num_filters=16,
                              filter_size=(3, 5))
    assert ("Conv2DCCLayer only supports square filters" in
            exc.value.args[0])

    # Non-square strides are unsupported.
    with pytest.raises(RuntimeError) as exc:
        layer = Conv2DCCLayer(input_layer, num_filters=16,
                              filter_size=(3, 3), stride=(1, 2))
    assert ("Conv2DCCLayer only supports square strides" in
            exc.value.args[0])

    # num_filters must be a multiple of 16.
    with pytest.raises(RuntimeError) as exc:
        layer = Conv2DCCLayer(input_layer, num_filters=15,
                              filter_size=(3, 3))
    assert ("Conv2DCCLayer requires num_filters to be a multiple of 16" in
            exc.value.args[0])

    # Non-square padding is unsupported.
    with pytest.raises(RuntimeError) as exc:
        layer = Conv2DCCLayer(input_layer, num_filters=16,
                              filter_size=(3, 3), pad=(1, 2))
    assert ("Conv2DCCLayer only supports square padding" in
            exc.value.args[0])

    # Input channel count must be 1, 2, 3 or a multiple of 4
    # (7 channels violates this).
    input_layer = DummyInputLayer((128, 7, 32, 32))
    with pytest.raises(RuntimeError) as exc:
        layer = Conv2DCCLayer(input_layer, num_filters=16,
                              filter_size=(3, 3))
    assert ("Conv2DCCLayer requires the number of input channels to be "
            "1, 2, 3 or a multiple of 4" in exc.value.args[0])
def test_pad(self, DummyInputLayer):
    """Check the output shape when explicit padding is given.

    32 + 2*3 - 3 + 1 = 36 along each spatial dimension.

    NOTE(review): a later method in this class reuses the name
    ``test_pad`` and therefore shadows this definition, so pytest never
    collects it. Its assertions are a subset of the later test's, so no
    coverage is lost, but this duplicate should be removed or renamed.
    """
    try:
        from lasagne.layers.cuda_convnet import Conv2DCCLayer
    except ImportError:
        pytest.skip("cuda_convnet not available")

    input_layer = DummyInputLayer((128, 3, 32, 32))
    layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
                          pad=(3, 3))
    assert layer.output_shape == (128, 16, 36, 36)
def test_pad(self, DummyInputLayer):
    """Check padding behavior of Conv2DCCLayer.

    Specifying both 'border_mode' and 'pad' must raise, and explicit
    padding must grow the output: 32 + 2*3 - 3 + 1 = 36 per dimension.
    """
    try:
        from lasagne.layers.cuda_convnet import Conv2DCCLayer
    except ImportError:
        pytest.skip("cuda_convnet not available")

    input_layer = DummyInputLayer((128, 3, 32, 32))

    # 'border_mode' and 'pad' are mutually exclusive.
    with pytest.raises(RuntimeError) as exc:
        layer = Conv2DCCLayer(input_layer, num_filters=16,
                              filter_size=(3, 3), border_mode='valid',
                              pad=(1, 1))
    assert ("You cannot specify both 'border_mode' and 'pad'" in
            exc.value.args[0])

    layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),
                          pad=(3, 3))
    assert layer.output_shape == (128, 16, 36, 36)