Example 1
    def test_not_implemented(self):
        try:
            from lasagne.layers.cuda_convnet import MaxPool2DCCLayer
        except ImportError:
            pytest.skip("cuda_convnet not available")

        input_layer = self.input_layer((128, 4, 12, 12))

        with pytest.raises(RuntimeError) as exc:
            layer = MaxPool2DCCLayer(input_layer, pool_size=2, pad=2)
        assert "MaxPool2DCCLayer does not support padding" in exc.value.args[0]

        with pytest.raises(RuntimeError) as exc:
            layer = MaxPool2DCCLayer(input_layer, pool_size=(2, 3))
        assert ("MaxPool2DCCLayer only supports square pooling regions"
                in exc.value.args[0])

        with pytest.raises(RuntimeError) as exc:
            layer = MaxPool2DCCLayer(input_layer, pool_size=2, stride=(1, 2))
        assert (("MaxPool2DCCLayer only supports using the same stride in "
                 "both directions") in exc.value.args[0])

        with pytest.raises(RuntimeError) as exc:
            layer = MaxPool2DCCLayer(input_layer, pool_size=2, stride=3)
        assert ("MaxPool2DCCLayer only supports stride <= pool_size"
                in exc.value.args[0])

        with pytest.raises(RuntimeError) as exc:
            layer = MaxPool2DCCLayer(input_layer,
                                     pool_size=2,
                                     ignore_border=True)
        assert ("MaxPool2DCCLayer does not support ignore_border"
                in exc.value.args[0])
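Taken together, the test pins down the layer's restrictions: no padding, square pooling regions, equal strides in both directions, stride no larger than pool_size, and no ignore_border. For contrast, a configuration that respects all of them constructs cleanly; a minimal sketch, assuming cuda_convnet is importable and reusing the same input shape as the test:

from lasagne.layers import InputLayer
from lasagne.layers.cuda_convnet import MaxPool2DCCLayer

input_layer = InputLayer((128, 4, 12, 12))
# Square pool, equal strides, stride <= pool_size, no padding,
# ignore_border left at its default:
layer = MaxPool2DCCLayer(input_layer, pool_size=2, stride=2)
assert layer.output_shape == (128, 4, 6, 6)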
Example 2
import lasagne
# Assumed from the original script: the `layers` module providing
# NervanaConvLayer, the bc01/c01b shuffle helpers and MaxPool2DCCLayer
# (typically from lasagne.layers.cuda_convnet), and the BATCH_SIZE constant.


def build_model(input_width, input_height, output_dim, batch_size=BATCH_SIZE):
    l_in = lasagne.layers.InputLayer(
        shape=(batch_size, 1, input_width, input_height),
    )

    l_in_c01b = bc01_to_c01b(l_in)

    l_conv1 = layers.NervanaConvLayer(
        l_in_c01b,
        num_filters=32,
        filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(),
        dimshuffle=False,
    )
    # pool_size was named `ds` in older Lasagne releases
    l_pool1 = MaxPool2DCCLayer(l_conv1, pool_size=(2, 2), dimshuffle=False)

    l_conv2 = layers.NervanaConvLayer(
        l_pool1,
        num_filters=32,
        filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(),
        dimshuffle=False,
    )
    l_pool2 = MaxPool2DCCLayer(l_conv2, pool_size=(2, 2), dimshuffle=False)

    l_pool2_bc01 = c01b_to_bc01(l_pool2)

    l_hidden1 = lasagne.layers.DenseLayer(
        l_pool2_bc01,
        num_units=256,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(),
    )

    l_hidden1_dropout = lasagne.layers.DropoutLayer(l_hidden1, p=0.5)

    # l_hidden2 = lasagne.layers.DenseLayer(
    #     l_hidden1_dropout,
    #     num_units=256,
    #     nonlinearity=lasagne.nonlinearities.rectify,
    #     )
    # l_hidden2_dropout = lasagne.layers.DropoutLayer(l_hidden2, p=0.5)

    l_out = lasagne.layers.DenseLayer(
        l_hidden1_dropout,
        num_units=output_dim,
        nonlinearity=lasagne.nonlinearities.softmax,
        W=lasagne.init.GlorotUniform(),
    )

    return l_out
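A hedged usage sketch for build_model: compile a prediction function from the returned output layer. The 28x28 input size and 10 output classes are placeholder values; get_output and theano.function are the standard Lasagne/Theano calls, and actually compiling this graph requires the CUDA backend that cuda_convnet targets.

import theano
import theano.tensor as T
import lasagne

l_out = build_model(input_width=28, input_height=28, output_dim=10)
X = T.tensor4('X')
# deterministic=True disables the dropout layer at prediction time
y_hat = lasagne.layers.get_output(l_out, X, deterministic=True)
predict_fn = theano.function([X], y_hat)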
Example 3
    def layer(self, input_layer, pool_size, stride):
        # Presumably used as a pytest fixture, with input_layer, pool_size
        # and stride supplied by other fixtures.
        try:
            from lasagne.layers.cuda_convnet import MaxPool2DCCLayer
        except ImportError:
            pytest.skip("cuda_convnet not available")
        return MaxPool2DCCLayer(
            input_layer,
            pool_size=pool_size,
            stride=stride,
        )
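Note that stride is passed explicitly here; in Lasagne's pooling layers, stride=None falls back to pool_size, giving non-overlapping pooling. A small sketch of that default, with an illustrative input shape:

from lasagne.layers import InputLayer
from lasagne.layers.cuda_convnet import MaxPool2DCCLayer

input_layer = InputLayer((64, 8, 16, 16))
# stride=None defaults to pool_size, i.e. non-overlapping pooling
layer = MaxPool2DCCLayer(input_layer, pool_size=4, stride=None)
assert layer.output_shape == (64, 8, 4, 4)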
Example 4
    def test_dimshuffle_false(self):
        try:
            from lasagne.layers.cuda_convnet import MaxPool2DCCLayer
        except ImportError:
            pytest.skip("cuda_convnet not available")
        from lasagne.layers.input import InputLayer

        input_layer = InputLayer((4, 12, 12, 16))  # c01b order
        layer = MaxPool2DCCLayer(input_layer, pool_size=2, dimshuffle=False)
        assert layer.output_shape == (4, 6, 6, 16)

        input = floatX(np.random.randn(4, 12, 12, 16))
        # max_pool_2d is presumably a NumPy reference implementation defined
        # elsewhere in the test module; floatX comes from lasagne.utils.
        output = max_pool_2d(input.transpose(3, 0, 1, 2), (2, 2), (2, 2))
        output = output.transpose(1, 2, 3, 0)
        actual = layer.get_output_for(input).eval()
        assert np.allclose(output, actual)
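For contrast with dimshuffle=False above: with the default dimshuffle=True the layer accepts the usual bc01 layout and shuffles to c01b internally. A sketch, again assuming cuda_convnet is available:

from lasagne.layers import InputLayer
from lasagne.layers.cuda_convnet import MaxPool2DCCLayer

input_layer = InputLayer((16, 4, 12, 12))  # bc01 order
layer = MaxPool2DCCLayer(input_layer, pool_size=2)  # dimshuffle=True is the default
assert layer.output_shape == (16, 4, 6, 6)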