Code Example #1
def build_mlp_fn(x0, y0, x1, y1, s0, s1, c, axes):
    """
    Creates a Theano function to test the WindowLayer

    Parameters
    ----------
    x0: x coordinate of the left of the window
    y0: y coordinate of the top of the window
    x1: x coordinate of the right of the window
    y1: y coordinate of the bottom of the window
    s0: x shape of the images of the input space
    s1: y shape of the images of the input space
    c: number of channels of the input space
    axes: description of the axes of the input space

    Returns
    -------
    f: a Theano function applying the window layer
    with window (x0, y0, x1, y1).
    """
    mlp = MLP(layers=[WindowLayer('h0', window=(x0, y0, x1, y1))],
              input_space=Conv2DSpace(shape=(s0, s1),
                                      num_channels=c,
                                      axes=axes))
    X = mlp.get_input_space().make_batch_theano()
    f = theano.function([X], mlp.fprop(X))
    return f
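A minimal usage sketch for the helper above (the window values and the expected output shape here are illustrative assumptions, not part of the original test):

import numpy as np
import theano

# Hypothetical call: window (x0, y0, x1, y1) = (1, 0, 2, 4) on 5x5 inputs,
# using build_mlp_fn as defined above.
f = build_mlp_fn(1, 0, 2, 4, 5, 5, 1, ('b', 0, 1, 'c'))
batch = np.random.rand(3, 5, 5, 1).astype(theano.config.floatX)
# Assuming axis 0 indexes y and axis 1 indexes x, the window keeps
# rows 0..4 and columns 1..2:
print(f(batch).shape)  # (3, 5, 2, 1)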
Code Example #2
    def test_gradient_clipping(self):
        """
        Create a known gradient and check whether it is being clipped
        correctly
        """
        mlp = MLP(layers=[Linear(dim=1, irange=0, layer_name='linear')],
                  nvis=1)
        W, b = mlp.layers[0].get_params()
        W.set_value([[10]])

        X = mlp.get_input_space().make_theano_batch()
        y = mlp.get_output_space().make_theano_batch()

        cost = Default()
        gradients, _ = cost.get_gradients(mlp, (X, y))

        clipped_cost = GradientClipping(20, Default())
        clipped_gradients, _ = clipped_cost.get_gradients(mlp, (X, y))

        # The MLP defines f(x) = (x W)^2, with df/dW = 2 W x^2
        f = function([X, y], [gradients[W].sum(), clipped_gradients[W].sum()],
                     allow_input_downcast=True)

        # df/dW = df/db = 20 for W = 10, x = 1, so the norm is 20 * sqrt(2)
        # and the gradients should be clipped to 20 / sqrt(2)
        np.testing.assert_allclose(f([[1]], [[0]]), [20, 20 / np.sqrt(2)])
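The arithmetic in the final comment can be checked with plain NumPy, independent of Theano (a standalone sketch):

import numpy as np

grad = np.array([20., 20.])        # df/dW and df/db at W = 10, x = 1, y = 0
norm = np.sqrt((grad ** 2).sum())  # 20 * sqrt(2), which exceeds the threshold
clipped = grad * 20. / norm        # rescaled so the total norm is exactly 20
print(clipped)                     # [14.142... 14.142...] == 20 / sqrt(2)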
Code Example #3
def test_set_get_weights_Softmax():
    """
    Tests setting and getting weights for Softmax layer.
    """
    num_classes = 2
    dim = 3
    conv_dim = [3, 4, 5]

    # VectorSpace input space
    layer = Softmax(num_classes, 's', irange=.1)
    softmax_mlp = MLP(layers=[layer], input_space=VectorSpace(dim=dim))
    vec_weights = np.random.randn(dim, num_classes).astype(config.floatX)
    layer.set_weights(vec_weights)
    assert np.allclose(layer.W.get_value(), vec_weights)
    layer.W.set_value(vec_weights)
    assert np.allclose(layer.get_weights(), vec_weights)

    # Conv2DSpace input space
    layer = Softmax(num_classes, 's', irange=.1)
    softmax_mlp = MLP(layers=[layer],
                      input_space=Conv2DSpace(shape=(conv_dim[0], conv_dim[1]),
                                              num_channels=conv_dim[2]))
    conv_weights = np.random.randn(conv_dim[0], conv_dim[1], conv_dim[2],
                                   num_classes).astype(config.floatX)
    layer.set_weights(conv_weights.reshape(np.prod(conv_dim), num_classes))
    assert np.allclose(layer.W.get_value(),
                       conv_weights.reshape(np.prod(conv_dim), num_classes))
    layer.W.set_value(conv_weights.reshape(np.prod(conv_dim), num_classes))
    assert np.allclose(layer.get_weights_topo(),
                       np.transpose(conv_weights, axes=(3, 0, 1, 2)))
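The final assertion encodes the weight-layout convention under test: set_weights takes the flat (rows * cols * channels, num_classes) matrix, while get_weights_topo returns one topological filter per class, i.e. the axes reordered to (num_classes, rows, cols, channels). A shape-only NumPy restatement with the same dimensions:

import numpy as np

conv_weights = np.random.randn(3, 4, 5, 2)   # (rows, cols, channels, classes)
flat = conv_weights.reshape(3 * 4 * 5, 2)    # layout passed to set_weights
topo = np.transpose(conv_weights, axes=(3, 0, 1, 2))
print(flat.shape, topo.shape)                # (60, 2) and (2, 3, 4, 5)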
Code Example #4
File: test_mlp.py (Project: BloodD/pylearn2)
def test_masked_fprop():
    # Construct a dirt-simple linear network with identity weights.
    mlp = MLP(nvis=2, layers=[Linear(2, 'h0', irange=0),
                              Linear(2, 'h1', irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[1].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[1].set_biases(np.arange(3, 5, dtype=mlp.get_weights().dtype))

    # Verify that get_total_input_dimension works.
    np.testing.assert_equal(mlp.get_total_input_dimension(['h0', 'h1']), 4)
    inp = theano.tensor.matrix()

    # Accumulate the sum of output of all masked networks.
    l = []
    for mask in xrange(16):
        l.append(mlp.masked_fprop(inp, mask))
    outsum = reduce(lambda x, y: x + y, l)

    f = theano.function([inp], outsum, allow_input_downcast=True)
    np.testing.assert_equal(f([[5, 3]]), [[144., 144.]])
    np.testing.assert_equal(f([[2, 7]]), [[96., 208.]])

    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 22)
    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 2,
                             ['h3'])
    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 2,
                             None, 2., {'h3': 4})
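Where do the expected values come from? With identity weights the unmasked network computes x + b0 + b1, so f([5, 3]) = [9, 9]. Judging by the asserted values, masked_fprop rescales surviving inputs by 2 (the inverse of the 0.5 keep rate), which makes each unit's expectation over masks equal its unmasked value, so the sum over all 2^4 = 16 masks is 16 * [9, 9] = [144, 144]. A standalone NumPy check of that arithmetic (assuming the scale of 2):

import itertools
import numpy as np

b0, b1 = np.array([1., 2.]), np.array([3., 4.])

def masked_net(x, m0, m1):
    h0 = 2 * m0 * x + b0           # identity weights, masked and rescaled input
    return 2 * m1 * h0 + b1

x = np.array([5., 3.])
total = sum(masked_net(x, np.array(m0), np.array(m1))
            for m0 in itertools.product([0, 1], repeat=2)
            for m1 in itertools.product([0, 1], repeat=2))
print(total)                       # [144. 144.] == 16 * (x + b0 + b1)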
Code Example #5
File: test_nnet.py (Project: yusuke0519/pylearn2)
def test_kl():
    """
    Test whether the function kl() properly validates its input.
    """
    init_mode = theano.config.compute_test_value
    theano.config.compute_test_value = 'raise'
    
    try:
        mlp = MLP(layers=[Sigmoid(dim=10, layer_name='Y', irange=0.1)],
                  nvis=10)
        X = mlp.get_input_space().make_theano_batch()
        Y = mlp.get_output_space().make_theano_batch()
        X.tag.test_value = np.random.random(
            get_debug_values(X)[0].shape).astype(theano.config.floatX)
        Y_hat = mlp.fprop(X)

        # This call should not raise any error:
        ave = kl(Y, Y_hat, 1)

        # The following calls should raise ValueError exceptions:
        Y.tag.test_value[2][3] = 1.1
        np.testing.assert_raises(ValueError, kl, Y, Y_hat, 1)
        Y.tag.test_value[2][3] = -0.1
        np.testing.assert_raises(ValueError, kl, Y, Y_hat, 1)
    
    finally:
        theano.config.compute_test_value = init_mode
Code Example #6
File: test_vae.py (Project: rudaoshi/pylearn2)
def test_convolutional_compatible():
    """
    VAE allows convolutional encoding networks
    """
    encoding_model = MLP(
        layers=[
            SpaceConverter(
                layer_name='conv2d_converter',
                output_space=Conv2DSpace(shape=[4, 4], num_channels=1)
            ),
            ConvRectifiedLinear(
                layer_name='h',
                output_channels=2,
                kernel_shape=[2, 2],
                kernel_stride=[1, 1],
                pool_shape=[1, 1],
                pool_stride=[1, 1],
                pool_type='max',
                irange=0.01)
            ]
    )
    decoding_model = MLP(layers=[Linear(layer_name='h', dim=16, irange=0.01)])
    prior = DiagonalGaussianPrior()
    conditional = BernoulliVector(mlp=decoding_model, name='conditional')
    posterior = DiagonalGaussian(mlp=encoding_model, name='posterior')
    vae = VAE(nvis=16, prior=prior, conditional=conditional,
              posterior=posterior, nhid=16)
    X = T.matrix('X')
    lower_bound = vae.log_likelihood_lower_bound(X, num_samples=10)
    f = theano.function(inputs=[X], outputs=lower_bound)
    rng = make_np_rng(default_seed=11223)
    f(as_floatX(rng.uniform(size=(10, 16))))
Code Example #7
File: test_vae.py (Project: rudaoshi/pylearn2)
def test_conditional_initialize_parameters():
    """
    Conditional.initialize_parameters does the following:
    * Set its input_space and ndim attributes
    * Calls its MLP's set_mlp method
    * Sets its MLP's input_space
    * Validates its MLP
    * Sets its params and param names
    """
    mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
                             max_col_norm=0.01)])
    conditional = DummyConditional(mlp=mlp, name='conditional')
    vae = DummyVAE()
    conditional.set_vae(vae)
    input_space = VectorSpace(dim=5)
    conditional.initialize_parameters(input_space=input_space, ndim=5)

    testing.assert_same_object(input_space, conditional.input_space)
    testing.assert_equal(conditional.ndim, 5)
    testing.assert_same_object(mlp.get_mlp(), conditional)
    testing.assert_same_object(mlp.input_space, input_space)
    mlp_params = mlp.get_params()
    conditional_params = conditional.get_params()
    assert all([mp in conditional_params for mp in mlp_params])
    assert all([cp in mlp_params for cp in conditional_params])
Code Example #8
    def get_model(self, batch_size):
        vis = self.structure[0][0]
        self.model = MLP(layers=self.layers,
                         nvis=vis,
                         batch_size=batch_size,
                         layer_name=None)
        return self.model
Code Example #9
File: test_mlp.py (Project: wanasit/pylearn2)
def test_init_bias_target_marginals():
    """
    Test `Softmax` layer instantiation with `init_bias_target_marginals`.
    """
    batch_size = 5
    n_features = 5
    n_classes = 3
    n_targets = 3
    irange = 0.1
    learning_rate = 0.1

    X_data = np.random.random(size=(batch_size, n_features))

    Y_categorical = np.asarray([[0], [1], [1], [2], [2]])
    class_frequencies = np.asarray([.2, .4, .4])
    categorical_dataset = DenseDesignMatrix(X_data,
                                            y=Y_categorical,
                                            y_labels=n_classes)

    Y_continuous = np.random.random(size=(batch_size, n_targets))
    Y_means = np.mean(Y_continuous, axis=0)
    continuous_dataset = DenseDesignMatrix(X_data,
                                           y=Y_continuous)

    Y_multiclass = np.random.randint(n_classes,
                                     size=(batch_size, n_targets))
    multiclass_dataset = DenseDesignMatrix(X_data,
                                           y=Y_multiclass,
                                           y_labels=n_classes)

    def softmax_layer(dataset):
        return Softmax(n_classes, 'h0', irange=irange,
                       init_bias_target_marginals=dataset)

    valid_categorical_mlp = MLP(
        layers=[softmax_layer(categorical_dataset)],
        nvis=n_features
    )

    actual = valid_categorical_mlp.layers[0].b.get_value()
    expected = pseudoinverse_softmax_numpy(class_frequencies)
    assert np.allclose(actual, expected)

    valid_continuous_mlp = MLP(
        layers=[softmax_layer(continuous_dataset)],
        nvis=n_features
    )

    actual = valid_continuous_mlp.layers[0].b.get_value()
    expected = pseudoinverse_softmax_numpy(Y_means)
    assert np.allclose(actual, expected)

    def invalid_multiclass_mlp():
        return MLP(
            layers=[softmax_layer(multiclass_dataset)],
            nvis=n_features
        )
    assert_raises(AssertionError, invalid_multiclass_mlp)
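The first two assertions rely on pseudoinverse_softmax_numpy being a right inverse of the softmax: pushing the recovered biases back through a softmax reproduces the class marginals. A standalone check of that property in plain NumPy (the mean-shift is one valid choice of inverse, since softmax is invariant to adding a constant):

import numpy as np

def softmax_numpy(z):
    e = np.exp(z - z.max())
    return e / e.sum()

p = np.array([.2, .4, .4])               # class frequencies from the test above
b = np.log(p) - np.log(p).mean()         # one pseudoinverse of the softmax
print(np.allclose(softmax_numpy(b), p))  # True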
Code Example #10
File: test_vae.py (Project: JesseLivezey/pylearn2)
def test_conditional_returns_lr_scalers():
    """
    Conditional.get_lr_scalers calls its MLP's get_lr_scalers method
    """
    mlp = MLP(layers=[Linear(layer_name="h", dim=5, irange=0.01, W_lr_scale=0.01)])
    conditional = DummyConditional(mlp=mlp, name="conditional")
    vae = DummyVAE()
    conditional.set_vae(vae)
    conditional.initialize_parameters(input_space=VectorSpace(dim=5), ndim=5)
    testing.assert_equal(conditional.get_lr_scalers(), mlp.get_lr_scalers())
Code Example #11
File: test_mlp.py (Project: KennethPierce/pylearnk)
def test_sigmoid_detection_cost():
    # This is only a smoke test: verifies that it compiles and runs,
    # not any particular value.
    rng = np.random.RandomState(0)
    y = (rng.uniform(size=(4, 3)) > 0.5).astype('uint8')
    X = theano.shared(rng.uniform(size=(4, 2)))
    model = MLP(nvis=2, layers=[Sigmoid(monitor_style='detection', dim=3,
                layer_name='y', irange=0.8)])
    y_hat = model.fprop(X)
    model.cost(y, y_hat).eval()
Code Example #12
def test_softmax_weight_init():
    """
    Constructs softmax layers with different weight initialization
    parameters.
    """
    nvis = 5
    num_classes = 10
    MLP(layers=[Softmax(num_classes, 's', irange=0.1)], nvis=nvis)
    MLP(layers=[Softmax(num_classes, 's', istdev=0.1)], nvis=nvis)
    MLP(layers=[Softmax(num_classes, 's', sparse_init=2)], nvis=nvis)
Code Example #13
File: test_vae.py (Project: rudaoshi/pylearn2)
def test_conditional_returns_mlp_weights():
    """
    Conditional.get_weights calls its MLP's get_weights method
    """
    mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01)])
    conditional = DummyConditional(mlp=mlp, name='conditional')
    vae = DummyVAE()
    conditional.set_vae(vae)
    conditional.initialize_parameters(input_space=VectorSpace(dim=5), ndim=5)
    numpy.testing.assert_equal(conditional.get_weights(), mlp.get_weights())
Code Example #14
File: test_mlp.py (Project: KennethPierce/pylearnk)
def test_dropout_input_mask_value():
    # Construct a dirt-simple linear network with identity weights.
    mlp = MLP(nvis=2, layers=[IdentityLayer(2, 'h0', irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[0].dropout_input_mask_value = -np.inf
    inp = theano.tensor.matrix()
    f = theano.function([inp], mlp.masked_fprop(inp, 1, default_input_scale=1),
                        allow_input_downcast=True)
    np.testing.assert_equal(f([[4., 3.]]), [[4., -np.inf]])
Code Example #15
File: test_mlp.py (Project: julius506/pylearn2)
def test_softmax_binary_targets():
    """
    Constructs softmax layers with binary target and with vector targets
    to check that they give the same cost.
    """
    num_classes = 10
    batch_size = 20
    mlp_bin = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1, binary_target_dim=1)],
        nvis=100
    )
    mlp_vec = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1)],
        nvis=100
    )

    X = mlp_bin.get_input_space().make_theano_batch()
    y_bin = mlp_bin.get_target_space().make_theano_batch()
    y_vec = mlp_vec.get_target_space().make_theano_batch()

    y_hat_bin = mlp_bin.fprop(X)
    y_hat_vec = mlp_vec.fprop(X)
    cost_bin = theano.function([X, y_bin], mlp_bin.cost(y_bin, y_hat_bin),
                               allow_input_downcast=True)
    cost_vec = theano.function([X, y_vec], mlp_vec.cost(y_vec, y_hat_vec),
                               allow_input_downcast=True)

    X_data = np.random.random(size=(batch_size, 100))
    y_bin_data = np.random.randint(low=0, high=10, size=(batch_size, 1))
    y_vec_data = np.zeros((batch_size, num_classes))
    y_vec_data[np.arange(batch_size), y_bin_data.flatten()] = 1
    np.testing.assert_allclose(cost_bin(X_data, y_bin_data),
                               cost_vec(X_data, y_vec_data))
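The equivalence under test is simply that an integer class index selects the same row of log-probabilities that the corresponding one-hot vector dots against. A plain NumPy restatement with hypothetical values, independent of the model:

import numpy as np

log_p = np.log(np.random.dirichlet(np.ones(10), size=20))  # fake log-softmax
idx = np.random.randint(0, 10, size=20)                    # integer targets
one_hot = np.eye(10)[idx]                                  # one-hot targets
nll_bin = -log_p[np.arange(20), idx].mean()
nll_vec = -(one_hot * log_p).sum(axis=1).mean()
print(np.allclose(nll_bin, nll_vec))                       # True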
Code Example #16
File: test_vae.py (Project: JesseLivezey/pylearn2)
def test_conditional_modify_updates():
    """
    Conditional.modify_updates calls its MLP's modify_updates method
    """
    mlp = MLP(layers=[Linear(layer_name="h", dim=5, irange=0.01, max_col_norm=0.01)])
    conditional = DummyConditional(mlp=mlp, name="conditional")
    vae = DummyVAE()
    conditional.set_vae(vae)
    conditional.initialize_parameters(input_space=VectorSpace(dim=5), ndim=5)
    updates = OrderedDict(zip(mlp.get_params(), mlp.get_params()))
    testing.assert_equal(conditional.modify_updates(updates),
                         mlp.modify_updates(updates))
Code Example #17
File: test_mlp.py (Project: Rt0220/pylearn2)
def test_composite_layer():
    """
    Test the routing functionality of the CompositeLayer
    """
    # Without routing
    composite_layer = CompositeLayer('composite_layer',
                                     [Linear(2, 'h0', irange=0),
                                      Linear(2, 'h1', irange=0),
                                      Linear(2, 'h2', irange=0)])
    mlp = MLP(nvis=2, layers=[composite_layer])
    for i in range(3):
        composite_layer.layers[i].set_weights(
            np.eye(2, dtype=theano.config.floatX)
        )
        composite_layer.layers[i].set_biases(
            np.zeros(2, dtype=theano.config.floatX)
        )
    X = theano.tensor.matrix()
    y = mlp.fprop(X)
    funs = [theano.function([X], y_elem) for y_elem in y]
    x_numeric = np.random.rand(2, 2).astype('float32')
    y_numeric = [f(x_numeric) for f in funs]
    assert np.all(x_numeric == y_numeric)

    # With routing
    for inputs_to_layers in [{0: [1], 1: [2], 2: [0]},
                             {0: [1], 1: [0, 2], 2: []},
                             {0: [], 1: []}]:
        composite_layer = CompositeLayer('composite_layer',
                                         [Linear(2, 'h0', irange=0),
                                          Linear(2, 'h1', irange=0),
                                          Linear(2, 'h2', irange=0)],
                                         inputs_to_layers)
        input_space = CompositeSpace([VectorSpace(dim=2),
                                      VectorSpace(dim=2),
                                      VectorSpace(dim=2)])
        mlp = MLP(input_space=input_space, layers=[composite_layer])
        for i in range(3):
            composite_layer.layers[i].set_weights(
                np.eye(2, dtype=theano.config.floatX)
            )
            composite_layer.layers[i].set_biases(
                np.zeros(2, dtype=theano.config.floatX)
            )
        X = [theano.tensor.matrix() for _ in range(3)]
        y = mlp.fprop(X)
        funs = [theano.function(X, y_elem, on_unused_input='ignore')
                for y_elem in y]
        x_numeric = [np.random.rand(2, 2).astype(theano.config.floatX)
                     for _ in range(3)]
        y_numeric = [f(*x_numeric) for f in funs]
        assert all([all([np.all(x_numeric[i] == y_numeric[j])
                         for j in inputs_to_layers[i]])
                    for i in inputs_to_layers])
Code Example #18
def test_identity_layer():
    nvis = 10

    mlp = MLP(nvis=nvis, layers=[util.IdentityLayer(layer_name='ident')])

    X = T.matrix()
    f = theano.function([X], mlp.fprop(X))

    for _ in range(5):
        X = np.random.rand(10, nvis).astype(theano.config.floatX)
        yield _test_identity_layer, f, X
Code Example #19
File: test_mlp.py (Project: julius506/pylearn2)
def test_nested_mlp():
    """
    Constructs a nested MLP and tries to fprop through it
    """
    inner_mlp = MLP(layers=[Linear(10, 'h0', 0.1), Linear(10, 'h1', 0.1)],
                    layer_name='inner_mlp')
    outer_mlp = MLP(layers=[CompositeLayer(layer_name='composite',
                                           layers=[inner_mlp,
                                                   Linear(10, 'h2', 0.1)])],
                    nvis=10)
    X = outer_mlp.get_input_space().make_theano_batch()
    f = theano.function([X], outer_mlp.fprop(X))
    f(np.random.rand(5, 10).astype(theano.config.floatX))
Code Example #20
File: test_vae.py (Project: rudaoshi/pylearn2)
def test_conditional_modify_updates():
    """
    Conditional.modify_updates calls its MLP's modify_updates method
    """
    mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
                             max_col_norm=0.01)])
    conditional = DummyConditional(mlp=mlp, name='conditional')
    vae = DummyVAE()
    conditional.set_vae(vae)
    conditional.initialize_parameters(input_space=VectorSpace(dim=5), ndim=5)
    updates = OrderedDict(zip(mlp.get_params(), mlp.get_params()))
    testing.assert_equal(conditional.modify_updates(updates),
                         mlp.modify_updates(updates))
Code Example #21
File: test_vae.py (Project: rudaoshi/pylearn2)
def test_vae_automatically_finds_kl_integrator():
    """
    VAE automatically finds the right KLIntegrator
    """
    encoding_model = MLP(layers=[Linear(layer_name='h', dim=10, irange=0.01)])
    decoding_model = MLP(layers=[Linear(layer_name='h', dim=10, irange=0.01)])
    prior = DiagonalGaussianPrior()
    conditional = BernoulliVector(mlp=decoding_model, name='conditional')
    posterior = DiagonalGaussian(mlp=encoding_model, name='posterior')
    vae = VAE(nvis=10, prior=prior, conditional=conditional,
              posterior=posterior, nhid=5)
    assert (vae.kl_integrator is not None and
            isinstance(vae.kl_integrator, DiagonalGaussianPriorPosteriorKL))
Code Example #22
File: test_mlp.py (Project: wanasit/pylearn2)
def test_nested_mlp():
    """
    Constructs a nested MLP and tries to fprop through it
    """
    inner_mlp = MLP(layers=[Linear(10, 'h0', 0.1), Linear(10, 'h1', 0.1)],
                    layer_name='inner_mlp')
    outer_mlp = MLP(layers=[CompositeLayer(layer_name='composite',
                                           layers=[inner_mlp,
                                                   Linear(10, 'h2', 0.1)])],
                    nvis=10)
    X = outer_mlp.get_input_space().make_theano_batch()
    f = theano.function([X], outer_mlp.fprop(X))
    f(np.random.rand(5, 10).astype(theano.config.floatX))
Code Example #23
File: test_mlp.py (Project: wanasit/pylearn2)
def test_input_and_target_source():
    """
    Create a MLP and test input_source and target_source
    for default and non-default options.
    """
    mlp = MLP(
        layers=[CompositeLayer(
            'composite',
            [Linear(10, 'h0', 0.1),
                Linear(10, 'h1', 0.1)],
            {
                0: [1],
                1: [0]
            }
            )
        ],
        input_space=CompositeSpace([VectorSpace(15), VectorSpace(20)]),
        input_source=('features0', 'features1'),
        target_source=('targets0', 'targets1')
    )
    np.testing.assert_equal(mlp.get_input_source(), ('features0', 'features1'))
    np.testing.assert_equal(mlp.get_target_source(), ('targets0', 'targets1'))

    mlp = MLP(
        layers=[Linear(10, 'h0', 0.1)],
        input_space=VectorSpace(15)
    )
    np.testing.assert_equal(mlp.get_input_source(), 'features')
    np.testing.assert_equal(mlp.get_target_source(), 'targets')
Code Example #24
File: test_mlp.py (Project: kod3r/pylearn2)
def test_masked_fprop():
    # Construct a dirt-simple linear network with identity weights.
    mlp = MLP(nvis=2,
              layers=[Linear(2, 'h0', irange=0),
                      Linear(2, 'h1', irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[1].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[1].set_biases(np.arange(3, 5, dtype=mlp.get_weights().dtype))

    # Verify that get_total_input_dimension works.
    np.testing.assert_equal(mlp.get_total_input_dimension(['h0', 'h1']), 4)
    inp = theano.tensor.matrix()

    # Accumulate the sum of output of all masked networks.
    l = []
    for mask in xrange(16):
        l.append(mlp.masked_fprop(inp, mask))
    outsum = reduce(lambda x, y: x + y, l)

    f = theano.function([inp], outsum)
    np.testing.assert_equal(f([[5, 3]]), [[144., 144.]])
    np.testing.assert_equal(f([[2, 7]]), [[96., 208.]])

    # Verify that using a too-wide mask fails.
    raised = False
    try:
        mlp.masked_fprop(inp, 22)
    except ValueError:
        raised = True
    np.testing.assert_(raised)
Code Example #25
File: test_rnn.py (Project: capybaralet/pylearn2)
    def test_gradient(self):
        """
        Testing to see whether the gradient can be calculated.
        """
        rnn = MLP(input_space=SequenceSpace(VectorSpace(dim=1)),
                  layers=[Recurrent(dim=2, layer_name='recurrent',
                                    irange=0, nonlinearity=lambda x: x),
                          Linear(dim=1, layer_name='linear', irange=0)])

        X_data, X_mask = rnn.get_input_space().make_theano_batch()
        y_data, y_mask = rnn.get_output_space().make_theano_batch()

        default_cost = Default()
        cost = default_cost.expr(rnn, ((X_data, X_mask), (y_data, y_mask)))
        tensor.grad(cost, rnn.get_params(), disconnected_inputs='ignore')
Code Example #26
def check_unimplemented_case(ConvNonlinearity):

    conv_model = MLP(
        input_space=Conv2DSpace(shape=[1, 1], axes=['b', 0, 1, 'c'],
                                num_channels=1),
        layers=[ConvElemwise(layer_name='conv', nonlinearity=ConvNonlinearity,
                             output_channels=1, kernel_shape=[1, 1],
                             pool_shape=[1, 1], pool_stride=[1, 1],
                             irange=1.0)],
        batch_size=1
    )

    X = conv_model.get_input_space().make_theano_batch()
    Y = conv_model.get_target_space().make_theano_batch()
    Y_hat = conv_model.fprop(X)

    # assert_raises takes the callable and its arguments; calling cost()
    # directly (and asserting on the None return value) would always fail.
    np.testing.assert_raises(NotImplementedError, conv_model.cost, Y, Y_hat)
Code Example #27
File: DBL_model.py (Project: caomw/Deep_wrapper)
    def buildLayer(self):    
        # setup layer
        self.layers = []
        for param in self.p_layers:            
            if param[0].param_type==0:
                self.layers = self.layers + DBL_ConvLayers(param)
            elif param[0].param_type==1:
                self.layers = self.layers + DBL_FcLayers(param)
            elif param[0].param_type==2:
                self.layers = self.layers + DBL_CfLayers(param)        
        self.model = MLP(self.layers, input_space=self.ishape)

        # load available weight
        pre_dl_id = self.param_pkl[:self.param_pkl.rfind('_')+1]
        fns = glob.glob(pre_dl_id+'*.pkl')
        epoch_max = 0
        if len(fns)==0:
            # first time to do it, load matlab prior
            mat_init = 'init_p'+str(self.model_id)+'_'+str(self.train_id)+'.mat'
            if os.path.exists(mat_init):
                print "load initial mat weight: ", mat_init
                self.loadWeight(mat_init)
        else:
            for fn in fns:
                epoch_id = int(fn[fn.rfind('_')+1:fn.find('.pkl')])
                if (epoch_id>epoch_max and epoch_id<=self.num_epoch):
                    epoch_max = epoch_id
            if epoch_max>0:
                print "load weight at epoch: ", epoch_max
                self.loadWeight(pre_dl_id+str(epoch_max)+'.pkl')
                self.num_epoch -= epoch_max
        self.p_monitor['epoch'] = epoch_max
Code Example #28
def loadModel2(pklname):

    ishape = Conv2DSpace(shape=[48, 48], num_channels=1)
    nclass = 7
    # create layers
    nk = [30, 40]  # train3040.pkl.cpu
    #nk = [32, 20, 10]
    #nk = [40,30,20]
    ks = [[8, 8], [5, 5], [3, 3]]
    ir = [0.05, 0.05, 0.05]
    ps = [[4, 4], [4, 4], [2, 2]]
    pd = [[2, 2], [2, 2], [2, 2]]
    kn = [0.9, 0.9, 0.9]
    layers = DBL_ConvLayers(nk, ks, ir, ps, pd, kn)
    layer_soft = Softmax(
        layer_name='y',
        #max_col_norm = 1.9365,
        n_classes=nclass,
        #init_bias_target_marginals=DBL.ds_train,
        #istdev = .05
        irange=.0)
    #layers.append(layer_soft)

    # create DBL_model
    model = MLP(layers, input_space=ishape)
    layer_params = cPickle.load(open(pklname))
    layer_id = 0
    for layer in model.layers:
        if layer_id < len(layers) - 1:
            layer.set_weights(layer_params[layer_id][0])
            layer.set_biases(layer_params[layer_id][1])
            layer_id = layer_id + 1
    return model
Code Example #29
def get_mlp_softmax(structure):
    n_input, n_output = structure

    # layer = Softmax(n_classes=n_output, irange=0.02, layer_name='y')
    layer = MLP(layers=[Softmax(n_classes=n_output, irange=0.02, layer_name='y')], nvis=500)

    return layer
Code Example #30
File: test_convnet.py (Project: zxsted/lisa_emotiw)
def test_convnet():
    layers = []
    dataset = get_dataset()
    input_space = Conv2DSpace(shape=[256, 256], num_channels=1)

    conv_layer = ConvRectifiedLinear(output_channels=12,
                                     irange=.005,
                                     layer_name="h0",
                                     kernel_shape=[88, 88],
                                     kernel_stride=[8, 8],
                                     pool_shape=[1, 1],
                                     pool_stride=[1, 1],
                                     max_kernel_norm=1.932)

    layers.append(conv_layer)

    maxout_layer = Maxout(layer_name="h1",
                          irange=.005,
                          num_units=600,
                          num_pieces=4,
                          max_col_norm=1.932)

    layers.append(maxout_layer)
    sigmoid_layer = Sigmoid(layer_name="y",
                            dim=484,
                            monitor_style="detection",
                            irange=.005)

    layers.append(sigmoid_layer)
    model = MLP(batch_size=100, layers=layers, input_space=input_space)

    trainer = get_layer_trainer_sgd(model, dataset)
    trainer.main_loop()
Code Example #31
File: test_maxout.py (Project: JakeMick/pylearn2)
def test_min_zero():
    """
    This test guards against a bug where the size of the zero buffer used with
    the min_zero flag was specified to have the wrong size. The bug only
    manifested when compiled with optimizations off, because the optimizations
    discard information about the size of the zero buffer.
    """
    mlp = MLP(input_space=VectorSpace(1),
              layers=[Maxout(layer_name="test_layer", num_units=1,
                             num_pieces=2, irange=.05, min_zero=True)])
    X = T.matrix()
    output = mlp.fprop(X)
    # Compile in debug mode so we don't optimize out the size of the buffer
    # of zeros
    f = function([X], output, mode="DEBUG_MODE")
    f(np.zeros((1, 1)).astype(X.dtype))
Code Example #32
def construct_model(inputs_shape, filters, bias, kernel_stride, pool_type,
                    pool_shape, pool_stride, conv_class):
    conv_3d_input_space = Conv3DSpace(inputs_shape[1:4],
                                      num_channels=inputs_shape[4],
                                      axes=('b', 0, 1, 2, 'c'))
    conv_3d_layer = Conv3dElemwise(output_channels=filters.shape[0],
                                   kernel_shape=filters.shape[1:4],
                                   kernel_stride=kernel_stride,
                                   layer_name='conv3d_lin',
                                   nonlinearity=IdentityConvNonlinearity(),
                                   conv_transformer_class=conv_class,
                                   pool_transformer_class=CudnnPoolTransformer,
                                   irange=0.001,
                                   pool_type=pool_type,
                                   pool_shape=pool_shape,
                                   pool_stride=pool_stride)
    softmax_layer = Softmax(max_col_norm=2,
                            layer_name='y',
                            n_classes=2,
                            istdev=.05)
    mlp = MLP(input_space=conv_3d_input_space,
              layers=[conv_3d_layer, softmax_layer])
    # Convert filters to the layer's axes; ('b', 0, 1, 2, 'c') are the
    # test data axes.
    converted_filters = Conv3DSpace.convert_numpy(
        filters, ('b', 0, 1, 2, 'c'), conv_3d_layer.detector_space.axes)
    conv_3d_layer.set_weights(converted_filters)
    conv_3d_layer.set_biases(bias)
    return mlp
Code Example #33
File: test_vae.py (Project: rudaoshi/pylearn2)
def test_multiple_samples_allowed():
    """
    VAE allows multiple samples per data point
    """
    encoding_model = MLP(layers=[Linear(layer_name='h', dim=10, irange=0.01)])
    decoding_model = MLP(layers=[Linear(layer_name='h', dim=10, irange=0.01)])
    prior = DiagonalGaussianPrior()
    conditional = BernoulliVector(mlp=decoding_model, name='conditional')
    posterior = DiagonalGaussian(mlp=encoding_model, name='posterior')
    vae = VAE(nvis=10, prior=prior, conditional=conditional,
              posterior=posterior, nhid=5)
    X = T.matrix('X')
    lower_bound = vae.log_likelihood_lower_bound(X, num_samples=10)
    f = theano.function(inputs=[X], outputs=lower_bound)
    rng = make_np_rng(default_seed=11223)
    f(as_floatX(rng.uniform(size=(10, 10))))
Code Example #34
File: test_train.py (Project: yo-ga/TextDetector)
def test_execution_order():

    # ensure save is called directly after monitoring by checking
    # parameter values in `on_monitor` and `on_save`.

    model = MLP(layers=[Softmax(layer_name='y', n_classes=2, irange=0.)],
                nvis=3)

    dataset = DenseDesignMatrix(X=np.random.normal(size=(6, 3)),
                                y=np.random.normal(size=(6, 2)))

    epoch_counter = EpochCounter(max_epochs=1)

    algorithm = SGD(batch_size=2,
                    learning_rate=0.1,
                    termination_criterion=epoch_counter)

    extension = ParamMonitor()

    train = Train(dataset=dataset,
                  model=model,
                  algorithm=algorithm,
                  extensions=[extension],
                  save_freq=1,
                  save_path="save.pkl")

    # mock save
    train.save = MethodType(only_run_extensions, train)

    train.main_loop()
Code Example #35
def test_exhaustive_dropout_average():
    # This is only a smoke test: verifies that it compiles and runs,
    # not any particular value.
    inp = theano.tensor.matrix()
    mlp = MLP(nvis=2,
              layers=[
                  Linear(2, 'h0', irange=0.8),
                  Linear(2, 'h1', irange=0.8),
                  Softmax(3, 'out', irange=0.8)
              ])
    out = exhaustive_dropout_average(mlp, inp)
    f = theano.function([inp], out, allow_input_downcast=True)
    f([[2.3, 4.9]])

    out = exhaustive_dropout_average(mlp, inp, input_scales={'h0': 3})
    f = theano.function([inp], out, allow_input_downcast=True)
    f([[2.3, 4.9]])

    out = exhaustive_dropout_average(mlp, inp, masked_input_layers=['h1'])
    f = theano.function([inp], out, allow_input_downcast=True)
    f([[2.3, 4.9]])

    np.testing.assert_raises(ValueError, exhaustive_dropout_average, mlp, inp,
                             ['h5'])

    np.testing.assert_raises(ValueError, exhaustive_dropout_average, mlp, inp,
                             ['h0'], 2., {'h5': 3.})
Code Example #36
def test_multiple_inputs():
    """
    Create a VectorSpacesDataset with two inputs (features0 and features1)
    and train an MLP which takes both inputs for 1 epoch.
    """
    mlp = MLP(layers=[
        FlattenerLayer(
            CompositeLayer('composite',
                           [Linear(10, 'h0', 0.1),
                            Linear(10, 'h1', 0.1)], {
                                0: [1],
                                1: [0]
                            })),
        Softmax(5, 'softmax', 0.1)
    ],
              input_space=CompositeSpace([VectorSpace(15),
                                          VectorSpace(20)]),
              input_source=('features0', 'features1'))
    dataset = VectorSpacesDataset(
        (np.random.rand(20, 20).astype(theano.config.floatX),
         np.random.rand(20, 15).astype(theano.config.floatX),
         np.random.rand(20, 5).astype(theano.config.floatX)),
        (CompositeSpace(
            [VectorSpace(20), VectorSpace(15),
             VectorSpace(5)]), ('features1', 'features0', 'targets')))
    train = Train(dataset, mlp, SGD(0.1, batch_size=5))
    train.algorithm.termination_criterion = EpochCounter(1)
    train.main_loop()
Code Example #37
def test_sigmoid_layer_misclass_reporting():
    mlp = MLP(nvis=3, layers=[Sigmoid(layer_name='h0', dim=1, irange=0.005,
                                      monitor_style='classification')])
    target = theano.tensor.matrix(dtype=theano.config.floatX)
    batch = theano.tensor.matrix(dtype=theano.config.floatX)
    rval = mlp.layers[0].get_monitoring_channels_from_state(
        mlp.fprop(batch), target)

    f = theano.function([batch, target], [tensor.gt(mlp.fprop(batch), 0.5),
                                          rval['misclass']],
                        allow_input_downcast=True)
    rng = np.random.RandomState(0)

    for _ in range(10):  # repeat a few times for statistical strength
        targets = (rng.uniform(size=(30, 1)) > 0.5).astype('uint8')
        out, misclass = f(rng.normal(size=(30, 3)), targets)
        np.testing.assert_allclose((targets != out).mean(), misclass)
Code Example #38
File: test_maxout.py (Project: KennethPierce/pylearnk)
def test_min_zero():
    """
    This test guards against a bug where the size of the zero buffer used with
    the min_zero flag was specified to have the wrong size. The bug only
    manifested when compiled with optimizations off, because the optimizations
    discard information about the size of the zero buffer.
    """
    mlp = MLP(input_space=VectorSpace(1),
              layers=[Maxout(layer_name="test_layer", num_units=1,
                             num_pieces=2, irange=.05, min_zero=True)])
    X = T.matrix()
    output = mlp.fprop(X)
    # Compile in debug mode so we don't optimize out the size of the buffer
    # of zeros
    f = function([X], output, mode="DEBUG_MODE")
    f(np.zeros((1, 1)).astype(X.dtype))
Code Example #39
File: main.py (Project: hycis/conditional_computation)
def model1():
    #pdb.set_trace()
    # train set X has dim (60,000, 784), y has dim (60,000, 10)
    train_set = MNIST(which_set='train', one_hot=True)
    # test set X has dim (10,000, 784), y has dim (10,000, 10)
    valid_set = MNIST(which_set='test', one_hot=True)
    test_set = MNIST(which_set='test', one_hot=True)

    #import pdb
    #pdb.set_trace()
    #print train_set.X.shape[1]

    # =====<Create the MLP Model>=====

    h2_layer = NoisyRELU(layer_name='h1',
                         sparse_init=15,
                         noise_factor=5,
                         dim=1000,
                         desired_active_rate=0.2,
                         bias_factor=20,
                         max_col_norm=1)
    #h2_layer = RectifiedLinear(layer_name='h2', dim=100, sparse_init=15, max_col_norm=1)
    #print h1_layer.get_params()
    #h2 = RectifiedLinear(layer_name='h2', dim=500, sparse_init=15, max_col_norm=1)
    y_layer = Softmax(layer_name='y', n_classes=10, irange=0., max_col_norm=1)

    mlp = MLP(batch_size=200,
              input_space=VectorSpace(dim=train_set.X.shape[1]),
              layers=[h2_layer, y_layer])

    # =====<Create the SGD algorithm>=====
    sgd = SGD(init_momentum=0.1,
              learning_rate=0.01,
              monitoring_dataset={'valid': valid_set},
              cost=MethodCost('cost_from_X'),
              termination_criterion=MonitorBased(
                  channel_name='valid_y_misclass', prop_decrease=0.001, N=50))
    #sgd.setup(model=mlp, dataset=train_set)

    # =====<Extensions>=====
    ext = [MomentumAdjustor(start=1, saturate=10, final_momentum=0.9)]

    # =====<Create Training Object>=====
    save_path = './mlp_model1.pkl'
    train_obj = Train(dataset=train_set,
                      model=mlp,
                      algorithm=sgd,
                      extensions=ext,
                      save_path=save_path,
                      save_freq=0)
    #train_obj.setup_extensions()

    #import pdb
    #pdb.set_trace()
    train_obj.main_loop()

    # =====<Run the training>=====
Code Example #40
def test_show_weights():
    """
    Create a pickled model and show the weights
    """
    skip_if_no_matplotlib()
    with open('model.pkl', 'wb') as f:
        model = MLP(layers=[Linear(dim=1, layer_name='h0', irange=0.1)],
                    nvis=784)
        model.dataset_yaml_src = """
!obj:pylearn2.datasets.mnist.MNIST {
        which_set: 'train'
}
"""
        cPickle.dump(model, f, protocol=cPickle.HIGHEST_PROTOCOL)
    show_weights('model.pkl', rescale='individual',
                 border=True, out='garbage.png')
    os.remove('model.pkl')
    os.remove('garbage.png')
Code Example #41
File: test_vae.py (Project: JesseLivezey/pylearn2)
def test_conditional_encode_conditional_parameters():
    """
    Conditional.encode_conditional_parameters calls its MLP's fprop method
    """
    mlp = MLP(layers=[Linear(layer_name="h", dim=5, irange=0.01, max_col_norm=0.01)])
    conditional = DummyConditional(mlp=mlp, name="conditional")
    vae = DummyVAE()
    conditional.set_vae(vae)
    input_space = VectorSpace(dim=5)
    conditional.initialize_parameters(input_space=input_space, ndim=5)

    X = T.matrix("X")
    mlp_Y1, mlp_Y2 = mlp.fprop(X)
    cond_Y1, cond_Y2 = conditional.encode_conditional_params(X)
    f = theano.function([X], [mlp_Y1, mlp_Y2, cond_Y1, cond_Y2])
    rval = f(as_floatX(numpy.random.uniform(size=(10, 5))))
    numpy.testing.assert_allclose(rval[0], rval[2])
    numpy.testing.assert_allclose(rval[1], rval[3])
Code Example #42
File: test_mlp.py (Project: BloodD/pylearn2)
def test_batchwise_dropout():
    mlp = MLP(nvis=2, layers=[IdentityLayer(2, 'h0', irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[0].dropout_input_mask_value = 0
    inp = theano.tensor.matrix()
    f = theano.function([inp], mlp.dropout_fprop(inp, per_example=False),
                        allow_input_downcast=True)
    for _ in range(10):
        d = f([[3.0, 4.5]] * 3)
        np.testing.assert_equal(d[0], d[1])
        np.testing.assert_equal(d[0], d[2])

    f = theano.function([inp], mlp.dropout_fprop(inp, per_example=True),
                        allow_input_downcast=True)
    d = f([[3.0, 4.5]] * 3)
    print d
    np.testing.assert_(np.any(d[0] != d[1]) or np.any(d[0] != d[2]))
Code Example #43
File: test_rnn.py (Project: capybaralet/pylearn2)
    def test_cost(self):
        """
        Use an RNN to calculate Mersenne number sequences of different
        lengths and check whether the costs make sense.
        """
        rnn = MLP(input_space=SequenceSpace(VectorSpace(dim=1)),
                  layers=[Recurrent(dim=1, layer_name='recurrent',
                                    irange=0, nonlinearity=lambda x: x),
                          Linear(dim=1, layer_name='linear', irange=0)])
        W, U, b = rnn.layers[0].get_params()
        W.set_value([[1]])
        U.set_value([[2]])

        W, b = rnn.layers[1].get_params()
        W.set_value([[1]])

        X_data, X_mask = rnn.get_input_space().make_theano_batch()
        y_data, y_mask = rnn.get_output_space().make_theano_batch()
        y_data_hat, y_mask_hat = rnn.fprop((X_data, X_mask))

        seq_len = 20
        X_data_vals = np.ones((seq_len, seq_len, 1))
        X_mask_vals = np.triu(np.ones((seq_len, seq_len)))
        y_data_vals = np.tile((2 ** np.arange(1, seq_len + 1) - 1),
                              (seq_len, 1)).T[:, :, np.newaxis]
        y_mask_vals = np.triu(np.ones((seq_len, seq_len)))

        f = function([X_data, X_mask, y_data, y_mask],
                     rnn.cost((y_data, y_mask), (y_data_hat, y_mask_hat)),
                     allow_input_downcast=True)
        # The cost for two exact sequences should be zero
        assert f(X_data_vals, X_mask_vals, y_data_vals, y_mask_vals) == 0
        # If the input is different, the cost should be non-zero
        assert f(X_data_vals + 1, X_mask_vals, y_data_vals, y_mask_vals) != 0
        # And same for the target data; using squared L2 norm, so should be 1
        assert f(X_data_vals, X_mask_vals, y_data_vals + 1, y_mask_vals) == 1
        # But if the masked data changes, the cost should remain the same
        X_data_vals_plus = X_data_vals + (1 - X_mask_vals[:, :, None])
        assert f(X_data_vals_plus, X_mask_vals, y_data_vals, y_mask_vals) == 0
        y_data_vals_plus = y_data_vals + (1 - y_mask_vals[:, :, None])
        assert f(X_data_vals, X_mask_vals, y_data_vals_plus, y_mask_vals) == 0
Code Example #44
def test_correctness():
    model = MLP(
        layers=[Linear(dim=10, layer_name='linear', irange=1.0),
                Softmax(n_classes=2, layer_name='softmax', irange=1.0)],
        batch_size=10,
        nvis=10
    )

    cost = LpPenalty(variables=model.get_params(), p=2)

    penalty = cost.expr(model, None)

    penalty_function = theano.function(inputs=[], outputs=penalty)

    p = penalty_function()

    actual_p = 0
    for param in model.get_params():
        actual_p += numpy.sum(param.get_value() ** 2)

    assert numpy.allclose(p, actual_p)
Code Example #45
File: test_mlp.py (Project: lamblin/pylearn2)
def test_softmax_bin_targets_channels(seed=0):
    """
    Constructs softmax layers with binary target and with vector targets
    to check that they give the same 'misclass' channel value.
    """
    np.random.seed(seed)
    num_classes = 2
    batch_size = 5
    mlp_bin = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1,
                        binary_target_dim=1)],
        nvis=100
    )
    mlp_vec = MLP(
        layers=[Softmax(num_classes, 's1', irange=0.1)],
        nvis=100
    )

    X = mlp_bin.get_input_space().make_theano_batch()
    y_bin = mlp_bin.get_target_space().make_theano_batch()
    y_vec = mlp_vec.get_target_space().make_theano_batch()

    X_data = np.random.random(size=(batch_size, 100))
    X_data = X_data.astype(theano.config.floatX)
    y_bin_data = np.random.randint(low=0, high=num_classes,
                                   size=(batch_size, 1))
    y_vec_data = np.zeros((batch_size, num_classes),
                          dtype=theano.config.floatX)
    y_vec_data[np.arange(batch_size), y_bin_data.flatten()] = 1

    def channel_value(channel_name, model, y, y_data):
        chans = model.get_monitoring_channels((X, y))
        f_channel = theano.function([X, y], chans['s1_' + channel_name])
        return f_channel(X_data, y_data)

    for channel_name in ['misclass', 'nll']:
        vec_val = channel_value(channel_name, mlp_vec, y_vec, y_vec_data)
        bin_val = channel_value(channel_name, mlp_bin, y_bin, y_bin_data)
        print(channel_name, vec_val, bin_val)
        np.testing.assert_allclose(vec_val, bin_val)
Code Example #46
File: test_mlp.py (Project: dmitriy-serdyuk/pylearn2)
def test_input_and_target_source():
    """
    Create a MLP and test input_source and target_source
    for default and non-default options.
    """
    mlp = MLP(
        layers=[CompositeLayer(
                    'composite',
                    [Linear(10, 'h0', 0.1),
                     Linear(10, 'h1', 0.1)],
                    {
                        0: [1],
                        1: [0]
                    }
                )
        ],
        input_space=CompositeSpace([VectorSpace(15), VectorSpace(20)]),
        input_source=('features0', 'features1'),
        target_source=('targets0', 'targets1')
    )
    np.testing.assert_equal(mlp.get_input_source(), ('features0', 'features1'))
    np.testing.assert_equal(mlp.get_target_source(), ('targets0', 'targets1'))

    mlp = MLP(
        layers=[Linear(10, 'h0', 0.1)],
        input_space=VectorSpace(15)
    )
    np.testing.assert_equal(mlp.get_input_source(), 'features')
    np.testing.assert_equal(mlp.get_target_source(), 'targets')
Code Example #47
def test_sigmoid_detection_cost():
    """
    Tests whether the sigmoid convolutional layer returns the right value.
    """

    rng = np.random.RandomState(0)
    sigmoid_nonlin = SigmoidConvNonlinearity(monitor_style="detection")
    (rows, cols) = (10, 10)
    axes = ('c', 0, 1, 'b')
    nchs = 1

    space_shp = (nchs, rows, cols, 1)
    X_vals = np.random.uniform(-0.01, 0.01,
                               size=space_shp).astype(config.floatX)
    X = theano.shared(X_vals, name="X")

    Y_vals = (np.random.uniform(-0.01, 0.01,
                                size=(rows, cols)) > 0.005).astype("uint8")
    Y = theano.shared(Y_vals, name="y_vals")

    conv_elemwise = ConvElemwise(layer_name="h0",
                                 output_channels=1,
                                 irange=.005,
                                 kernel_shape=(1, 1),
                                 max_kernel_norm=0.9,
                                 nonlinearity=sigmoid_nonlin)

    input_space = pylearn2.space.Conv2DSpace(shape=(rows, cols),
                                             num_channels=nchs,
                                             axes=axes)
    model = MLP(batch_size=1,
                layers=[conv_elemwise],
                input_space=input_space)
    Y_hat = model.fprop(X)
    cost = model.cost(Y, Y_hat).eval()

    assert not (np.isnan(cost) or np.isinf(cost) or cost < 0.0), (
        "cost returned an illegal value.")
Code Example #48
def test_conv_pooling_nonlin():
    """
    Tests whether the nonlinearity is applied before the pooling.
    """

    rng = np.random.RandomState(0)
    sigm_nonlin = SigmoidConvNonlinearity(monitor_style="detection")
    (rows, cols) = (5, 5)
    axes = ('c', 0, 1, 'b')
    nchs = 1

    space_shp = (nchs, rows, cols, 1)
    X_vals = np.random.uniform(-0.01, 0.01,
                               size=space_shp).astype(config.floatX)
    X = theano.shared(X_vals, name="X")

    conv_elemwise = ConvElemwise(layer_name="h0",
                                 output_channels=1,
                                 pool_type="max",
                                 irange=.005,
                                 kernel_shape=(1, 1),
                                 pool_shape=(1, 1),
                                 pool_stride=(1, 1),
                                 nonlinearity=sigm_nonlin)

    input_space = pylearn2.space.Conv2DSpace(shape=(rows, cols),
                                             num_channels=nchs,
                                             axes=axes)
    model = MLP(batch_size=1,
                layers=[conv_elemwise],
                input_space=input_space)

    Y_hat = model.fprop(X)
    assert "max" in str(Y_hat.name)
    ancestors = theano.gof.graph.ancestors([Y_hat])
    lcond = ["sigm" in str(anc.owner) for anc in ancestors]
    assert np.array(lcond).nonzero()[0].shape[0] > 0, ("Nonlinearity should be "
                                                       "applied before pooling.")
Code Example #49
File: test_rnn.py (Project: capybaralet/pylearn2)
    def test_fprop(self):
        """
        Use an RNN without non-linearity to create the Mersenne numbers
        (2 ** n - 1) to check whether fprop works correctly.
        """
        rnn = MLP(input_space=SequenceSpace(VectorSpace(dim=1)),
                  layers=[Recurrent(dim=1, layer_name='recurrent',
                                    irange=0.1, indices=[-1],
                                    nonlinearity=lambda x: x)])
        W, U, b = rnn.layers[0].get_params()
        W.set_value([[1]])
        U.set_value([[2]])

        X_data, X_mask = rnn.get_input_space().make_theano_batch()
        y_hat = rnn.fprop((X_data, X_mask))

        seq_len = 20
        X_data_vals = np.ones((seq_len, seq_len, 1))
        X_mask_vals = np.triu(np.ones((seq_len, seq_len)))

        f = function([X_data, X_mask], y_hat, allow_input_downcast=True)
        np.testing.assert_allclose(2 ** np.arange(1, seq_len + 1) - 1,
                                   f(X_data_vals, X_mask_vals).flatten())
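The closed form in the docstring follows from the recurrence this zero-bias linear RNN implements: h_t = W x_t + U h_{t-1} = 1 + 2 h_{t-1} with h_0 = 0, which unrolls to h_t = 2^t - 1. A standalone NumPy check of that recurrence with the same W = 1 and U = 2:

import numpy as np

h, seq = 0.0, []
for _ in range(20):
    h = 1.0 * 1.0 + 2.0 * h    # W * x_t + U * h_{t-1}, with every x_t = 1
    seq.append(h)
print(np.allclose(seq, 2.0 ** np.arange(1, 21) - 1))  # True: Mersenne numbers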
Code Example #50
File: test_mlp.py (Project: julius506/pylearn2)
def test_get_layer_monitor_channels():
    """
    Create a MLP with multiple layer types
    and get layer monitoring channels for MLP.
    """
    mlp = MLP(
        layers=[
            FlattenerLayer(
                CompositeLayer(
                    'composite',
                    [Linear(10, 'h0', 0.1),
                     Linear(10, 'h1', 0.1)],
                    {
                        0: [1],
                        1: [0]
                    }
                )
            ),
            Softmax(5, 'softmax', 0.1)
        ],
        input_space=CompositeSpace([VectorSpace(15), VectorSpace(20)]),
        input_source=('features0', 'features1')
    )
    dataset = VectorSpacesDataset(
        (np.random.rand(20, 20).astype(theano.config.floatX),
         np.random.rand(20, 15).astype(theano.config.floatX),
         np.random.rand(20, 5).astype(theano.config.floatX)),
        (CompositeSpace([
            VectorSpace(20),
            VectorSpace(15),
            VectorSpace(5)]),
        ('features1', 'features0', 'targets'))
    )
    state_below = mlp.get_input_space().make_theano_batch()
    targets = mlp.get_target_space().make_theano_batch()
    mlp.get_layer_monitoring_channels(state_below=state_below,
                                      state=None, targets=targets)
Code Example #51
File: test_vae.py (Project: JesseLivezey/pylearn2)
def test_conditional_initialize_parameters():
    """
    Conditional.initialize_parameters does the following:
    * Set its input_space and ndim attributes
    * Calls its MLP's set_mlp method
    * Sets its MLP's input_space
    * Validates its MLP
    * Sets its params and param names
    """
    mlp = MLP(layers=[Linear(layer_name="h", dim=5, irange=0.01, max_col_norm=0.01)])
    conditional = DummyConditional(mlp=mlp, name="conditional")
    vae = DummyVAE()
    conditional.set_vae(vae)
    input_space = VectorSpace(dim=5)
    conditional.initialize_parameters(input_space=input_space, ndim=5)

    testing.assert_same_object(input_space, conditional.input_space)
    testing.assert_equal(conditional.ndim, 5)
    testing.assert_same_object(mlp.get_mlp(), conditional)
    testing.assert_same_object(mlp.input_space, input_space)
    mlp_params = mlp.get_params()
    conditional_params = conditional.get_params()
    assert all([mp in conditional_params for mp in mlp_params])
    assert all([cp in mlp_params for cp in conditional_params])
Code Example #52
File: test_mlp.py (Project: julius506/pylearn2)
def test_weight_decay_0():
    nested_mlp = MLP(layer_name='nested_mlp',
                     layers=[IdentityLayer(2, 'h0', irange=0)])
    mlp = MLP(nvis=2, layers=[nested_mlp])
    weight_decay = mlp.get_weight_decay([0])
    assert isinstance(weight_decay, theano.tensor.TensorConstant)
    assert weight_decay.dtype == theano.config.floatX

    weight_decay = mlp.get_weight_decay([[0]])
    assert isinstance(weight_decay, theano.tensor.TensorConstant)
    assert weight_decay.dtype == theano.config.floatX

    nested_mlp.add_layers([IdentityLayer(2, 'h1', irange=0)])
    weight_decay = mlp.get_weight_decay([[0, 0.1]])
    assert weight_decay.dtype == theano.config.floatX