Example #1
def test_ill_mlp_convergence_exact_target():
    # Run until convergence
    # assert that network can converge
    dataset = datasets.get_xor()

    model = ill.ILL(MLP((2, 2, 2)), grid_spacing=XOR_SPACING, learn_exact=True)

    model.train(*dataset, retries=5, error_break=0.002)
    assert validation.get_error(model, *dataset) < 0.02, \
        "Training should reach low error"
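These snippets rely on module-level setup that the excerpts don't show. A minimal sketch of what that setup might look like, assuming the names resolve directly from the learning package (the exact import paths and the XOR_SPACING value are assumptions for illustration):

from learning import datasets, ill, validation, MLP

# Hypothetical grid spacing for the ILL wrapper on the XOR problem; the
# value actually used by the test suite is not shown in these excerpts.
XOR_SPACING = 1.0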
Example #2
def test_ill_mlp_exact_target():
    # Run for a couple of iterations
    # assert that new error is less than original
    dataset = datasets.get_xor()

    model = ill.ILL(MLP((2, 2, 2)), grid_spacing=XOR_SPACING, learn_exact=True)

    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=10)
    assert validation.get_error(model, *dataset) < error, \
        "Training decreases error"
Example #3
def test_Model_stochastic_train():
    """Train with stochastic gradient descent."""
    # base and datasets are also needed below (base.select_sample, datasets.get_iris)
    from learning import base, datasets, transfer, error, validation, MLP

    dataset = datasets.get_iris()

    model = MLP((len(dataset[0][0]), 2, len(dataset[1][0])),
                transfers=transfer.SoftmaxTransfer(),
                error_func=error.CrossEntropyError())

    # Model should be able to converge with mini-batches
    model.stochastic_train(
        *dataset,
        error_break=0.02,
        pattern_selection_func=lambda X, Y: base.select_sample(X, Y, size=30),
        train_kwargs={
            'iterations': 100,
            'error_break': 0.02
        })

    assert validation.get_error(model, *dataset) <= 0.03
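The pattern_selection_func argument controls which patterns make up each mini-batch. A minimal sketch of a selector with the same call shape as base.select_sample, assuming it simply draws a random subset (the real base.select_sample implementation may differ):

import numpy

def select_random_sample(input_matrix, target_matrix, size):
    # Hypothetical stand-in for base.select_sample: draw `size` random
    # rows, keeping each input aligned with its target.
    indices = numpy.random.choice(len(input_matrix), size=size, replace=False)
    return (numpy.asarray(input_matrix)[indices],
            numpy.asarray(target_matrix)[indices])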
Example #4
def test_serialize_unserialize():
    dataset = (numpy.random.random((10, 10)), numpy.random.random((10, 2, 10)))

    model = multioutputs.MultiOutputs(MLP((10, 2, 10)), 2)
    unserialized_model = multioutputs.MultiOutputs.unserialize(
        model.serialize())

    assert isinstance(unserialized_model, multioutputs.MultiOutputs)
    activations = [model.activate(inp_vec) for inp_vec in dataset[0]]
    unserialized_activations = [
        unserialized_model.activate(inp_vec) for inp_vec in dataset[0]
    ]
    assert (helpers.fix_numpy_array_equality(activations) ==
            helpers.fix_numpy_array_equality(unserialized_activations))
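The serialize/unserialize pair exercised above also supports saving a model between processes. A minimal persistence sketch, assuming serialize() returns a plain string (the return type and the file name are assumptions):

# Write the serialized model to disk, then restore it later.
with open('multioutputs_model.txt', 'w') as model_file:
    model_file.write(model.serialize())

with open('multioutputs_model.txt') as model_file:
    restored_model = multioutputs.MultiOutputs.unserialize(model_file.read())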
Example #5
def test_ill_mlp_dim_reduction_tuple(monkeypatch):
    REDUCED_DIMENSIONS = 1

    dataset = datasets.get_xor()

    model = ill.ILL(MLP((2, 2, 2)),
                    grid_spacing=XOR_SPACING,
                    dim_reduction=(2, REDUCED_DIMENSIONS))

    # Points should have reduced dimensions
    points = _get_neighborhood_points(model, dataset, monkeypatch)
    for point in points:
        assert len(point) == REDUCED_DIMENSIONS

    # Should be able to train
    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=10)
    assert validation.get_error(model, *dataset) < error, \
        "Training decreases error"
Example #6
def test_ill_mlp_dim_reduction_tuple_reset(monkeypatch):
    REDUCED_DIMENSIONS = 1

    dataset = datasets.get_xor()

    model = ill.ILL(MLP((2, 2, 2)),
                    grid_spacing=XOR_SPACING,
                    dim_reduction=(2, REDUCED_DIMENSIONS))

    # Points should have reduced dimensions
    points = _get_neighborhood_points(model, dataset, monkeypatch)
    for point in points:
        assert len(point) == REDUCED_DIMENSIONS

    # Points should be different after reset
    model.reset()
    new_points = _get_neighborhood_points(model, dataset, monkeypatch)
    # Ignore the (0, 0) point; its reduced dimensions are always the same
    for point in new_points[1:]:
        assert point not in points
    # New points should still have reduced dimensions
    for point in new_points:
        assert len(point) == REDUCED_DIMENSIONS
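The dim_reduction=(2, REDUCED_DIMENSIONS) argument maps 2-d inputs down to 1 dimension before the neighborhood lookup, and the test implies that reset() re-draws this mapping, since the reduced points change. A conceptual sketch of one such mapping, a random linear projection (the actual ILL internals are an assumption; this only illustrates the idea):

import numpy

# Hypothetical random projection from 2 input dimensions down to 1
projection_matrix = numpy.random.random((2, 1))
reduced_point = numpy.dot(numpy.array([0.0, 1.0]), projection_matrix)
assert reduced_point.shape == (1,)

# A linear map always sends the all-zero input to the all-zero point,
# which is why the test above skips the (0, 0) point after reset
assert not numpy.dot(numpy.array([0.0, 0.0]), projection_matrix).any()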
Example #7
# Grab the popular iris dataset, from our library of datasets
dataset = datasets.get_iris()

# Make a multilayer perceptron to classify the iris dataset
model = MLP(
    # The MLP will take 4 attributes, have 1 hidden layer with 2 neurons,
    # and output one of 3 classes
    (4, 2, 3),

    # We will use a softmax output layer for this classification problem
    # Because we are only changing the output transfer, we pass a single
    # Transfer object. We could customize all transfer layers by passing
    # a list of Transfer objects.
    transfers=SoftmaxTransfer(),

    # Cross entropy error will pair nicely with our softmax output.
    error_func=CrossEntropyError(),

    # Let's use the quasi-Newton BFGS optimizer for this problem.
    # BFGS requires an O(n^2) operation, where n is the number of weights,
    # but this isn't a problem for our relatively small MLP.
    # If we don't want to deal with optimizers, the default
    # option will select an appropriate optimizer for us.
    optimizer=optimize.BFGS(
        # We can even customize the line search method
        step_size_getter=optimize.WolfeLineSearch(
            # And the initial step size for our line search
            initial_step_getter=optimize.FOChangeInitialStep())))

# NOTE: For rapid prototyping, we could quickly implement an MLP as follows
# model = MLP((4, 2, 3))
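
To finish the walkthrough, the model can be trained and evaluated with the same calls the tests above use; validation is assumed to be imported from learning as in Example #3, and the error_break value here is illustrative.

# Train until the error_break threshold is reached
model.train(*dataset, error_break=0.002)

# Check how well the trained model fits the dataset
print('Error: %s' % validation.get_error(model, *dataset))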