Example #1
def test_som_reduces_distances():
    # SOM functions correctly if it moves neurons towards inputs
    input_matrix, target_matrix = datasets.get_xor()

    # Small initial weight range chosen so the network isn't "accidentally"
    # very close to the inputs initially (which could cause the test to fail)
    som_ = som.SOM(2, 4, initial_weights_range=0.25)

    # Convenience function: min distance from each input to its nearest neuron
    def min_distances():
        all_closest = []
        for inp_vec in input_matrix:
            distances = som_.activate(inp_vec)
            all_closest.append(min(distances))
        return all_closest

    # Train the SOM and assert that minimum distances have decreased
    all_closest = min_distances()
    som_.train(input_matrix, target_matrix, iterations=20)
    new_closest = min_distances()
    print(all_closest)
    print(new_closest)
    for old_c, new_c in zip(all_closest, new_closest):
        assert new_c < old_c
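
The test treats som_.activate(inp_vec) as returning one distance per neuron
and asserts that the minimum shrinks after training. Assuming the standard
SOM formulation (Euclidean distance from the input to each neuron's weight
vector; not confirmed by this snippet), the activation reduces to:

import numpy

def som_distances(weights, inp_vec):
    # weights: (num_neurons, num_inputs); inp_vec: (num_inputs,)
    # Euclidean distance from inp_vec to each neuron's weight vector
    return numpy.sqrt(((weights - inp_vec) ** 2).sum(axis=1))
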
Example #2
def test_rbf_convergence():
    # Run until convergence
    # assert that network can converge
    model = rbf.RBF(2, 4, 2, scale_by_similarity=True)
    dataset = datasets.get_xor()

    model.train(*dataset, retries=5, error_break=0.002)
    assert validation.get_error(model, *dataset) <= 0.02
Example #3
def test_mlp_convergence():
    # Run until convergence
    # assert that network can converge
    model = mlp.MLP((2, 4, 2))
    dataset = datasets.get_xor()

    model.train(*dataset, retries=5, error_break=0.002)
    assert validation.get_error(model, *dataset) <= 0.02
Example #4
def test_pbnn_convergence():
    # Run until convergence
    # assert that network can converge
    model = PBNN()
    dataset = datasets.get_xor()

    model.train(*dataset)
    assert validation.get_error(model, *dataset) <= 0.02
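
Examples #2 through #4 share one pattern: train until the running error drops
below error_break, then check that the measured error clears a looser
threshold. A hypothetical helper (not part of the library) could factor this
out for models whose train() accepts these keywords (the PBNN in Example #4
is trained without them):

def assert_converges(model, dataset, error_break=0.002, threshold=0.02):
    # Train until error_break is reached, then verify the resulting error
    model.train(*dataset, retries=5, error_break=error_break)
    assert validation.get_error(model, *dataset) <= threshold
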
Example #5
def test_mlp():
    # Run for a couple of iterations
    # assert that new error is less than original
    model = mlp.MLP((2, 2, 2))
    dataset = datasets.get_xor()

    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=10)
    assert validation.get_error(model, *dataset) < error
Example #6
def test_rbf():
    # Run for a couple of iterations
    # assert that new error is less than original
    model = rbf.RBF(2, 4, 2, scale_by_similarity=True)
    dataset = datasets.get_xor()

    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=10)
    assert validation.get_error(model, *dataset) < error
Example #7
def test_mlp_classifier():
    # Run for a couple of iterations
    # assert that new error is less than original
    model = mlp.MLP((2, 2, 2),
                    transfers=mlp.SoftmaxTransfer(),
                    error_func=CrossEntropy())
    dataset = datasets.get_xor()

    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=20)
    assert validation.get_error(model, *dataset) < error
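
Pairing SoftmaxTransfer with CrossEntropy is the usual classifier setup:
softmax turns the output layer into a probability distribution, and cross
entropy penalizes low probability on the true class. A minimal numpy sketch
of the two quantities (illustrative only, not the library's internals):

import numpy

def softmax(x):
    e = numpy.exp(x - x.max())  # subtract max for numerical stability
    return e / e.sum()

def cross_entropy(probabilities, onehot_target):
    # Negative log-probability assigned to the true class
    return -(onehot_target * numpy.log(probabilities)).sum()
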
Example #8
def test_ill_mlp_convergence_exact_target():
    # Run until convergence
    # assert that network can converge
    dataset = datasets.get_xor()

    model = ill.ILL(MLP((2, 2, 2)), grid_spacing=XOR_SPACING, learn_exact=True)

    model.train(*dataset, retries=5, error_break=0.002)
    assert validation.get_error(model, *dataset) < 0.02, \
        "Training should reach low error"
Example #9
def test_ill_mlp_exact_target():
    # Run for a couple of iterations
    # assert that new error is less than original
    dataset = datasets.get_xor()

    model = ill.ILL(MLP((2, 2, 2)), grid_spacing=XOR_SPACING, learn_exact=True)

    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=10)
    assert validation.get_error(model, *dataset) < error, \
        "Training decreases error"
Example #10
def test_neuralfield_convergence():
    # Run until convergence
    # assert that network can converge
    dataset = datasets.get_xor()

    model = ill.make_neuralfield(2, grid_spacing=XOR_SPACING)

    model.train(*dataset, error_break=0.002)
    assert validation.get_error(model, *dataset) < 0.02, \
        "Training should reach low error"
Example #11
def test_neuralfield():
    # Run for a couple of iterations
    # assert that new error is less than original
    dataset = datasets.get_xor()

    model = ill.make_neuralfield(2, grid_spacing=XOR_SPACING)

    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=10)
    assert validation.get_error(model, *dataset) < error, \
        "Training decreases error"
Example #12
def test_count_classes():
    _, target_matrix = datasets.get_xor()
    class_counts = preprocess._count_classes(target_matrix)

    assert len(class_counts) == 2
    assert class_counts[(0, 1)] == 2
    assert class_counts[(1, 0)] == 2

    target_matrix = [['foo'], ['bar'], ['bar']]
    class_counts = preprocess._count_classes(target_matrix)

    assert len(class_counts) == 2
    assert class_counts[('foo', )] == 1
    assert class_counts[('bar', )] == 2
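
The assertions imply that preprocess._count_classes maps each target row,
taken as a tuple, to its number of occurrences. A minimal sketch consistent
with that behavior (the real helper may differ):

import collections

def count_classes(target_matrix):
    # Hash each row as a tuple so lists and numpy rows both work
    return collections.Counter(tuple(row) for row in target_matrix)
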
Example #13
def test_select_sample_size_none(seed_random):
    input_matrix, target_matrix = datasets.get_xor()

    new_inp_matrix, new_tar_matrix = base.select_sample(
        input_matrix, target_matrix)
    assert new_inp_matrix.shape == input_matrix.shape
    assert new_tar_matrix.shape == target_matrix.shape

    for inp_vec in input_matrix:  # all in
        assert inp_vec in new_inp_matrix
    for tar_vec in target_matrix:  # all in
        assert tar_vec in new_tar_matrix

    assert not (new_inp_matrix == input_matrix).all()  # order different
    assert not (new_tar_matrix == target_matrix).all()  # order different
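
Together with Example #17, this pins down select_sample's contract: rows are
drawn without replacement, and omitting size returns a shuffled copy of the
whole dataset. A hypothetical sketch with that behavior (assuming numpy-array
inputs; the library's implementation may differ):

import numpy

def select_sample(input_matrix, target_matrix, size=None):
    if size is None:
        size = len(input_matrix)  # permute the full dataset
    indices = numpy.random.choice(len(input_matrix), size, replace=False)
    return input_matrix[indices], target_matrix[indices]
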
Example #14
def test_select_random_size_none(monkeypatch):
    # Monkeypatch so we know what random.randint returns
    monkeypatch.setattr(random, 'randint',
                        lambda x, y: 0)  # randint always returns 0

    input_matrix, target_matrix = datasets.get_xor()
    new_inp_matrix, new_tar_matrix = base.select_random(
        input_matrix, target_matrix)
    assert new_inp_matrix.shape == input_matrix.shape
    assert new_tar_matrix.shape == target_matrix.shape

    for inp_vec in new_inp_matrix:
        assert (inp_vec == input_matrix[0]).all()  # due to monkeypatch
    for tar_vec in new_tar_matrix:
        assert (tar_vec == target_matrix[0]).all()  # due to monkeypatch
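
The monkeypatch pins random.randint, which implies select_random draws row
indices with replacement, one randint call per row. A hypothetical sketch
consistent with the test (the library's implementation may differ):

import random

def select_random(input_matrix, target_matrix, size=None):
    if size is None:
        size = len(input_matrix)
    # One draw per row, with replacement; randint bounds are inclusive
    indices = [random.randint(0, len(input_matrix) - 1) for _ in range(size)]
    return input_matrix[indices], target_matrix[indices]
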
Example #15
def test_post_pattern_callback():
    dataset = datasets.get_xor()
    model = helpers.EmptyModel()

    inp_history = []
    tar_history = []

    def callback(model, input_vec, target_vec):
        inp_history.append(input_vec)
        tar_history.append(target_vec)

    model.train(*dataset, iterations=1, post_pattern_callback=callback)
    inp_history = numpy.array(inp_history)
    tar_history = numpy.array(tar_history)
    assert (dataset[0] == inp_history).all()
    assert (dataset[1] == tar_history).all()
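
The same hook works for lightweight instrumentation during training; for
example (illustrative only):

def print_progress(model, input_vec, target_vec):
    # Invoked once per pattern, after the model trains on it
    print(input_vec, '->', target_vec)

model.train(*dataset, iterations=1, post_pattern_callback=print_progress)
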
Example #16
def test_ill_mlp_dim_reduction_tuple(monkeypatch):
    REDUCED_DIMENSIONS = 1

    dataset = datasets.get_xor()

    model = ill.ILL(MLP((2, 2, 2)),
                    grid_spacing=XOR_SPACING,
                    dim_reduction=(2, REDUCED_DIMENSIONS))

    # Points should have reduced dimensions
    points = _get_neighborhood_points(model, dataset, monkeypatch)
    for point in points:
        assert len(point) == REDUCED_DIMENSIONS

    # Should be able to train
    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=10)
    assert validation.get_error(model, *dataset) < error, \
        "Training decreases error"
Example #17
def test_select_sample(seed_random):
    input_matrix, target_matrix = datasets.get_xor()

    # Test size param
    new_inp_matrix, new_tar_matrix = base.select_sample(input_matrix,
                                                        target_matrix,
                                                        size=2)
    assert new_inp_matrix.shape[0] == 2
    assert new_tar_matrix.shape[0] == 2

    # Every sampled vector should come from the original matrix
    count = 0
    for inp_vec in new_inp_matrix:
        if inp_vec in input_matrix:
            count += 1
    assert count == 2

    count = 0
    for tar_vec in new_tar_matrix:
        if tar_vec in target_matrix:
            count += 1
    assert count == 2
Example #18
def test_ill_mlp_dim_reduction_tuple_reset(monkeypatch):
    REDUCED_DIMENSIONS = 1

    dataset = datasets.get_xor()

    model = ill.ILL(MLP((2, 2, 2)),
                    grid_spacing=XOR_SPACING,
                    dim_reduction=(2, REDUCED_DIMENSIONS))

    # Points should have reduced dimensions
    points = _get_neighborhood_points(model, dataset, monkeypatch)
    for point in points:
        assert len(point) == REDUCED_DIMENSIONS

    # Points should be different after reset
    model.reset()
    new_points = _get_neighborhood_points(model, dataset, monkeypatch)
    # Ignore the (0, 0) point; it will always have the same reduced dims
    for point in new_points[1:]:
        assert point not in points
    # Should still have reduced dimensions
    for point in new_points:
        assert len(point) == REDUCED_DIMENSIONS
Example #19
def test_isdataset():
    assert validation._isdataset(datasets.get_xor()) is True
    assert validation._isdataset([datasets.get_and(),
                                  datasets.get_xor()]) is False
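
The two assertions suggest _isdataset distinguishes a single
(input_matrix, target_matrix) pair from a list of such pairs. An illustrative
check with that behavior (not necessarily the library's):

import numpy

def isdataset(obj):
    # A dataset is a 2-item sequence of 2-D matrices; a list of datasets
    # fails because its items stack to 3-D, not 2-D
    return (len(obj) == 2
            and all(numpy.asarray(item).ndim == 2 for item in obj))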