# Example 1
def test_select_sample_size_none(seed_random):
    """With no size given, select_sample should return every pattern, shuffled."""
    inputs, targets = datasets.get_xor()

    sampled_inputs, sampled_targets = base.select_sample(inputs, targets)

    # Same dimensions as the originals: nothing dropped, nothing added.
    assert sampled_inputs.shape == inputs.shape
    assert sampled_targets.shape == targets.shape

    # Every original row must still be present in the sample.
    for row in inputs:
        assert row in sampled_inputs
    for row in targets:
        assert row in sampled_targets

    # The rows must not come back in the original order (seed_random makes
    # the shuffle deterministic, so this is not flaky).
    assert not (sampled_inputs == inputs).all()
    assert not (sampled_targets == targets).all()
# Example 2
def test_select_sample_size_none(seed_random):
    """With many patterns and no size, the selection-size heuristic applies."""
    inputs, targets = datasets.get_random_classification(1000, 1, 2)

    sampled_inputs, sampled_targets = base.select_sample(inputs, targets)

    # The heuristic should pick a strict subset of the 1000 patterns,
    # and both returned matrices must agree on that heuristic size.
    expected_size = base._selection_size_heuristic(len(inputs))

    assert len(sampled_inputs) < len(inputs)
    assert len(sampled_inputs) == expected_size

    assert len(sampled_targets) < len(inputs)
    assert len(sampled_targets) == expected_size
# Example 3
def test_select_sample_size_none_few_samples(seed_random):
    """When the dataset is small, select_sample defaults to all samples."""
    inputs, targets = datasets.get_xor()

    sampled_inputs, sampled_targets = base.select_sample(inputs, targets)

    # All patterns kept: shapes unchanged.
    assert sampled_inputs.shape == inputs.shape
    assert sampled_targets.shape == targets.shape

    # Each original row appears in the sampled result.
    for row in inputs:
        assert row in sampled_inputs
    for row in targets:
        assert row in sampled_targets

    # But the ordering must differ from the original (deterministic via
    # the seed_random fixture).
    assert not (sampled_inputs == inputs).all()
    assert not (sampled_targets == targets).all()
# Example 4
def test_Model_stochastic_train():
    """Train with stochastic gradient descent."""
    from learning import transfer, error, validation, MLP

    dataset = datasets.get_iris()

    input_size = len(dataset[0][0])
    output_size = len(dataset[1][0])
    model = MLP((input_size, 2, output_size),
                transfers=transfer.SoftmaxTransfer(),
                error_func=error.CrossEntropyError())

    # Mini-batches of 30 patterns per training call; the model should
    # still be able to converge below the error threshold.
    select_batch = lambda X, Y: base.select_sample(X, Y, size=30)
    model.stochastic_train(
        *dataset,
        error_break=0.02,
        pattern_selection_func=select_batch,
        train_kwargs={
            'iterations': 100,
            'error_break': 0.02
        })

    assert validation.get_error(model, *dataset) <= 0.03
# Example 5
def test_select_sample(seed_random):
    """An explicit size picks exactly that many patterns, without duplicates."""
    inputs, targets = datasets.get_xor()

    # Request a sample of exactly 2 patterns.
    sampled_inputs, sampled_targets = base.select_sample(inputs,
                                                         targets,
                                                         size=2)
    assert sampled_inputs.shape[0] == 2
    assert sampled_targets.shape[0] == 2

    # No duplicates: every sampled row must be one of the originals,
    # so exactly 2 of them match.
    assert sum(1 for row in sampled_inputs if row in inputs) == 2
    assert sum(1 for row in sampled_targets if row in targets) == 2