Example 1
def test_dropout_mlp():
    # Run for a couple of iterations,
    # and assert that the new error is less than the original
    model = mlp.DropoutMLP((2, 8, 2))
    dataset = datasets.get_and()

    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=20)
    assert validation.get_error(model, *dataset) < error
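These snippets read like excerpts from a single test module, so the shared imports are omitted. A minimal preamble that would make them runnable, assuming the referenced helpers live in importable modules with the names used below (the actual package layout is not shown in this listing):

import copy

import mlp         # provides DropoutMLP
import datasets    # provides get_and()
import validation  # provides get_error(model, *dataset)
# SoftmaxTransfer and CrossEntropyError come from wherever the project
# defines them; their module paths are not shown in this listing.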
Example 2
def test_dropout_mlp_classifier():
    # Run for a couple of iterations,
    # and assert that the new error is less than the original
    model = mlp.DropoutMLP(
        (2, 8, 2), transfers=SoftmaxTransfer(), error_func=CrossEntropyError())
    dataset = datasets.get_and()

    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=20)
    assert validation.get_error(model, *dataset) < error
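For intuition, this is the usual classification pairing: softmax turns the output layer into a probability distribution, and cross-entropy scores that distribution against a one-hot target. A self-contained sketch of the math only, not of the library's SoftmaxTransfer or CrossEntropyError internals:

import numpy as np

def softmax(x):
    # Subtract the max for numerical stability before exponentiating
    exps = np.exp(x - np.max(x))
    return exps / exps.sum()

def cross_entropy(predicted, onehot_target):
    # Negative log-likelihood of the target class
    return -np.sum(onehot_target * np.log(predicted + 1e-12))

probabilities = softmax(np.array([2.0, 0.5]))
print(cross_entropy(probabilities, np.array([1.0, 0.0])))  # Small when class 0 dominates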
Example 3
def test_dropout_mlp_dropout():
    model = mlp.DropoutMLP(
        (2, 4, 3), input_active_probability=0.5, hidden_active_probability=0.5)

    # Only the bias and active neurons should be nonzero
    model.train_step([[1, 1]], [[1, 1, 1]])

    # Should still have DropoutTransfers (until the first activate outside of training)

    _validate_weight_inputs(model._weight_inputs[0],
                            model._input_transfer._active_neurons)
    for weight_inputs, transfer_func in zip(model._weight_inputs[1:-1],
                                            model._transfers[:-1]):
        _validate_weight_inputs(weight_inputs, transfer_func._active_neurons)
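_validate_weight_inputs is a helper in the surrounding test module that this listing does not show. A hypothetical reconstruction consistent with the comment above (the bias-first layout of weight_inputs is an assumption):

def _validate_weight_inputs(weight_inputs, active_neurons):
    # Hypothetical helper: dropped (inactive) neurons must contribute 0
    # to the next layer, while the bias input stays nonzero.
    assert len(weight_inputs) == len(active_neurons) + 1  # +1 for bias

    assert weight_inputs[0] != 0  # Bias input is always active

    for input_, active in zip(weight_inputs[1:], active_neurons):
        if active == 0:
            assert input_ == 0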
Example 4
def test_dropout_mlp_convergence():
    # Run until convergence,
    # and assert that the network can converge.
    # Since AND does not really need dropout, we use high active probabilities
    model = mlp.DropoutMLP(
        (2, 8, 2), input_active_probability=1.0, hidden_active_probability=0.9)
    dataset = datasets.get_and()  # Easier AND dataset for a linear output

    # Use an error_break lower than the assertion cutoff, since dropout may
    # report a different error after training
    model.train(*dataset, retries=5, error_break=0.002, error_improve_iters=50)

    # Dropout sacrifices training accuracy for better generalization
    # so we don't worry as much about convergence
    assert validation.get_error(model, *dataset) <= 0.1
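Why the loose 0.1 cutoff: each training step evaluates the network with a random subset of active neurons, so the training error is a noisy estimate of the full network's error. A minimal sketch of that masking effect (illustration only, not the library's code):

import numpy as np

rng = np.random.default_rng(0)
hidden_output = np.array([0.3, -1.2, 0.7, 0.5])
p_active = 0.9

# During training, each hidden neuron is kept with probability p_active
mask = rng.random(hidden_output.shape) < p_active
print(hidden_output * mask)  # Dropped neurons output 0 for this step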
Example 5
def test_dropout_mlp_post_training():
    # Post-training should happen on the first activate after training (train_step),
    # and not more than once, unless training begins again
    model = mlp.DropoutMLP(
        (2, 4, 3), input_active_probability=0.5, hidden_active_probability=0.5)

    # Train, modifying active neurons and weights
    model.train_step([[1, 1]], [[1, 1, 1]])
    pre_procedure_weights = copy.deepcopy(model._weight_matrices)

    # Should call post_training procedure after activate
    model.activate([1, 1])
    _validate_post_training(model, pre_procedure_weights)

    # Weights should not change after another activate
    model.activate([1, 1])
    _validate_post_training(model, pre_procedure_weights)
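_validate_post_training is likewise a test-module helper that is not shown here. In standard dropout, the post-training step rescales weights by the activation probability so that expected input magnitudes at test time match training time; a hypothetical reconstruction under that assumption (0.5 matches the probabilities used in this test):

import numpy

def _validate_post_training(model, pre_procedure_weights, p_active=0.5):
    # Hypothetical helper: after the one-time post-training procedure, each
    # weight matrix should be its pre-procedure value scaled by the
    # probability that the feeding neurons were active during training.
    for weights, pre_weights in zip(model._weight_matrices,
                                    pre_procedure_weights):
        assert numpy.allclose(weights, numpy.asarray(pre_weights) * p_active)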