# NOTE: import paths are assumed from the names used in these tests;
# adjust them to match the project's actual package layout.
import datasets
import mlp
import validation
from error import CrossEntropy  # assumed module for CrossEntropy


def test_dropout_mlp():
    # Run for a couple of iterations,
    # then assert that the new error is less than the original
    model = mlp.DropoutMLP((2, 8, 2))
    dataset = datasets.get_and()

    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=20)
    assert validation.get_error(model, *dataset) < error

def test_mlp_classifier_convergence():
    # Run until convergence,
    # then assert that the network can converge
    model = mlp.MLP(
        (2, 3, 2), transfers=mlp.SoftmaxTransfer(), error_func=CrossEntropy())
    dataset = datasets.get_and()

    model.train(*dataset, retries=5, error_break=0.002)
    assert validation.get_error(model, *dataset) <= 0.02

def test_dropout_mlp_classifier():
    # Run for a couple of iterations,
    # then assert that the new error is less than the original
    model = mlp.DropoutMLP(
        (2, 8, 2), transfers=mlp.SoftmaxTransfer(), error_func=CrossEntropy())
    dataset = datasets.get_and()

    error = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=20)
    assert validation.get_error(model, *dataset) < error

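# The two dropout smoke tests above share the same arrange/act/assert shape.
# A minimal sketch of a shared helper (the helper name is hypothetical, not
# part of the library), assuming datasets.get_and(), validation.get_error(),
# and model.train() behave as in the tests above:
def _assert_training_reduces_error(model, iterations=20):
    # Train briefly on the AND dataset and check that error decreased
    dataset = datasets.get_and()
    error_before = validation.get_error(model, *dataset)
    model.train(*dataset, iterations=iterations)
    assert validation.get_error(model, *dataset) < error_before
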
def test_dropout_mlp_convergence():
    # Run until convergence,
    # then assert that the network can converge.
    # Since this simple dataset does not really need dropout, we use high
    # active probabilities
    model = mlp.DropoutMLP(
        (2, 8, 2), input_active_probability=1.0, hidden_active_probability=0.9)
    dataset = datasets.get_and()  # Easier AND dataset for linear output

    # Error break lower than cutoff, since dropout may have a different error
    # after training
    model.train(*dataset, retries=5, error_break=0.002, error_improve_iters=50)

    # Dropout sacrifices training accuracy for better generalization,
    # so we don't worry as much about convergence
    assert validation.get_error(model, *dataset) <= 0.1

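# Likewise, the two convergence tests could share a hedged helper (name and
# defaults here are hypothetical), assuming model.train() accepts the retries
# and error_break keywords used above:
def _assert_converges(model, error_cutoff, **train_kwargs):
    # Train with retries on the AND dataset and check the final error is
    # within the given cutoff
    dataset = datasets.get_and()
    model.train(*dataset, retries=5, error_break=0.002, **train_kwargs)
    assert validation.get_error(model, *dataset) <= error_cutoff
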
def test_isdataset():
    assert validation._isdataset(datasets.get_xor()) is True
    assert validation._isdataset([datasets.get_and(), datasets.get_xor()]) is False