import numpy

from learning import validation
# fix_numpy_array_equality is assumed to live in the package's test
# utilities; adjust this import to match the actual repo layout.
from learning.testing import helpers


def test_make_train_test_sets_2d_labels():
    inputs = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0],
                          [0.0, 1.0], [1.0, 1.0]])
    labels = numpy.array([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0], [0, 1]])

    # With 1 training sample per class, the first two rows (one sample of
    # each class) form the training set and the remaining four rows form
    # the testing set, returned as
    # ((train_inputs, train_labels), (test_inputs, test_labels))
    expected = ((inputs[:2], labels[:2]), (inputs[2:], labels[2:]))
    assert (helpers.fix_numpy_array_equality(
        validation.make_train_test_sets(inputs, labels, 1)) ==
            helpers.fix_numpy_array_equality(expected))

# Example: classifying the iris dataset with an MLP. The imports, dataset
# loading, and the opening of the MLP call are reconstructed here; the
# exact names are assumed from the learning package's API.
from learning import datasets, optimize
from learning import MLP, SoftmaxTransfer, CrossEntropyError

# Grab the iris dataset (150 samples, 4 attributes, 3 classes)
dataset = datasets.get_iris()

# Make a multilayer perceptron to classify the iris dataset
model = MLP(
    # 4 input attributes, 1 hidden layer with 2 neurons, 3 output classes
    (4, 2, 3),

    # A softmax output layer suits this classification problem
    transfers=SoftmaxTransfer(),

    # Cross entropy error will pair nicely with our softmax output.
    error_func=CrossEntropyError(),

    # Let's use the quasi-Newton BFGS optimizer for this problem.
    # BFGS requires an O(n^2) operation, where n is the number of weights,
    # but this isn't a problem for our relatively small MLP.
    # If we don't want to deal with optimizers, the default
    # option will select an appropriate optimizer for us.
    optimizer=optimize.BFGS(
        # We can even customize the line search method
        step_size_getter=optimize.WolfeLineSearch(
            # And the initial step size for our line search
            initial_step_getter=optimize.FOChangeInitialStep())))
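
# (Background: as a quasi-Newton method, BFGS maintains an approximation of
# the inverse Hessian built from successive gradient differences; storing
# and updating that n x n matrix is the O(n^2) per-iteration cost noted
# above, versus the O(n) state of plain gradient descent.)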

# NOTE: For rapid prototyping, we could quickly construct an MLP with all
# default options as follows:
# model = MLP((4, 2, 3))

# Let's train our MLP
# First, we'll split our dataset into training and testing sets
# Our training set will contain 30 samples from each class
training_set, testing_set = validation.make_train_test_sets(*dataset,
                                                            train_per_class=30)
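
# Each set is an (input_matrix, label_matrix) pair (see the unit test
# above), which is why it unpacks with *. A quick sanity check on the
# shapes, as a sketch assuming the standard 150-sample iris dataset with
# one-hot labels:
train_inputs, train_labels = training_set
assert train_inputs.shape == (90, 4)  # 30 samples per class * 3 classes
assert train_labels.shape == (90, 3)  # one-hot encoded class labels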

# We could customize training and stopping criteria through
# the arguments of train, but the defaults should be sufficient here
model.train(*training_set)
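
# For example, we might cap iterations or stop at a target error; a
# hypothetical sketch (these keyword names are assumptions, not the
# library's confirmed API):
#   model.train(*training_set, iterations=1000, error_break=0.002)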

# Our MLP should converge in a couple of seconds
# Let's see how our MLP does on the testing set
print('Testing accuracy:', validation.get_accuracy(model, *testing_set))