# Example 1
def multi_voters_example():
    """ Combine voters of several kinds, which may be seen as multi-kernel learning (MKL).

    This particular dataset is easy to solve and combining voters degrades performance. However, it might be a good
    idea for more complex datasets.
    """
    # Fixed MinCq hyperparameter; this is only a demonstration.
    mu = 0.001

    # Load iris, relabel classes 0 and 2 as -1 to get a binary task, then split train/test.
    dataset = load_iris()
    dataset.target[dataset.target == 0] = -1
    dataset.target[dataset.target == 2] = -1
    X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, random_state=42)

    # Build a pool of voters from several kernels and parameter settings.
    generators = [
        voter.KernelVotersGenerator(rbf_kernel, gamma=0.01),
        voter.KernelVotersGenerator(rbf_kernel, gamma=0.1),
        voter.KernelVotersGenerator(rbf_kernel, gamma=1),
        voter.KernelVotersGenerator(rbf_kernel, gamma=10),
        voter.KernelVotersGenerator(rbf_kernel, gamma=100),
        voter.KernelVotersGenerator(polynomial_kernel, degree=2),
        voter.KernelVotersGenerator(polynomial_kernel, degree=3),
        voter.KernelVotersGenerator(linear_kernel),
    ]
    voters = generators[0].generate(X_train)
    for generator in generators[1:]:
        voters = np.append(voters, generator.generate(X_train))

    # Train MinCq on the training set using the hand-picked voter set.
    learner = MinCqLearner(mu, voters_type='manual')
    learner.fit(X_train, y_train, voters)

    # Report empirical (0-1 loss) risk on both splits.
    predictions_train = learner.predict(X_train)
    predictions_test = learner.predict(X_test)

    print("\nMultiVotersMinCq")
    print("-----------")
    print("Training set risk: {:.4f}".format(zero_one_loss(y_train, predictions_train)))
    print("Testing set risk: {:.4f}\n".format(zero_one_loss(y_test, predictions_test)))
# Example 2
def _fit_and_report(learner, title, underline, X_train, y_train, X_test, y_test):
    """Fit *learner* on the training split and print train/test 0-1 risks under *title*.

    *underline* is passed explicitly so the exact original console output is preserved.
    """
    learner.fit(X_train, y_train)

    predictions_train = learner.predict(X_train)
    predictions_test = learner.predict(X_test)

    print("\n" + title)
    print(underline)
    print("Training set risk: {:.4f}".format(zero_one_loss(y_train, predictions_train)))
    print("Testing set risk: {:.4f}\n".format(zero_one_loss(y_test, predictions_test)))


def simple_classification_example():
    """ Simple example : with fixed hyperparameters, run four versions of MinCq on a single dataset.
    """
    # MinCq parameters, fixed to a given value as this is a simple example.
    mu = 0.001

    # We load iris dataset, We convert the labels to be -1 or 1, and we split it in two parts: train and test.
    dataset = load_iris()
    dataset.target[dataset.target == 0] = -1
    dataset.target[dataset.target == 2] = -1
    X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, random_state=42)

    # MinCq with decision stumps as voters.
    _fit_and_report(MinCqLearner(mu, voters_type='stumps'),
                    "StumpsMinCq", "-----------", X_train, y_train, X_test, y_test)

    # Same setup with a linear kernel.
    _fit_and_report(MinCqLearner(mu, voters_type='kernel', kernel='linear'),
                    "LinearMinCq", "-----------", X_train, y_train, X_test, y_test)

    # Same setup with a polynomial kernel.
    _fit_and_report(MinCqLearner(mu, voters_type='kernel', kernel='poly'),
                    "PolyMinCq", "-----------", X_train, y_train, X_test, y_test)

    # Same setup with an RBF kernel.
    # NOTE(review): gamma=0.0 presumably means "auto" (1/n_features) inside MinCqLearner, matching
    # the old scikit-learn convention — confirm; sklearn's rbf_kernel with a literal gamma of 0
    # degenerates to an all-ones kernel.
    _fit_and_report(MinCqLearner(mu, voters_type='kernel', kernel='rbf', gamma=0.0),
                    "RbfMinCq", "--------", X_train, y_train, X_test, y_test)