Example #1
                         random_state=int(job_id))
    clf.fit(X[:5000], y[:5000])

if __name__ == '__main__':
    param_distributions = {
        'max_iter': UniformInt(100, 1500),
        'hidden_layer_sizes': UniformInt(100, 1000),
        'alpha': LogUniform(.001, 0.01),
        'learning_rate_init': Uniform(.1, 1.),
        'power_t': Uniform(.5, .99),
    }
    CONFIG['ensemble_list'] = [
        'btc', 'btc2', 'btc3', 'btc4', 'svc', 'svc2', 'svc3', 'nn', 'nn2',
        'nic', 'mpc', 'knc', 'etc', 'log', 'keras', 'cccv', 'crfcbag',
        'cetcbag'
    ]
    X, Xtest = GetDataset('ensemble', ensemble_list=CONFIG['ensemble_list'])
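    # GaussianProcessCV is a project-specific helper, not a scikit-learn
    # class; judging from its arguments it presumably drives a Bayesian
    # (GP-surrogate) hyperparameter search, scoring candidates by 5-fold CV
    # log-loss under the given prior mean/std, within a 24-hour time budget.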
    clf = GaussianProcessCV(estimator=MultilayerPerceptronClassifier(
        verbose=False, learning_rate='invscaling'),
                            param_distributions=param_distributions,
                            kernel=DoubleExponential,
                            scoring=LogLoss,
                            mu_prior=-.50,
                            sigma_prior=.10,
                            sig=.001,
                            cv=5,
                            max_iter=100,
                            random_state=int(job_id),
                            time_budget=24 * 3600)
    clf.fit(X, y)
Example #2
def test_gradient():
    """Test gradient.

    This makes sure that the activation functions and their derivatives
    are correct. The approximated and the real gradients
    should be close.

    """
    X = np.array([[0.3, 0.2, 0.1, 0.3], [0.4, 0.6, 0.43, 0.2],
                  [0.1, 0.2, 0.3, 0.6], [0.4, 2., 3., 4.]])

    for activation in ['logistic', 'relu', 'tanh']:
        # Create MLP List
        mlps = [('regressor',
                 MultilayerPerceptronRegressor(activation=activation,
                                               hidden_layer_sizes=10,
                                               max_iter=1)),
                ('classifier',
                 MultilayerPerceptronClassifier(activation=activation,
                                                hidden_layer_sizes=10,
                                                max_iter=1)),
                ('autoencoder',
                 MultilayerPerceptronAutoencoder(hidden_layer_sizes=10,
                                                 max_iter=1))]

        for name, mlp in mlps:
            if name == 'autoencoder':
                y = X.copy()
                Y = X.copy()
            else:
                y = np.array([1, 1, 0, 0])
                Y = LabelBinarizer().fit_transform(y)

            mlp.fit(X, y)

            theta = np.hstack(
                [l.ravel() for l in mlp.layers_coef_ + mlp.layers_intercept_])

            layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
                           [mlp.n_outputs_])

            activations = []
            deltas = []
            coef_grads = []
            intercept_grads = []

            activations.append(X)
            for i in range(mlp.n_layers_ - 1):
                activations.append(np.empty((X.shape[0], layer_units[i + 1])))
                deltas.append(np.empty((X.shape[0], layer_units[i + 1])))

                fan_in = layer_units[i]
                fan_out = layer_units[i + 1]
                coef_grads.append(np.empty((fan_in, fan_out)))
                intercept_grads.append(np.empty(fan_out))

            # analytically compute the gradients
            cost_grad_fun = lambda t: mlp._cost_grad_lbfgs(
                t, X, Y, activations, deltas, coef_grads, intercept_grads)
            [_, real_gradient] = cost_grad_fun(theta)
            approximated_gradient = np.zeros(np.size(theta))
            n = np.size(theta, 0)
            perturb = np.zeros(theta.shape)
            epsilon = 1e-6
            # numerically compute the gradients
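            # central difference: dJ/dtheta_i ~= (J(theta + eps*e_i) -
            #                                     J(theta - eps*e_i)) / (2*eps)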
            for i in range(n):
                perturb[i] = epsilon
                approximated_gradient[i] = (
                    cost_grad_fun(theta + perturb)[0] -
                    cost_grad_fun(theta - perturb)[0]) / (epsilon * 2.0)
                perturb[i] = 0

            assert_almost_equal(approximated_gradient, real_gradient)
    print "Gradient Test Passed!"
Example #3
    # each CSV line: text fields first, integer label last
    record = line.split(',')

    X.append(record[0] + ' ' + record[1])
    y.append(int(record[-1]))

# create vectorizer
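# (binary=True records term presence/absence instead of raw counts)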
transformer = CountVectorizer(stop_words="english", binary=True)
X = transformer.fit_transform(X)
y = np.array(y)

# create classifier list
clfs = [('Logistic Regression ', LogisticRegression(penalty='l1')),
        ('SVM Linear ', SVC(kernel='linear', probability=True)),
        ('SVM Poly ', SVC(kernel='poly', probability=True)),
        ('SVM RBF ', SVC(kernel='rbf', probability=True)),
        ('MLP ', MultilayerPerceptronClassifier(n_hidden=25)),
        ('Sparse MLP ',
         MultilayerPerceptronClassifierSparse(n_hidden=25,
                                              sparsity_param=0.12))]

print "Wait for the magic ..."

# apply cross validation and report each model's mean AUC
for (name, clf) in clfs:
    random.seed(0)
    score = cross_validation.cross_val_score(clf,
                                             X,
                                             y,
                                             cv=3,
                                             scoring='roc_auc')
    print(name, score.mean())
"""
==============================================
Using multilayer perceptron for classification
==============================================

This uses multi-layer perceptron to train on a digits dataset. The example
then reports the training score.
"""

from sklearn.datasets import load_digits

from multilayer_perceptron import MultilayerPerceptronClassifier

# Load dataset
digits = load_digits()
X, y = digits.data, digits.target
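# X holds the flattened 8x8 digit images (64 features); y the labels 0-9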

# Create MLP Object
# Please see line 562 in "multilayer_perceptron.py" for more information
# about the parameters
mlp = MultilayerPerceptronClassifier(hidden_layer_sizes=(50, 20),
                                     max_iter=200, alpha=0.02)

# Train MLP
mlp.fit(X, y)

# Report scores
print "Training Score = ", mlp.score(X, y)
print "Predicted labels = ", mlp.predict(X)
print "True labels = ", y
Example #5
from sklearn.datasets import load_digits
from multilayer_perceptron import MultilayerPerceptronClassifier
import numpy as np
from matplotlib import pyplot as plt

# contrive the "exclusive or" problem
X = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
y = np.array([0, 1, 1, 0])
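# XOR is not linearly separable, so the hidden layer is what makes it learnable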

# MLP training performance
mlp = MultilayerPerceptronClassifier(n_hidden=5, max_iter=200, alpha=0.02)
mlp.fit(X, y)

print "Training Score = ", mlp.score(X, y)
print "Predicted labels = ", mlp.predict(X)
print "True labels = ", y
# plot decision function

xx, yy = np.meshgrid(np.linspace(-1, 2, 500), np.linspace(-1, 2, 500))
Z = mlp.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.imshow(Z,
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           aspect='auto',
           origin='lower',
           cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2, linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=70, c=y, cmap=plt.cm.Paired)

plt.axis([-1, 2, -1, 2])
plt.show()