Example #1
def apply_mlp(datasets, algorithm="l-bfgs", h=40, minibatchsize=200, epoch=5, seed=1):
    '''Applies the MLP.

    :type datasets: list
    :param datasets: list of (X, y) pairs, [(train_X, train_y), (test_X, test_y)]

    :type algorithm: str
    :param algorithm: training algorithm, e.g. "l-bfgs" or "sgd"

    :type h: int
    :param h: number of hidden units

    :type minibatchsize: int
    :param minibatchsize: size of each minibatch

    :type epoch: int
    :param epoch: maximum number of training iterations

    :type seed: int
    :param seed: random seed
    '''
    print 'Applying', algorithm
    training_X, training_y = datasets[0]
    testing_X, testing_y = datasets[1]
    #clf = SGDClassifier(loss="log", random_state=seed, shuffle=True, n_iter=epoch, verbose=1, n_jobs=cores)
    mlp = MultilayerPerceptronClassifier(n_hidden=h, activation="tanh",
                                         batch_size=minibatchsize,
                                         algorithm=algorithm, max_iter=epoch,
                                         shuffle=True, verbose=True,
                                         random_state=seed)
    print 'Fitting...'
    #clf.fit(training_X, training_y)
    mlp.fit(training_X,training_y)
    #predict_y = clf.predict(testing_X)
    print "Accuracy on testing data:", mlp.score(testing_X, testing_y)
Example #2
                         random_state=int(job_id))
    clf.fit(X[:5000], y[:5000])

if __name__ == '__main__':
    param_distributions = {
        'max_iter': UniformInt(100, 1500),
        'hidden_layer_sizes': UniformInt(100, 1000),
        'alpha': LogUniform(.001, 0.01),
        'learning_rate_init': Uniform(.1, 1.),
        'power_t': Uniform(.5, .99),
    }
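    # hyperparameter search distributions sampled by GaussianProcessCV below;
    # UniformInt, LogUniform and Uniform appear to be project-specific
    # sampling helpers rather than scikit-learn API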
    CONFIG['ensemble_list'] = [
        'btc', 'btc2', 'btc3', 'btc4', 'svc', 'svc2', 'svc3', 'nn', 'nn2',
        'nic', 'mpc', 'knc', 'etc', 'log', 'keras', 'cccv', 'crfcbag',
        'cetcbag'
    ]
    X, Xtest = GetDataset('ensemble', ensemble_list=CONFIG['ensemble_list'])
    clf = GaussianProcessCV(estimator=MultilayerPerceptronClassifier(
        verbose=False, learning_rate='invscaling'),
                            param_distributions=param_distributions,
                            kernel=DoubleExponential,
                            scoring=LogLoss,
                            mu_prior=-.50,
                            sigma_prior=.10,
                            sig=.001,
                            cv=5,
                            max_iter=100,
                            random_state=int(job_id),
                            time_budget=24 * 3600)
    clf.fit(X, y)
Example #3
def test_gradient():
    """Test gradient.

    This makes sure that the activation functions and their derivatives
    are correct. The approximated and the real gradients
    should be close.

    """
    X = np.array([[0.3, 0.2, 0.1, 0.3],
                  [0.4, 0.6, 0.43, 0.2],
                  [0.1, 0.2, 0.3, 0.6],
                  [0.4, 2., 3., 4.]])

    for activation in ['logistic', 'relu', 'tanh']:
        # Create MLP List
        mlps = [('regressor',
                 MultilayerPerceptronRegressor(activation=activation,
                                               hidden_layer_sizes=10,
                                               max_iter=1)),
                ('classifier',
                 MultilayerPerceptronClassifier(activation=activation,
                                                hidden_layer_sizes=10,
                                                max_iter=1)),
                ('autoencoder',
                 MultilayerPerceptronAutoencoder(hidden_layer_sizes=10,
                                                 max_iter=1))]

        for name, mlp in mlps:
            if name == 'autoencoder':
                y = X.copy()
                Y = X.copy()
            else:
                y = np.array([1, 1, 0, 0])
                Y = LabelBinarizer().fit_transform(y)
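            # Y holds the binarized targets consumed by the cost/gradient
            # helper below; y keeps the original labels for fit()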

            mlp.fit(X, y)

            theta = np.hstack(
                [l.ravel() for l in mlp.layers_coef_ + mlp.layers_intercept_])

            layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
                           [mlp.n_outputs_])

            activations = []
            deltas = []
            coef_grads = []
            intercept_grads = []

            activations.append(X)
            for i in range(mlp.n_layers_ - 1):
                activations.append(np.empty((X.shape[0], layer_units[i + 1])))
                deltas.append(np.empty((X.shape[0], layer_units[i + 1])))

                fan_in = layer_units[i]
                fan_out = layer_units[i + 1]
                coef_grads.append(np.empty((fan_in, fan_out)))
                intercept_grads.append(np.empty(fan_out))

            # analytically compute the gradients
            cost_grad_fun = lambda t: mlp._cost_grad_lbfgs(
                t, X, Y, activations, deltas, coef_grads, intercept_grads)
            [_, real_gradient] = cost_grad_fun(theta)
            approximated_gradient = np.zeros(np.size(theta))
            n = np.size(theta, 0)
            perturb = np.zeros(theta.shape)
            epsilon = 1e-6
            # numerically compute the gradients
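            # central difference: grad_i ~ (J(theta + eps * e_i) -
            #                               J(theta - eps * e_i)) / (2 * eps)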
            for i in range(n):
                perturb[i] = epsilon
                approximated_gradient[i] = (
                    cost_grad_fun(theta + perturb)[0] -
                    cost_grad_fun(theta - perturb)[0]) / (epsilon * 2.0)
                perturb[i] = 0

            assert_almost_equal(approximated_gradient, real_gradient)
    print "Gradient Test Passed!"
Example #4
import numpy as np

from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
from multilayer_perceptron import MultilayerPerceptronClassifier

# Dataset
X = np.loadtxt('../feature/5grams_count_mc_features')
y = np.loadtxt('../data/tag_mc')
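
# scale features into [0, 1]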
X -= X.min()
X /= X.max()
X_train, X_test, y_train, y_test = train_test_split(X, y)

# Instantiation
mlp = MultilayerPerceptronClassifier(activation='relu',
                                     hidden_layer_sizes=(20,), max_iter=200)

# Train
mlp.fit(X_train, y_train)

# Report
preds = mlp.predict(X_test)
tags = y_test 
print classification_report(tags, preds)
Example #5
"""
==============================================
Using multilayer perceptron for classification
==============================================

This uses a multi-layer perceptron to train on the digits dataset. The
example then reports the training score.
"""

from sklearn.datasets import load_digits

from multilayer_perceptron import MultilayerPerceptronClassifier

# Load dataset
digits = load_digits()
X, y = digits.data, digits.target

# Create MLP Object
# Please see line 562 in "multilayer_perceptron.py" for more information
# about the parameters
mlp = MultilayerPerceptronClassifier(hidden_layer_sizes=(50, 20),
                                     max_iter=200, alpha=0.02)

# Train MLP
mlp.fit(X, y)

# Report scores
print "Training Score = ", mlp.score(X, y)
print "Predicted labels = ", mlp.predict(X)
print "True labels = ", y
Example #6
        record = line.split(',')

        X.append(record[0] + ' ' + record[1])
        y.append(int(record[-1]))

# create vectorizer
transformer = CountVectorizer(stop_words="english", binary=True)
X = transformer.fit_transform(X)
y = np.array(y)

# create classifier list
clfs = [('Logistic Regression ', LogisticRegression(penalty='l1')),
        ('SVM Linear ', SVC(kernel='linear', probability=True)),
        ('SVM Poly ', SVC(kernel='poly', probability=True)),
        ('SVM RBF ', SVC(kernel='rbf', probability=True)),
        ('MLP ', MultilayerPerceptronClassifier(n_hidden=25)),
        ('Sparse MLP ',
         MultilayerPerceptronClassifierSparse(n_hidden=25,
                                              sparsity_param=0.12))]

print "Wait for the magic ..."

# apply cross validation
for (name, clf) in clfs:
    random.seed(0)
    score = cross_validation.cross_val_score(clf,
                                             X,
                                             y,
                                             cv=3,
                                             scoring='roc_auc')
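    # report the mean cross-validated AUC for each model (assumed follow-up;
    # the original snippet stops after scoring)
    print name, 'mean ROC AUC:', score.mean()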
Example #7
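# build the negative-class feature set: one Haar-feature vector per background
# image (loadImage and getHaarFeatures are helpers from this project)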
negative_images = glob.glob("/home/jonathan/Baobab/fromGE/train/wbackground/bg*.png")
for im in negative_images:
    image = loadImage(im)
    haars = getHaarFeatures(image)
    pat.append(haars)
    paty.append(0)
X2 = np.array(pat)
y2 = np.array(paty)

print "Finished loading images and computing second feature set:",datetime.datetime.now()


print "Beginning Training of first classifier:",datetime.datetime.now()

# MLP training performance
mlp = MultilayerPerceptronClassifier(n_hidden=5, max_iter=500, alpha=0.02)
mlp.fit(X, y)
print "Beginning Training of second classifier:", datetime.datetime.now()
mlp2 = MultilayerPerceptronClassifier(n_hidden=5, max_iter=800, alpha=0.02)
mlp2.fit(X2, y2)
print "Finished Training:", datetime.datetime.now()

print "Training Score = ", mlp.score(X, y)
print "Training Score 2", mlp2.score(X2, y2)
#print "Predicted labels = ", mlp.predict(X)
#print "True labels = ", y
#print(datetime.datetime.now())
#print "Pedicted for test:", mlp.predict(test)
#print "Pedicted for test:", mlp.predict_proba(test)
#print(datetime.datetime.now())
Example #8
from multilayer_perceptron import MultilayerPerceptronClassifier
import numpy as np
from matplotlib import pyplot as plt

# contrive the "exclusive or" problem
X = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
y = np.array([0, 1, 1, 0])

# MLP training performance
mlp = MultilayerPerceptronClassifier(n_hidden=5, max_iter=200, alpha=0.02)
mlp.fit(X, y)

print "Training Score = ", mlp.score(X,y)
print "Predicted labels = ", mlp.predict(X)
print "True labels = ", y 
# plot decision function

xx, yy = np.meshgrid(np.linspace(-1, 2, 500),
                     np.linspace(-1, 2, 500))
Z = mlp.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.imshow(Z,
           extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
           origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=70, c=y, cmap=plt.cm.Paired)

plt.axis([-1, 2, -1, 2])
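
# render the figure (assumed final step; the original snippet ends here)
plt.show()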
Example #9
from multilayer_perceptron import MultilayerPerceptronClassifier
import numpy as np
from matplotlib import pyplot as plt

# contrive the "exclusive or" problem
X = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
y = np.array([0, 1, 1, 0])

# MLP training performance
mlp = MultilayerPerceptronClassifier(n_hidden=5, max_iter=200, alpha=0.02)
mlp.fit(X, y)

print "Training Score = ", mlp.score(X, y)
print "Predicted labels = ", mlp.predict(X)
print "True labels = ", y
# plot decision function

xx, yy = np.meshgrid(np.linspace(-1, 2, 500), np.linspace(-1, 2, 500))
Z = mlp.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.imshow(Z,
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           aspect='auto',
           origin='lower',
           cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2, linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=70, c=y, cmap=plt.cm.Paired)

plt.axis([-1, 2, -1, 2])
Example #10
"""
==============================================
Using multilayer perceptron for classification
==============================================

This uses a multi-layer perceptron to train on the digits dataset. The
example then reports the training score.
"""

from sklearn.datasets import load_digits

from multilayer_perceptron import MultilayerPerceptronClassifier

# Load dataset
digits = load_digits()
X, y = digits.data, digits.target

# Create MLP Object
# Please see line 562 in "multilayer_perceptron.py" for more information 
# about the parameters
mlp = MultilayerPerceptronClassifier(hidden_layer_sizes=(50, 20),
                                     max_iter=200, alpha=0.02)

# Train MLP
mlp.fit(X, y)

# Report scores
print "Training Score = ", mlp.score(X,y)
print "Predicted labels = ", mlp.predict(X)
print "True labels = ", y 


Example #11
import numpy as np

from sklearn.utils import shuffle
from multilayer_perceptron import MultilayerPerceptronClassifier


# Train dataset
X = np.loadtxt('../train_data')
y = np.loadtxt('../train_labels')
X, y = shuffle(X, y)


# Data normalization
X -= X.min()
X /= X.max()


# Instantiation
mlp = MultilayerPerceptronClassifier(activation='relu',
                                     hidden_layer_sizes=(100,), max_iter=8)


# Training
mlp.fit(X, y)


# Test dataset
X = np.loadtxt('../test_data')
y = np.loadtxt('../test_labels')
X, y = shuffle(X, y)


# Data normalization
X -= X.min()
X /= X.max()
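

# score on the held-out test set (assumed follow-up; the original snippet
# ends after normalization)
print "Testing Score = ", mlp.score(X, y)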