Example No. 1
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn_extensions.extreme_learning_machines.elm import GenELMClassifier
from sklearn_extensions.extreme_learning_machines.random_layer import RBFRandomLayer, MLPRandomLayer


def make_classifiers():

    names = ["ELM(10,tanh)", "ELM(10,tanh,LR)", "ELM(10,sinsq)", "ELM(10,tribas)", "ELM(hardlim)", "ELM(20,rbf(0.1))"]

    nh = 1000

    # pass user defined transfer func
    sinsq = (lambda x: np.power(np.sin(x), 2.0))
    srhl_sinsq = MLPRandomLayer(n_hidden=nh, activation_func=sinsq)

    # use internal transfer funcs
    srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
    srhl_tribas = MLPRandomLayer(n_hidden=nh, activation_func='tribas')
    srhl_hardlim = MLPRandomLayer(n_hidden=nh, activation_func='hardlim')

    # use gaussian RBF
    srhl_rbf = RBFRandomLayer(n_hidden=nh*2, rbf_width=0.1, random_state=0)
    # unused below; could be passed as GenELMClassifier(..., regressor=log_reg)
    log_reg = LogisticRegression()

    classifiers = [GenELMClassifier(hidden_layer=srhl_tanh),
                   GenELMClassifier(hidden_layer=srhl_sinsq),
                   GenELMClassifier(hidden_layer=srhl_tribas),
                   GenELMClassifier(hidden_layer=srhl_hardlim),
                   GenELMClassifier(hidden_layer=srhl_rbf)]
    
    # classifiers = [GenELMClassifier(hidden_layer=srhl_tanh)]

    return names, classifiers


def eml_classifier(train_features, train_ratings, test_features, test_ratings):
    nh = [10, 20, 50, 100]  # candidate hidden sizes; only nh[0] is used below
    # srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
    # srhl_tribas = MLPRandomLayer(n_hidden=nh, activation_func='tribas')
    # srhl_hardlim = MLPRandomLayer(n_hidden=nh, activation_func='hardlim')

    srhl_rbf = RBFRandomLayer(n_hidden=nh[0] * 2,
                              rbf_width=0.1,
                              random_state=0)
    clf = GenELMClassifier(hidden_layer=srhl_rbf)
    clf.fit(train_features, train_ratings)
    accuracy = clf.score(test_features, test_ratings)
    print(accuracy)
    return accuracy
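The nh list in eml_classifier suggests a sweep over hidden-layer sizes, but only nh[0] is used. A minimal sketch of such a sweep, under the same imports as above (a hypothetical variant, not part of the original script):

def eml_classifier_sweep(train_features, train_ratings, test_features, test_ratings):
    # Hypothetical variant: score one RBF-layer ELM per candidate hidden size.
    for n in [10, 20, 50, 100]:
        layer = RBFRandomLayer(n_hidden=n * 2, rbf_width=0.1, random_state=0)
        clf = GenELMClassifier(hidden_layer=layer)
        clf.fit(train_features, train_ratings)
        print(n, clf.score(test_features, test_ratings))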
Example No. 3
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn_extensions.extreme_learning_machines.elm import GenELMClassifier
from sklearn_extensions.extreme_learning_machines.random_layer import RBFRandomLayer


def classifiers(trainingRatingmatrix, testingRatingMatrix, trainLabel,
                testLabel):
    ### logistic regression
    clf = LogisticRegression(solver='lbfgs',
                             multi_class='multinomial',
                             class_weight='balanced').fit(
                                 trainingRatingmatrix, trainLabel)
    acc = clf.score(testingRatingMatrix, testLabel)
    prediction = clf.predict(testingRatingMatrix)
    print("LOgictic Regression Accuracy", acc, "RMSE",
          sklearnRMSE(prediction, testLabel), "NMAE:",
          NMAE(prediction, testLabel))
    ## LDA---------------------------------------------------------------
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    clf2 = LinearDiscriminantAnalysis(solver='svd')
    clf2.fit(trainingRatingmatrix, trainLabel)
    acc = clf2.score(testingRatingMatrix, testLabel)
    LDA_prediction = clf2.predict(testingRatingMatrix)
    print("LDA::Accuracy", acc, "RMSE", sklearnRMSE(LDA_prediction, testLabel),
          "NMAE:", NMAE(LDA_prediction, testLabel))
    ##PCA---------------------------------------------------------------
    pca = PCA(n_components=1000)
    pca.fit(trainingRatingmatrix)
    PCA_train = pca.transform(trainingRatingmatrix)
    PCA_test = pca.transform(testingRatingMatrix)
    clf = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    clf.fit(PCA_train, trainLabel)
    acc = clf.score(PCA_test, testLabel)
    prediction = clf.predict(PCA_test)
    print("PCA: Accuracy", acc, "RMSE", sklearnRMSE(prediction, testLabel),
          "NMAE:", NMAE(prediction, testLabel))

    ## MLP classifier---------------------------------------------------------------
    clf_mlp = MLPClassifier(solver='lbfgs',
                            alpha=1e-5,
                            hidden_layer_sizes=(1000, 100),
                            random_state=1)
    clf_mlp.fit(trainingRatingmatrix, trainLabel)
    acc = clf_mlp.score(testingRatingMatrix, testLabel)
    prediction = clf_mlp.predict(testingRatingMatrix)
    print("MLP: Accuracy", acc, "RMSE", sklearnRMSE(prediction, testLabel),
          "NMAE:", NMAE(prediction, testLabel))

    ## ELM---------------------------------------------------------------
    nh = 100
    srhl_rbf = RBFRandomLayer(n_hidden=nh * 2, rbf_width=0.1, random_state=0)
    name = ["rbf(0.1))"]
    classifiers = [GenELMClassifier(hidden_layer=srhl_rbf)]
    for classifier, clf in zip(name, classifiers):
        clf.fit(trainingRatingmatrix, trainLabel)
        prediction = clf.predict(testingRatingMatrix)
        score = clf.score(testingRatingMatrix, testLabel)

        print('ELM Model %s Accuracy: %s' % (classifier, score), "RMSE",
              sklearnRMSE(prediction, testLabel), "NMAE",
              NMAE(prediction, testLabel))
    ########
    print(
        "==========================================================================="
    )
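sklearnRMSE and NMAE are project-specific helpers that this snippet does not define. A minimal sketch of plausible implementations, assuming RMSE via scikit-learn and mean absolute error normalized by the observed rating range:

import numpy as np
from sklearn.metrics import mean_squared_error

def sklearnRMSE(prediction, truth):
    # Assumed: root mean squared error computed via sklearn.
    return np.sqrt(mean_squared_error(truth, prediction))

def NMAE(prediction, truth):
    # Assumed: mean absolute error normalized by the rating range.
    truth = np.asarray(truth, dtype=float)
    prediction = np.asarray(prediction, dtype=float)
    return np.mean(np.abs(prediction - truth)) / (truth.max() - truth.min())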
Example No. 4
def make_classifiers():
    names = [
        "ELM(10,tanh)", "ELM(10,tanh,LR)", "ELM(10,sinsq)", "ELM(10,tribas)",
        "ELM(hardlim)", "ELM(20,rbf(0.1))"
    ]

    nh = 10

    # pass user defined transfer func
    sinsq = (lambda x: np.power(np.sin(x), 2.0))
    srhl_sinsq = MLPRandomLayer(n_hidden=nh, activation_func=sinsq)

    # use internal transfer funcs
    srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
    srhl_tribas = MLPRandomLayer(n_hidden=nh, activation_func='tribas')
    srhl_hardlim = MLPRandomLayer(n_hidden=nh, activation_func='hardlim')

    # use gaussian RBF
    srhl_rbf = RBFRandomLayer(n_hidden=nh * 2, rbf_width=0.1, random_state=0)
    #log_reg = LogisticRegression()
    '''
    classifiers = [GenELMClassifier(hidden_layer=srhl_tanh),
                   #GenELMClassifier(hidden_layer=srhl_tanh, regressor=log_reg),
                   GenELMClassifier(hidden_layer=srhl_sinsq),
                   GenELMClassifier(hidden_layer=srhl_tribas),
                   GenELMClassifier(hidden_layer=srhl_hardlim),
                   GenELMClassifier(hidden_layer=srhl_rbf)]
    '''
    '''
    from sklearn_extensions.extreme_learning_machines.elm import ELMClassifier
    classifiers = [ELMClassifier(n_hidden=30, rbf_width=0.01, random_state=0, alpha=0.1)]
    '''
    # HIDDEN_NODE_COUNT is a module-level constant defined elsewhere in the
    # original script (an assumed value appears in the sketch after this
    # example).
    classifiers = [
        GenELMClassifier(hidden_layer=RBFRandomLayer(
            n_hidden=HIDDEN_NODE_COUNT, rbf_width=0.05, random_state=0))
    ]

    return names, classifiers
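A minimal usage sketch under an assumed value for HIDDEN_NODE_COUNT; note that names lists six configurations while only one classifier is returned, so zip stops after the first pair. X_train, y_train, X_test, y_test are hypothetical placeholders for your own split:

HIDDEN_NODE_COUNT = 20  # assumed value, for illustration only

names, clfs = make_classifiers()
for name, clf in zip(names, clfs):
    clf.fit(X_train, y_train)
    print(name, clf.score(X_test, y_test))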
Example No. 5
plot_confusion_matrix(preds, labels, names, title='ANN-MLP confusion matrix')

print('Classification report: ')
print(classification_report(labels, preds, target_names=names))

#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------

#Applying ELM

# Define a 10-fold cross-validation generator (shuffled to reduce variance):
cv = KFold(n_splits=10, shuffle=True, random_state=42)
elm_model = MLPRandomLayer(n_hidden=100000, activation_func='tanh')
clf = make_pipeline(CSP(n_components=len(picks), reg=None, log=True, norm_trace=False),
                    GenELMClassifier(hidden_layer=elm_model))

# Apply XdawnCovariances and TangentSpace instead of CSP
# (this pipeline replaces the CSP one defined above)
n_components = 1
elm_model = MLPRandomLayer(n_hidden=100000, activation_func='tanh')
clf = make_pipeline(XdawnCovariances(n_components),
                    TangentSpace(metric='riemann'),
                    GenELMClassifier(hidden_layer=elm_model))


preds = np.zeros(len(labels))

for train_idx, test_idx in cv.split(ica_data, labels):
    y_train, y_test = labels[train_idx], labels[test_idx]
    clf.fit(ica_data[train_idx], y_train)
    preds[test_idx] = clf.predict(ica_data[test_idx])
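With the out-of-fold predictions assembled, the ELM pipeline can be reported the same way as the ANN-MLP above; a minimal sketch:

from sklearn.metrics import accuracy_score, classification_report

print('ELM accuracy: %.3f' % accuracy_score(labels, preds))
print(classification_report(labels, preds, target_names=names))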
Example No. 6
import pandas as pd
pima = pd.read_csv('pima-indians-diabetes.csv', encoding="shift-jis")
pima.columns = [
    'pregnant', 'plasmaGlucose', 'bloodP', 'skinThick', 'serumInsulin',
    'weight', 'pedigree', 'age', 'diabetes'
]
from sklearn.model_selection import train_test_split
y = pima['diabetes']
X = pima.drop(['diabetes'], axis=1)
nh = 4
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=54,
                                                    shuffle=True)
from sklearn_extensions.extreme_learning_machines.elm import GenELMClassifier
from sklearn_extensions.extreme_learning_machines.random_layer import RBFRandomLayer, MLPRandomLayer
srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
srhl_rbf = RBFRandomLayer(n_hidden=nh * 2, rbf_width=0.1, random_state=0)
clf1 = GenELMClassifier(hidden_layer=srhl_tanh)
clf1.fit(X_train, y_train)
print(clf1.score(X_test, y_test))
'''
dic=dict(zip(X.columns,clf.feature_importances_))
for item in sorted(dic.items(), key=lambda x: x[1], reverse=True):
    print(item[0],round(item[1],4))
'''
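srhl_rbf is built above but never fitted. A minimal sketch using it for a second model, for comparison with the tanh ELM:

clf2 = GenELMClassifier(hidden_layer=srhl_rbf)
clf2.fit(X_train, y_train)
print(clf2.score(X_test, y_test))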
Example No. 7
import numpy as np

# transpose for CUDA ELM (rows = features, columns = samples)
train = features1.transpose()
test = features2.transpose()
Y_train_T = Y_train.transpose()
Y_test_T = Y_test.transpose()

### save the data in csv file for ELM-CUDA
np.savetxt('cuda_elm/features_cifar100/train_features.csv',
           train,
           delimiter=',')
np.savetxt('cuda_elm/features_cifar100/test_features.csv', test, delimiter=',')
np.savetxt('cuda_elm/features_cifar100/train_labels.csv',
           Y_train_T,
           delimiter=',')
np.savetxt('cuda_elm/features_cifar100/test_labels.csv',
           Y_test_T,
           delimiter=',')

############ try with ELM on CPU and compare with our ELM-CUDA #################
# convert back to original labels
y_train = np.argmax(Y_train, axis=-1)
y_test = np.argmax(Y_test, axis=-1)
from sklearn_extensions.extreme_learning_machines.elm import GenELMClassifier
from sklearn_extensions.extreme_learning_machines.random_layer import RBFRandomLayer, MLPRandomLayer
clf = GenELMClassifier(
    hidden_layer=MLPRandomLayer(n_hidden=200, activation_func='tanh'))
clf.fit(features1, y_train)
res = clf.score(features2, y_test)
print("ELM score:", res * 100)
Example No. 8
# titanic_data and gender (a dummies frame for 'Sex') come from earlier in
# the original script
embark_location = pd.get_dummies(titanic_data['Embarked'], drop_first=True)
titanic_data.drop(['Sex', 'Embarked'], axis=1, inplace=True)
titanic_dmy = pd.concat([titanic_data, gender, embark_location], axis=1)
titanic_dmy.drop(['Pclass'], axis=1, inplace=True)
titanic_dmy.drop(['Q'], axis=1, inplace=True)

X = titanic_dmy.iloc[:, [1, 2, 3, 4, 5, 6]].values
y = titanic_dmy.iloc[:, 0].values

X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=.3,
                                                    random_state=25)

nh = 10

# First ELM: an MLP random layer with sigmoid activation
srhl_sigmoid = MLPRandomLayer(n_hidden=nh, activation_func='sigmoid')
elm_model = GenELMClassifier(hidden_layer=srhl_sigmoid)
elm_model.fit(X_train, y_train)
score = elm_model.score(X_test, y_test)
print(score)

# Second ELM: a Gaussian RBF random layer
srhl_rbf = RBFRandomLayer(n_hidden=nh * 2, rbf_width=0.1, random_state=0)
elm_model = GenELMClassifier(hidden_layer=srhl_rbf)
elm_model.fit(X_train, y_train)
score = elm_model.score(X_test, y_test)
print(score)
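A fuller report for the RBF model fitted last can be produced with scikit-learn's classification_report; a minimal sketch:

from sklearn.metrics import classification_report

y_pred = elm_model.predict(X_test)
print(classification_report(y_test, y_pred))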
Example No. 9
# dataset and X (= dataset.data) come from earlier in the original script
y = dataset.target
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=0)

print(y)

# feature scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

nh = 10
t0 = time.time()
srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')

classifier = GenELMClassifier(hidden_layer=srhl_tanh)

classifier.fit(X_train, y_train)
print "training time:", round(time.time() - t0, 5), "s"

t0 = time.time()
y_pred = classifier.predict(X_test)
print "Testing time:", round(time.time() - t0, 5), "s"
accuracies = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10)
print("Train Accuracy::", accuracies.mean() * 100)
accuracies = cross_val_score(estimator=classifier, X=X_test, y=y_test, cv=3)
print("Test Accuracy::", accuracies.mean() * 100)
Example No. 10
# n_hidden, alphas, and rbf_width are search-space lists defined earlier in
# the original script
rbf_widths = rbf_width
activation_funcs = ['tanh', 'sigmoid', 'gaussian']
rl = [[aa, bb, cc, dd] for aa in n_hidden for bb in alphas for cc in rbf_widths
      for dd in activation_funcs]
rls = []
for combo in rl:
    rls.append(
        RandomLayer(n_hidden=combo[0],
                    alpha=combo[1],
                    rbf_width=combo[2],
                    activation_func=combo[3]))

param_grid = {'hidden_layer': rls}

#Instantiate model
elmc = GenELMClassifier(hidden_layer=rls[0])

#Perform grid search, return best parameters and estimator
elmc_parameters, elmc_estimator = model_param_selector(X_train, y_train,
                                                       X_valid, y_valid, elmc,
                                                       param_grid, 'elmc')

#Save best model
joblib.dump(elmc_estimator, 'elmc_.pkl')

#Predict and evaluate performance
elmc_predictions = elmc_estimator.predict(X_valid)

elmc_accuracy = accuracy_score(np.array(y_valid), np.array(elmc_predictions))

elmc_report = classification_report(np.array(y_valid),
                                    np.array(elmc_predictions))
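model_param_selector is a project-specific helper not shown in this snippet. A minimal sketch of a plausible implementation that selects on validation-set accuracy over the grid (the signature follows the call above; the logic is an assumption):

from sklearn.base import clone
from sklearn.model_selection import ParameterGrid

def model_param_selector(X_train, y_train, X_valid, y_valid,
                         model, param_grid, name):
    # Assumed behaviour: refit a clone per parameter combination and keep
    # the one with the best validation accuracy.
    best_score, best_params, best_estimator = -1.0, None, None
    for params in ParameterGrid(param_grid):
        estimator = clone(model).set_params(**params)
        estimator.fit(X_train, y_train)
        score = estimator.score(X_valid, y_valid)
        if score > best_score:
            best_score, best_params, best_estimator = score, params, estimator
    return best_params, best_estimator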
Example No. 11
import numpy as np
from sklearn_extensions.extreme_learning_machines.elm import GenELMClassifier
from sklearn_extensions.extreme_learning_machines.random_layer import MLPRandomLayer


def make_classifier():
    # use internal transfer funcs
    srhl_tanh = MLPRandomLayer(n_hidden=10, activation_func='tanh')

    return GenELMClassifier(hidden_layer=srhl_tanh)

# scaler and normalize_train_features come from earlier in the original script
normalize_test_features = scaler.transform(test_features)
print(normalize_train_features.shape)
print(normalize_test_features.shape)

# transpose for CUDA ELM (rows = features, columns = samples)
train = normalize_train_features.transpose()
test = normalize_test_features.transpose()
Y_train_T = Y_train.transpose()
Y_test_T = Y_test.transpose()

### save the data in csv file for ELM-CUDA
np.savetxt('cuda_elm/features_cifar10/train_features.csv',
           train,
           delimiter=',')
np.savetxt('cuda_elm/features_cifar10/test_features.csv', test, delimiter=',')
np.savetxt('cuda_elm/features_cifar10/train_labels.csv',
           Y_train_T,
           delimiter=',')
np.savetxt('cuda_elm/features_cifar10/test_labels.csv',
           Y_test_T,
           delimiter=',')

############ try with ELM on CPU and compare with our ELM-CUDA #################
# convert back to original labels
y_train = np.argmax(Y_train, axis=-1)
y_test = np.argmax(Y_test, axis=-1)
clf = GenELMClassifier(
    hidden_layer=MLPRandomLayer(n_hidden=1000, activation_func='tanh'))
clf.fit(normalize_train_features, y_train)
res = clf.score(normalize_test_features, y_test)
print("ELM score:", res * 100)