Code example #1
# Imports inferred from the usage below; ELMClassifier is assumed to be the
# sklearn-extensions implementation (as in the later examples on this page).
import numpy as np
from sklearn.model_selection import LeaveOneGroupOut, GridSearchCV
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn_extensions.extreme_learning_machines.elm import ELMClassifier


def perform_elm(Xn, yn, nSess=1, kernelType='linear'):
    # One group id per subject, so LeaveOneGroupOut holds out a whole subject.
    groups = get_groups(Xn, nSess)
    logo_fold = LeaveOneGroupOut()
    n_folds = logo_fold.get_n_splits(groups=groups)

    total_samples = Xn.shape[0]
    n_young_samples = int(total_samples / 2)  # kept from the original; not used below
    # One row per fold, two held-out samples (sessions) per left-out subject.
    actual_ = np.zeros((n_folds, 2))
    predict_ = np.zeros((n_folds, 2))
    decifunc_gri = np.zeros((n_folds, 2))
    folds_iter = 0

    print('\nClassify using ELM: (%s)' % kernelType)
    print(" Performing leave one subject out cross fold with %d outer_folds"
          " and %d inner_folds" % (n_folds, n_folds-1))

    # In each iteration the sessions of one subject are left out: the
    # classifier is trained on the sessions of the remaining subjects and
    # tested on the data of the left-out subject.

    for train_index, test_index in logo_fold.split(Xn, yn, groups):
        X_t_train, X_t_test = Xn[train_index], Xn[test_index]
        y_train, y_test = yn[train_index], yn[test_index]
        
        inner_fold = LeaveOneGroupOut()
        pgrid = { "n_hidden": np.arange(10, 300, 10),
                "rbf_width": np.arange(0.1, 0.5, 0.05)
                }
        elmc_ = ELMClassifier(n_hidden=10, random_state=42, rbf_width=0.1, activation_func=kernelType, binarizer=LabelBinarizer(0, 1))
        gridclf = GridSearchCV(estimator = elmc_, param_grid = pgrid, refit=True,
                               cv = inner_fold)
        g = gridclf.fit(X_t_train, y_train, groups = groups[train_index])
        actual_[folds_iter] = y_test
        predict_[folds_iter] = gridclf.predict(X_t_test)
        decifunc_gri[folds_iter] = gridclf.decision_function(X_t_test).reshape(2,)
        folds_iter += 1
            
    actual = actual_.reshape(total_samples,)
    predict = predict_.reshape(total_samples,)
    success = (actual == predict)
    n_success = int(success.sum())
    print(" Classification accuracy =", (n_success / total_samples) * 100, "%")
    print(' Confusion Matrix:\n', confusion_matrix(actual, predict))
    decifunc_gri = decifunc_gri.reshape(total_samples,)
    print(' roc_auc_score =', roc_auc_score(actual, decifunc_gri))
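
# The excerpt relies on a get_groups() helper that is not shown. A minimal,
# hypothetical sketch (assuming each subject contributes nSess consecutive
# rows in Xn and gets one group id):
def get_groups(Xn, nSess):
    n_subjects = Xn.shape[0] // nSess
    return np.repeat(np.arange(n_subjects), nSess)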
    
Code example #2
celm = elm.ELMClassifier(n_hidden=n_hidden,
                         activation_func='sigmoid',
                         func_hidden_layer=func_hidden_layer,
                         bias=True,
                         random_state=random_state,
                         regressor='ls_dual',
                         degree=3,
                         lbd=2)
cmelm = elm.ELMMLPClassifier(n_hidden=n_hidden,
                             activation_func='relu',
                             func_hidden_layer=func_hidden_layer,
                             random_state=random_state,
                             regressor='pinv')

skt_elm = ELMClassifier(n_hidden=n_hidden,
                        random_state=random_state,
                        activation_func=activation_func,
                        binarizer=LabelBinarizer(0, 1))
#r2 = 0
#%%
######### TRAIN

celm.fit(X_train, y_train)
cmelm.fit(X_train, y_train)
#skt_elm.fit(X_train, y_train)

#%%
######### TEST

# predict() in this custom ELM implementation also takes the test labels and
# returns more than one value; element 0 is kept here.
r1 = celm.predict(X_test, y_test)[0]
r2 = cmelm.predict(X_test, y_test)[0]
#r3 = skt_elm.score(X_test, y_test)
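
#%%
######### SKLEARN-EXTENSIONS ELM (hedged sketch)

# The commented-out skt_elm path follows the standard scikit-learn estimator
# API; fitting and scoring it would look like this (not run in the source).
skt_elm.fit(X_train, y_train)
r3 = skt_elm.score(X_test, y_test)  # mean accuracy on the held-out set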
Code example #3
            X_train)  # build a scaler that maps values into the range [Xmin, Xmax]
                      # (the beginning of this statement is cut off in the excerpt)
        # APPLY THE TRAINED SCALER
        sX_train = scaler.transform(X_train)

        sX_test = scaler.transform(X_test)

        sX_train.mean(axis=0)  # quick look at the scaled column means (result not stored)

        TRAIN = sX_train  # --> standardized data

        TEST = sX_test  # --> standardized data

        elm = ELMClassifier(n_hidden=3000,
                            alpha=0.95,
                            activation_func='multiquadric',
                            activation_args=None,
                            user_components=None,
                            regressor=linear_model.Ridge(),
                            random_state=None)

        #======================
        # TRAIN ELM
        #======================
        elm.fit(TRAIN, Y_train)

        #======================
        # ELM PREDICTION
        #======================
        Y_pred = elm.predict(TEST)

        #======================
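        # EVALUATION (hedged sketch, not from the source: Y_test is assumed
        # to hold the labels matching TEST / X_test)
        #======================
        from sklearn.metrics import accuracy_score, confusion_matrix
        print('Accuracy:', accuracy_score(Y_test, Y_pred))
        print('Confusion matrix:\n', confusion_matrix(Y_test, Y_pred))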
Code example #4
celm = elm.ELMClassifier(n_hidden=hidden_layer,
                         activation_func='sigmoid',
                         func_hidden_layer=func_hidden_layer,
                         bias=True,
                         random_state=random_state,
                         regressor='ls_dual',
                         degree=3,
                         lbd=4)
cmelm = elm.ELMMLPClassifier(n_hidden=hidden_layer,
                             activation_func='relu',
                             func_hidden_layer=func_hidden_layer,
                             random_state=random_state,
                             regressor='ls')

skt_elm = ELMClassifier(n_hidden=hidden_layer,
                        random_state=random_state,
                        activation_func=activation_func)

results = [[[], [], []], [[], [], []]]
r3 = 0
for i in range(X_folds.shape[0]):

    val = list(range(int(n_person / k_fold)))
    val.remove(i)

    # Assumes a three-way split: concatenate the two folds that are not held out.
    X_train = np.concatenate([X_folds[val[0]], X_folds[val[1]]])
    X_test = X_folds[i]

    # Column 0 holds the class label for each sample.
    y_train = X_train[:, 0]
    y_test = X_test[:, 0]
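
    # The excerpt stops here. A hedged sketch of a typical continuation (not
    # from the source): drop the label column so only features are used, then
    # fit and score the sklearn-extensions classifier on the held-out fold.
    X_train_feat = X_train[:, 1:]
    X_test_feat = X_test[:, 1:]

    skt_elm.fit(X_train_feat, y_train)
    r3 += skt_elm.score(X_test_feat, y_test) / X_folds.shape[0]  # accumulate the mean accuracy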
Code example #5
    rot.append([*arq.split('.')])

#%%

#arr = np.array(arr)
#class_dict = dict(zip(np.unique(arr[:,2]), range(len(np.unique(arr[:,2])))))
rot = np.array(rot)
class_dict = dict(zip(np.unique(rot[:, 1]), range(len(np.unique(rot[:, 1])))))

print('Number of labels:', len(np.unique(rot[:, 1])))
print('Number of people:', len(np.unique(rot[:, 0])))

X = np.array(arr)  # convert the list of image vectors into an array
y = rot[:, 1]  # keep only the labels

y = np.vectorize(class_dict.get)(y)  # map labels to integer codes

#a = ELMRegressor()
#a.fit(X, y)

v = -11  # hold out the last 11 samples for testing
clsf = ELMClassifier()
clsf.fit(X[:v], y[:v])

print()

print(clsf.predict(X[v:]))
print(y[v:])

print('Score:', clsf.score(X[v:], y[v:]))
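
#%%
# Hedged alternative (not from the source): the tail slice above keeps the
# file order, so the last 11 samples may all share a class. A shuffled split
# via train_test_split avoids that; test_size=11 mirrors the slice size.
from sklearn.model_selection import train_test_split

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=11, random_state=0)
clsf2 = ELMClassifier().fit(X_tr, y_tr)
print('Shuffled-split score:', clsf2.score(X_te, y_te))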
Code example #6
File: main.py Project: iagorosa/Pattern_Recognition
###### TRAIN AND TEST SET DEFINITIONS ##############

v = 1  # id of the person held out for testing

# Column 0 appears to hold the person id, column 1 the label, columns 2+ the features.
X_train = X[X[:, 0] != v][:, 2:]
y_train = X[X[:, 0] != v][:, 1]

X_test = X[X[:, 0] == v][:, 2:]
y_test = X[X[:, 0] == v][:, 1]

y_train_real = y_train.copy()

#%%
#### ELM AS DEFINED BY SKLEARN

clsf = ELMClassifier(random_state=100)
#clsf.fit(X[:v, 2:], y[:v])
clsf.fit(X_train, y_train)

Htreino = clsf._get_weights()  # fitted weights, via a private helper of this ELM implementation

print()

print(clsf.predict(X_test))

#print()

#print(y[v:])

print('Score:', clsf.score(X_test, y_test))
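
#%%
# Hedged sketch (not from the source): repeat the same person-held-out split
# for every id in column 0 and average the scores, giving a full
# leave-one-person-out evaluation.
scores = []
for person in np.unique(X[:, 0]):
    train_mask = X[:, 0] != person
    clf = ELMClassifier(random_state=100)
    clf.fit(X[train_mask][:, 2:], X[train_mask][:, 1])
    scores.append(clf.score(X[~train_mask][:, 2:], X[~train_mask][:, 1]))
print('Mean leave-one-person-out score:', np.mean(scores))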
Code example #7
rot = np.array(rot)
class_dict = dict(zip(np.unique(rot[:, 1]), range(len(np.unique(rot[:, 1])))))

print('Number of labels:', len(np.unique(rot[:, 1])))
print('Number of people:', len(np.unique(rot[:, 0])))

X = np.array(arr)
y = rot[:, 1]

y = np.vectorize(class_dict.get)(y)

#a = ELMRegressor()
#a.fit(X, y)

v = -11
clsf = ELMClassifier()
clsf.fit(X[:v], y[:v])

#%%

from sklearn_extensions.extreme_learning_machines.elm import ELMRegressor

import numpy as np
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.preprocessing import StandardScaler
# sklearn.cross_validation was removed; train_test_split now lives in sklearn.model_selection
#from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression

from sklearn_extensions.extreme_learning_machines.elm import GenELMClassifier
from sklearn_extensions.extreme_learning_machines.random_layer import RBFRandomLayer, MLPRandomLayer
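
#%%
# Hedged usage sketch for the imports above (illustrative data only):
# GenELMClassifier takes an explicit random hidden layer object, such as
# MLPRandomLayer, instead of building one internally.
X_demo, y_demo = make_moons(n_samples=200, noise=0.3, random_state=0)
X_demo = StandardScaler().fit_transform(X_demo)

hidden = MLPRandomLayer(n_hidden=100, activation_func='tanh', random_state=0)
gen_elm = GenELMClassifier(hidden_layer=hidden)
gen_elm.fit(X_demo[:150], y_demo[:150])
print('GenELMClassifier score:', gen_elm.score(X_demo[150:], y_demo[150:]))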