Code example #1
0
                            alpha=0.95,
                            activation_func='multiquadric',
                            activation_args=None,
                            user_components=None,
                            regressor=linear_model.Ridge(),
                            random_state=None)

        #======================
        # TRAIN ELM
        #======================
        # Fit the ELM classifier on the training data.
        elm.fit(TRAIN, Y_train)

        #======================
        # ELM PREDICTION
        #======================
        Y_pred = elm.predict(TEST)

        #======================
        # SCORE CALCULATION
        #======================
        # Mean accuracy of the classifier on the test set.
        score = elm.score(TEST, Y_test)

        # Weighted F1: each class's F1 is weighted by its support, so the
        # metric accounts for label imbalance.
        f1_weighted = metrics.f1_score(
            Y_test,
            Y_pred,
            labels=None,
            pos_label=1,
            average='weighted',
            sample_weight=None)  # accounts for label imbalance
        precision_score = metrics.precision_score(Y_test,
                                                  Y_pred,
Code example #2
0
File: main.py  Project: iagorosa/Pattern_Recognition
# Test labels: rows of X whose first column equals v; column 1 holds the label.
# NOTE(review): assumes X carries the label in column 1 — confirm against the
# code that builds X (not visible here).
y_test = X[X[:, 0] == v][:, 1]

# Keep a copy of the original training labels before any further processing.
y_train_real = y_train.copy()

#%%
#### ELM AS DEFINED BY THE SKLEARN-STYLE LIBRARY

clsf = ELMClassifier(random_state=100)
#clsf.fit(X[:v, 2:], y[:v])
clsf.fit(X_train, y_train)

# NOTE(review): _get_weights() is a private (underscore) API of ELMClassifier;
# presumably returns the hidden-layer weights after fitting — verify.
Htreino = clsf._get_weights()

print()

print(clsf.predict(X_test))

#print()

#print(y[v:])

# Mean accuracy on the held-out test set.
print('Score:', clsf.score(X_test, y_test))

# Same private call after prediction; captured for comparison with Htreino.
Hteste = clsf._get_weights()

#%%

################# ELM IMPLEMENTATION FROM KAGGLE  ################
# https://www.kaggle.com/robertbm/extreme-learning-machine-example #

# helper functions
Code example #3
0
    rot.append([*arq.split('.')])

#%%

# Build an integer encoding for the string labels found in column 1 of `rot`
# (label strings extracted from the file names above), then train an ELM on
# all but the last 11 samples and report its accuracy on the held-out tail.
#arr = np.array(arr)
#class_dict = dict(zip(np.unique(arr[:,2]), range(len(np.unique(arr[:,2])))))
rot = np.array(rot)
# Map each unique label string to a consecutive integer index.
class_dict = dict(zip(np.unique(rot[:, 1]), range(len(np.unique(rot[:, 1])))))

print('Quantidade de rotulos:', len(np.unique(rot[:, 1])))
print('Quantidade de pessoas:', len(np.unique(rot[:, 0])))

X = np.array(arr)  # turn the list of image vectors into an ndarray
y = rot[:, 1]  # keep only the label column

y = np.vectorize(class_dict.get)(y)  # encode string labels as integers

#a = ELMRegressor()
#a.fit(X, y)

# Hold out the last 11 samples for testing.
v = -11
clsf = ELMClassifier()
clsf.fit(X[:v], y[:v])

print()

print(clsf.predict(X[v:]))
print(y[v:])

# BUGFIX: output label was misspelled 'Socore:' (cf. 'Score:' used elsewhere
# in this file).
print('Score:', clsf.score(X[v:], y[v:]))
Code example #4
0
    # Use the standardized feature matrices for both training and testing.
    TRAIN = sX_train # --> STANDARDIZED data

    TEST = sX_test # --> STANDARDIZED data


    # ELM with 4000 hidden units, multiquadric activation and a ridge
    # regression read-out layer.
    clf=ELMClassifier(n_hidden=4000,alpha=0.95,activation_func='multiquadric',activation_args=None,
                      user_components=None,regressor=linear_model.Ridge(),random_state=None)

    clf.fit(TRAIN, Y_train)

    #TEST = X_test  # --> RAW data
    # NOTE(review): redundant — TEST was already set to sX_test above.
    TEST = sX_test # --> STANDARDIZED data
    #TEST = nX_test # --> NORMALIZED data

    Y_pred = clf.predict(TEST)

    #======================
    # SCORE CALCULATION
    #======================
    # Mean accuracy on the test set.
    score = clf.score(TEST, Y_test)

    print("\nAccuracy: %0.2f" % (score))

    # Cohen's kappa lies in [-1, 1]; values > 0.8 are usually considered
    # good agreement (accounts for chance agreement).
    score=metrics.cohen_kappa_score(Y_test, Y_pred)
    print("Cohen’s kappa: %0.2f " % score)
    # Hamming loss: fraction of wrong labels; 0 is perfect.
    score=metrics.hamming_loss(Y_test, Y_pred)
    print("Hamming loss: %0.2f " % score)