Code Example #1
import numpy as np
from sklearn.linear_model import LogisticRegression
# hidden-layer and model classes from the Python-ELM modules
# (random_layer.py / elm.py), matching the explicit imports later in this listing
from random_layer import MLPRandomLayer, RBFRandomLayer
from elm import GenELMClassifier


def make_classifiers():
    names = [
        "ELM(10,tanh)", "ELM(10,tanh,LR)", "ELM(10,sinsq)", "ELM(10,tribas)",
        "ELM(hardlim)", "ELM(20,rbf(0.1))"
    ]

    nh = 10

    # pass user defined transfer func
    sinsq = (lambda x: np.power(np.sin(x), 2.0))
    srhl_sinsq = MLPRandomLayer(n_hidden=nh, activation_func=sinsq)

    # use internal transfer funcs
    srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')

    srhl_tribas = MLPRandomLayer(n_hidden=nh, activation_func='tribas')

    srhl_hardlim = MLPRandomLayer(n_hidden=nh, activation_func='hardlim')

    # use gaussian RBF
    srhl_rbf = RBFRandomLayer(n_hidden=nh * 2, rbf_width=0.1, random_state=0)

    log_reg = LogisticRegression()

    classifiers = [
        GenELMClassifier(hidden_layer=srhl_tanh),
        GenELMClassifier(hidden_layer=srhl_tanh, regressor=log_reg),
        GenELMClassifier(hidden_layer=srhl_sinsq),
        GenELMClassifier(hidden_layer=srhl_tribas),
        GenELMClassifier(hidden_layer=srhl_hardlim),
        GenELMClassifier(hidden_layer=srhl_rbf)
    ]

    return names, classifiers
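
A minimal driver for make_classifiers(), shown only as a hedged sketch: the synthetic dataset and the fit/score loop below are illustrative, not part of the original project.

# Illustrative harness: fit each ELM variant on a toy problem and report
# held-out accuracy.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

names, classifiers = make_classifiers()
for name, clf in zip(names, classifiers):
    clf.fit(X_train, y_train)
    print("%-20s test accuracy: %.3f" % (name, clf.score(X_test, y_test)))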
Code Example #2
import numpy as np
from time import time
from random_layer import MLPRandomLayer, RBFRandomLayer
from elm import GenELMClassifier


def trainELMClassifier(trainData, trainLabels, testData):
    print("\nTraining ELM Classifier...")

    trainData = np.asarray(trainData)
    trainLabels = np.asarray(trainLabels)
    print(trainData.shape)
    print(trainLabels.shape)

    # choose and initialise the ELM hidden-layer activation
    nh = 100
    activation = 'tanh'

    if activation == 'rbf':
        act_layer = RBFRandomLayer(n_hidden=nh,
                                   random_state=0,
                                   rbf_width=0.001)
    elif activation == 'tanh':
        act_layer = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
    elif activation == 'tribas':
        act_layer = MLPRandomLayer(n_hidden=nh, activation_func='tribas')
    elif activation == 'hardlim':
        act_layer = MLPRandomLayer(n_hidden=nh, activation_func='hardlim')
    else:
        raise ValueError("unsupported activation: %s" % activation)

    # initialize ELM Classifier
    elm = GenELMClassifier(hidden_layer=act_layer)

    t0 = time()
    elm.fit(trainData, trainLabels)
    print("\nTraining finished in %0.3fs \n" % (time() - t0))

    t0 = time()
    predictedLabels = elm.predict(testData)
    print("\nTesting finished in %0.3fs" % (time() - t0))

    t0 = time()
    confidence_scores = elm.decision_function(testData)
    print("\nTesting finished in %0.3fs" % (time() - t0))

    print("\nPredicted Labels")
    print("----------------------------------")
    print(predictedLabels)

    print("\nConfidence Scores")
    print("----------------------------------")
    print(confidence_scores)

    params = {
        'nh': nh,
        'af': activation,
    }

    return confidence_scores, predictedLabels, params
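
A hedged usage sketch for trainELMClassifier: the digits dataset and the split below are assumptions for illustration; the function itself only needs array-like inputs.

# Illustrative call with scikit-learn's digits data (not from the source).
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

digits = load_digits()
trainData, testData, trainLabels, testLabels = train_test_split(
    digits.data, digits.target, test_size=0.25, random_state=0)

scores, labels, params = trainELMClassifier(trainData, trainLabels, testData)
print("params used:", params)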
Code Example #3
File: run_script.py  Project: yolle103/solution
import multiprocessing
from functools import partial
from itertools import combinations

from sklearn import neighbors
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from random_layer import RBFRandomLayer
from elm import GenELMClassifier


def TripleTest(x, y, pvalue_sort, top_k, threshold, classifier):
    index = []
    count = 0
    for i in range(0, top_k):  # take the top-k features by p-value for the exhaustive search
        index.append(pvalue_sort[i][0])

    if classifier == 'ELM':
        rbf_rhl = RBFRandomLayer(n_hidden=20,
                                 rbf_width=0.01,
                                 random_state=2018)
        clf = GenELMClassifier(hidden_layer=rbf_rhl)
    elif classifier == 'SVM':
        clf = SVC(kernel='linear', C=1)
    elif classifier == 'KNN':
        clf = neighbors.KNeighborsClassifier(n_neighbors=3)
    elif classifier == 'Normal_Bayes':
        clf = MultinomialNB(alpha=0.01)
    else:
        clf = DecisionTreeClassifier(random_state=0)

    combination = list(combinations(index, 3))  # exhaustive 3-feature combinations; for the top 50 features that is C(50, 3) = 19600 groups

    result = []
    # keep feature combinations whose test and train accuracies are both above 0.9
    # format: ((feature combination), train accuracy, test accuracy)
    value_set = []
    i_list = list(range(len(combination)))
    worker = partial(classify_func, combination, clf, x.T, y)
    # run the candidate combinations in parallel with a process pool
    pool = multiprocessing.Pool(4)

    pool_result = pool.map(worker, i_list)
    pool.close()
    pool.join()

    for res in pool_result:
        if res[2] >= threshold:
            result.append(
                [combination[res[4]], res[2], res[3], res[0], res[1]])
            count += 1
        value_set.append(res[2])

    return result, count, max(value_set)
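
TripleTest assumes a classify_func worker defined elsewhere in the project; the sketch below is purely hypothetical, reverse-engineered from how its return tuple is unpacked above (train predictions, test predictions, test accuracy, train accuracy, combination index).

# Hypothetical classify_func: evaluate one 3-feature combination.
from sklearn.base import clone
from sklearn.model_selection import train_test_split

def classify_func(combination, clf, X, y, i):
    cols = list(combination[i])
    X_train, X_test, y_train, y_test = train_test_split(
        X[:, cols], y, test_size=0.3, random_state=0)
    model = clone(clf).fit(X_train, y_train)
    return (model.predict(X_train), model.predict(X_test),
            model.score(X_test, y_test), model.score(X_train, y_train), i)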
Code Example #4
def make_classifiers():
    names = [   #"ELM(tanh)",
       # "ELM(tanh,LR)",
       # "ELM(sinsq)",
        #"ELM(sigmoid)",
       # "ELM(sine)",
       # "ELM(inv_tribas)",
       # "ELM(softlim)",
       # "ELM(gaussian)",
       # "ELM(multiquadric)",
       # "ELM(inv_multiquadric)",
        #"ELM(tribas)",
       # "ELM(hardlim)",
        #"Basic ELM(hardlim)",
        #"ELM(rbf(0.1))",
       # "LR",
        #"LDA",9
        #"KNN",
        #"DT",
        #"NB",
        #"RDF",
        "SVM(linear)",
        #"SVM(rbf)",
        #"SVM(sigmoid)",
        #"SVM(poly)"
    ]
    # number of hidden nodes
    nh = 2000
    # pass user defined transfer func
    sinsq = (lambda x: np.power(np.sin(x), 2.0))
    srhl_sinsq = MLPRandomLayer(n_hidden=nh, activation_func=sinsq)
    # use internal transfer funcs
    srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
    # use tribas
    srhl_tribas = MLPRandomLayer(n_hidden=nh, activation_func='tribas')
    # use hardlim
    srhl_hardlim = MLPRandomLayer(n_hidden=nh, activation_func='hardlim')
    # use gaussian RBF
    srhl_rbf = RBFRandomLayer(n_hidden=nh * 2, rbf_width=0.1, random_state=0)
    # use sigmoid
    srhl_sigmoid = MLPRandomLayer(n_hidden=nh, activation_func='sigmoid')
    # use sine
    srhl_sine = MLPRandomLayer(n_hidden=nh, activation_func='sine')
    # use inv_tribas
    srhl_inv_tribas = MLPRandomLayer(n_hidden=nh, activation_func='inv_tribas')
    # use softlim
    srhl_softlim = MLPRandomLayer(n_hidden=nh, activation_func='softlim')
    # use gaussian
    srhl_gaussian = MLPRandomLayer(n_hidden=nh, activation_func='gaussian')
    # use multiquadric
    srhl_multiquadric = MLPRandomLayer(n_hidden=nh, activation_func='multiquadric')
    # use inv_multiquadric
    srhl_inv_multiquadric = MLPRandomLayer(n_hidden=nh, activation_func='inv_multiquadric')
    log_reg = LogisticRegression()

    classifiers = [ #  GenELMClassifier(hidden_layer=srhl_tanh),
        #GenELMClassifier(hidden_layer=srhl_tanh, regressor=log_reg),
        #GenELMClassifier(hidden_layer=srhl_sinsq),
        #GenELMClassifier(hidden_layer=srhl_sigmoid),
        #GenELMClassifier(hidden_layer=srhl_sine),
        #GenELMClassifier(hidden_layer=srhl_inv_tribas),
        #GenELMClassifier(hidden_layer=srhl_softlim),
        #GenELMClassifier(hidden_layer=srhl_gaussian),
        #GenELMClassifier(hidden_layer=srhl_multiquadric),
        #GenELMClassifier(hidden_layer=srhl_inv_multiquadric),
        #GenELMClassifier(hidden_layer=srhl_tribas),
        #GenELMClassifier(hidden_layer=srhl_hardlim),
        #ELMClassifier(activation_func="hardlim",alpha=1,n_hidden=nh),
        #GenELMClassifier(hidden_layer=srhl_rbf),
        #LogisticRegression(),
        #LinearDiscriminantAnalysis(),
        #KNeighborsClassifier(),
        #DecisionTreeClassifier(),
        #GaussianNB(),
        #RandomForestClassifier(n_estimators=5),
        #SVC(kernel="rbf", gamma=0.01, C=10),
        SVC(kernel="linear", C=1),
        #SVC(kernel='rbf',C=10,gamma=0.01)
        #SVC(kernel="poly", gamma=2)
    ]

    return names, classifiers
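
Since this variant leaves only the linear SVM enabled, a quick hedged benchmark loop; the dataset and the cross_val_score usage are illustrative, not from the source.

from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score

X, y = make_classification(n_samples=300, n_features=50, random_state=1)
for name, clf in zip(*make_classifiers()):
    print(name, cross_val_score(clf, X, y, cv=5).mean())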
Code Example #5
sKF = StratifiedKFold(n_splits=n_splits, shuffle=False)
i = 0

stop_train = False
num_epochs = 10
for train_index, test_index in sKF.split(std_X, y):
    i += 1
    x_train = std_X[train_index]
    y_train = y[train_index]
    x_test = std_X[test_index]
    y_test = y[test_index]
    #-------------------------------------------------------------------------------
    # candidate hidden layers; only rbf is actually used below
    grbf = GRBFRandomLayer(n_hidden=500, grbf_lambda=0.0001)
    act = MLPRandomLayer(n_hidden=500, activation_func='sigmoid')
    rbf = RBFRandomLayer(n_hidden=290,
                         rbf_width=0.0001,
                         activation_func='sigmoid')

    clf = GenELMClassifier(hidden_layer=rbf)
    clf.fit(x_train, y_train.ravel())
    y_pre = clf.predict(x_test)
    y_score = clf.decision_function(x_test)
    fpr, tpr, thresholds = roc_curve(y_test, y_score)
    tprs.append(tpr)
    fprs.append(fpr)
    roc_auc = auc(fpr, tpr)
    tn, fp, fn, tp = confusion_matrix(y_test, y_pre).ravel()
    test_acc = (tn + tp) / (tn + fp + fn + tp)
    test_Sn = tp / (fn + tp)
    test_Sp = tn / (fp + tn)
    mcc = (tp * tn - fp * fn) / pow(
        (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5)
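
After the fold loop, the collected curves can be summarised; a minimal sketch, assuming tprs and fprs were initialised as empty lists before the loop and numpy is imported as np.

# Per-fold AUCs (a proper mean ROC curve would additionally need
# interpolation onto a common FPR grid).
fold_aucs = [auc(f, t) for f, t in zip(fprs, tprs)]
print("mean AUC over %d folds: %.3f" % (len(fold_aucs), np.mean(fold_aucs)))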
Code Example #6
File: test.py  Project: chestnut55/ml-elm
data = pd.read_table(otuinfile, sep='\t', index_col=1)
filtered_data = data.dropna(axis='columns', how='all')
X = filtered_data.drop(['label', 'numOtus'], axis=1)
metadata = pd.read_table(mapfile, sep='\t', index_col=0)
y = metadata[disease_col]
# merge 'adenoma' and 'normal' into a single 'no-cancer' class so the task is binary
y = y.replace(to_replace=['normal', 'adenoma'],
              value=['no-cancer', 'no-cancer'])

encoder = LabelEncoder()
y = pd.Series(encoder.fit_transform(y), index=y.index, name=y.name)

# A, P = train/test features; Y, Q = train/test labels
A, P, Y, Q = train_test_split(X, y, test_size=0.15,
                              random_state=42)  # Can change to 0.2

srhl_rbf = RBFRandomLayer(n_hidden=50, rbf_width=0.1, random_state=0)
clf6 = GenELMClassifier(hidden_layer=srhl_rbf).fit(A, Y.values.ravel())
print("Accuracy of Extreme learning machine Classifier: " +
      str(clf6.score(P, Q)))

#==============================================
cls = 0
# Set figure size and plot layout
figsize = (20, 15)
f, ax = plt.subplots(1, 1, figsize=figsize)

x = [clf6, 'purple', 'ELM']  # (model, plot colour, legend label)

#y_true = Q[Q.argsort().index]
y_score = x[0].decision_function(P)
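
The axes set up above are presumably for an ROC curve; a hedged sketch of the plotting step, reusing the (model, colour, label) triple in x:

from sklearn.metrics import roc_curve, auc

fpr, tpr, _ = roc_curve(Q, y_score)
ax.plot(fpr, tpr, color=x[1], label='%s (AUC = %.3f)' % (x[2], auc(fpr, tpr)))
ax.plot([0, 1], [0, 1], 'k--')  # chance line
ax.set_xlabel('False positive rate')
ax.set_ylabel('True positive rate')
ax.legend(loc='lower right')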
Code Example #7
                                       activation_func='multiquadric')),
                          ('lr', LinearRegression(fit_intercept=False))])
elmr.fit(xtoy_train, ytoy_train)
print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

rhl = RandomLayer(n_hidden=200, alpha=1.0)
elmr = GenELMRegressor(hidden_layer=rhl)
tr, ts = res_dist(mrx, mry, elmr, n_runs=200, random_state=0)
scatter(tr, ts, alpha=0.1, marker='D', c='r')

# <codecell>

rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.8)
elmr = GenELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

nh = 15
(ctrs, _, _) = k_means(xtoy_train, nh)
unit_rs = np.ones(nh)

#rhl = RBFRandomLayer(n_hidden=nh, activation_func='inv_multiquadric')
#rhl = RBFRandomLayer(n_hidden=nh, centers=ctrs, radii=unit_rs)
rhl = GRBFRandomLayer(n_hidden=nh, grbf_lambda=.0001, centers=ctrs)
elmr = GenELMRegressor(hidden_layer=rhl)
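
The k-means-centred GRBF layer is constructed but never fit in this excerpt; the natural continuation, mirroring the earlier cells (illustrative):

elmr.fit(xtoy_train, ytoy_train)
print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))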
Code Example #8
    else:
        if showIndividualPredictions:
            print(prediction + ' vs ' + actual)
            print("Wrong!")
        wrongs += 1

print("Rights: " + str(rights))
print("Wrongs:" + str(wrongs))
print("Score: " + str(rights / (rights + wrongs) * 100) + "%")
print("\n")

#####ELM with RBF Random Layer#####
from random_layer import RBFRandomLayer
from elm import GenELMClassifier as classifier
from joblib import dump  # assumed: dump(model, filename) matches joblib's API
model = classifier(
    hidden_layer=RBFRandomLayer(n_hidden=100, random_state=0, rbf_width=0.01))
model.fit(trainX, trainy)
dump(model, 'ELMRBFModel.bin')

print(
    "##########Testing person identification with ELM with RBF Random Layer model##########"
)
predictions = model.predict(testX)

rights, wrongs = 0, 0
for prediction, actual in zip(predictions, testy):
    if prediction == actual:
        if showIndividualPredictions:
            print(prediction)
            print("Correct!")
        rights += 1
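
The counting loop is cut off in the source; its tail presumably mirrors the block at the top of this example (reconstructed, not verbatim):

    else:
        if showIndividualPredictions:
            print(prediction + ' vs ' + actual)
            print("Wrong!")
        wrongs += 1

print("Rights: " + str(rights))
print("Wrongs: " + str(wrongs))
print("Score: " + str(rights / (rights + wrongs) * 100) + "%")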
Code Example #9
elmc = SimpleELMClassifier(n_hidden=500)
elmc.fit(dgx_train, dgy_train)
print(elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test))

# <codecell>

# SimpleELMRegressor test
elmr = SimpleELMRegressor()
elmr.fit(xtoy_train, ytoy_train)
print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

# RBF tests
elmc = ELMClassifier(RBFRandomLayer(activation_func='gaussian'))
tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)

elmc = ELMClassifier(RBFRandomLayer(activation_func='poly_spline', gamma=2))
tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)

elmc = ELMClassifier(RBFRandomLayer(activation_func='multiquadric'))
tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)

# Simple tests
elmc = ELMClassifier(SimpleRandomLayer(activation_func='sine'))
tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)

elmc = ELMClassifier(SimpleRandomLayer(activation_func='tanh'))
tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)
Code Example #10
print(elmr.score(x, y), elmr.score(x, y))
plot(x, y, x, elmr.predict(x))


# <codecell>

#rhl = RandomLayer(n_hidden=1000, alpha=1.0)
rhl = RBFRandomLayer(n_hidden=500, rbf_width=0.0001)
elmr = GenELMClassifier(hidden_layer=rhl)
elmr.fit(x_train, y_train)
predicted = elmr.predict(x_test)
#precision, recall, fscore, support = score(y_test, predicted)

print("RBF Random")
#print('precision: {}'.format(precision))
#print('recall: {}'.format(recall))
#print('fscore: {}'.format(fscore))
#print('support: {}'.format(support))
#print("RBF different rbf_width",elmr.score(y_test,elmr.predict(x_test)))


