# Plot the train/test score distributions from the previous experiment.
hist(tr)
hist(ts)
print

# Repeat the resampling experiment with a narrow-width RBF hidden layer.
rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
tr, ts = res_dist(dbx, dby, GenELMRegressor(rhl),
                  n_runs=100, random_state=0)
hist(tr)
hist(ts)
print

# <codecell>

elmc = ELMClassifier(n_hidden=1000,
                     activation_func='gaussian',
                     alpha=0.0,
                     random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

elmc = ELMClassifier(n_hidden=500,
                     activation_func='hardlim',
                     alpha=1.0,
                     random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

elmr = ELMRegressor(random_state=0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
# ----- Example 2 -----
# Histogram the score distributions produced above.
hist(tr)
hist(ts)
print

# Re-run the score-distribution experiment on an RBF random layer.
rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
tr, ts = res_dist(dbx, dby, GenELMRegressor(rhl),
                  n_runs=100, random_state=0)
hist(tr)
hist(ts)
print
"""
# <codecell>

elmc = ELMClassifier(n_hidden=1000,
                     activation_func='multiquadric',
                     alpha=0.001,
                     random_state=0)
elmc.fit(cusx_train, cusy_train)
print elmc.score(cusx_train, cusy_train), elmc.score(cusx_test, cusy_test)

# <codecell>
"""
elmc = ELMClassifier(n_hidden=500, activation_func='hardlim', alpha=1.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

elmr = ELMRegressor(random_state=0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>
# ----- Example 3 -----
from sklearn.datasets import load_digits
from elm import ELMClassifier

# testing elm on the digits dataset
digits = load_digits()
X, y = digits.data, digits.target

# normalization is important
X/=255

clf = ELMClassifier(n_hidden=30)
clf.fit(X,y)

print 'score:', clf.score(X,y)
# ----- Example 4 -----
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
# Score distribution for a random-forest baseline over 100 resampled splits.
tr, ts = res_dist(dbx, dby, RandomForestRegressor(n_estimators=15),
                  n_runs=100, random_state=0)
hist(tr)
hist(ts)
print

# Same experiment with a GenELM regressor built on an RBF random layer.
rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
tr, ts = res_dist(dbx, dby, GenELMRegressor(rhl),
                  n_runs=100, random_state=0)
hist(tr)
hist(ts)
print

# <codecell>

elmc = ELMClassifier(n_hidden=1000, activation_func='gaussian', alpha=0.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

elmc = ELMClassifier(n_hidden=500, activation_func='hardlim', alpha=1.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

elmr = ELMRegressor(random_state=0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>
# Plot the score distributions from the previous run.
hist(tr)
hist(ts)
print

# Rerun with an RBF random hidden layer.
rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
tr, ts = res_dist(dbx, dby, GenELMRegressor(rhl),
                  n_runs=100, random_state=0)
hist(tr)
hist(ts)
print

# <codecell>

# Compare a small hard-limit ELM, an RBF SVM, and a RandomLayer +
# LinearRegression pipeline on the same train/test split.
elmc = ELMClassifier(n_hidden=20, activation_func='hardlim',
                     alpha=0.0, random_state=0)
elmc.fit(X_train, y_train)
print(elmc.score(X_train, y_train), elmc.score(X_test, y_test))

clf = svm.SVC(kernel='rbf', gamma=0.001, C=100, cache_size=20000)
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train), clf.score(X_test, y_test))

from sklearn import pipeline
from sklearn.linear_model import LinearRegression

# An ELM is equivalent to a random hidden layer followed by a linear fit.
elmr = pipeline.Pipeline(
    [('rhl', RandomLayer(random_state=0, activation_func='hardlim')),
     ('lr', LinearRegression(fit_intercept=False))])
elmr.fit(X_train, y_train)
print(elmr.score(X_train, y_train), elmr.score(X_test, y_test))

# NOTE(review): elmr was fit on X_train but is evaluated/plotted over xtoy --
# confirm xtoy lives in the same feature space as X_train.
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
# ----- Example 6 -----
from sklearn.datasets import load_digits
from elm import ELMClassifier

# testing elm on the digits dataset
digits = load_digits()
X, y = digits.data, digits.target

# normalization is important
X /= 255

clf = ELMClassifier(n_hidden=30)
clf.fit(X, y)

print 'score:', clf.score(X, y)
# ----- Example 7 -----
(ctrs, _, _) = k_means(xtoy_train, nh)
unit_rs = np.ones(nh)
#rhl = RBFRandomLayer(n_hidden=nh, activation_func='poly_spline', gamma=3)
#rhl = RBFRandomLayer(n_hidden=nh, activation_func='multiquadric', gamma=1)
rhl = RBFRandomLayer(n_hidden=nh, centers=ctrs, radii=unit_rs)
elmr = ELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

rbf_rhl = RBFRandomLayer(n_hidden=100, random_state=0, gamma=0.1)
elmc_rbf = ELMClassifier(hidden_layer=rbf_rhl)
elmc_rbf.fit(dgx_train, dgy_train)
print elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test)

def powtanh_xfer(activations, power=1.0):
    """Transfer function: elementwise tanh of *activations*, raised to *power*."""
    squashed = np.tanh(activations)
    return squashed ** power

#tanh_rhl = SimpleRandomLayer(n_hidden=5000, random_state=0)
tanh_rhl = SimpleRandomLayer(n_hidden=5000, activation_func=powtanh_xfer, activation_args={'power':2.0})
elmc_tanh = ELMClassifier(hidden_layer=tanh_rhl)
elmc_tanh.fit(dgx_train, dgy_train)
print elmc_tanh.score(dgx_train, dgy_train), elmc_tanh.score(dgx_test, dgy_test)

# <codecell>

# Distribution of classifier scores over 100 resampled splits.
rbf_rhl = RBFRandomLayer(n_hidden=100, gamma=0.1)
tr, ts = res_dist(dgx, dgy, ELMClassifier(hidden_layer=rbf_rhl),
                  n_runs=100, random_state=0)
# Shuffle the dataset using a random permutation of row indices.
shfl_data = loaded_data.data[rand_perm]
shfl_target = loaded_data.target[rand_perm]
# Divide into train and test sets: first 80% train, remaining 20% test.
# BUG FIX 1: np.floor returns a float, which is not a valid slice index on
# modern NumPy/Python -- cast to int instead.
# BUG FIX 2: the test split previously started at offset+1, silently dropping
# the sample at index `offset`; starting at offset makes train+test cover
# every row exactly once.
offset = int(data_size * 0.8)  # 80 percent of the data used for training
train_data = shfl_data[:offset, :]
train_target = shfl_target[:offset]
test_data = shfl_data[offset:, :]
test_target = shfl_target[offset:]

# Build ELM Classifier using default value
elm_classifier = ELMClassifier()
# Train ELM
elm_classifier.fit(train_data,train_target)

predicted_class = elm_classifier.predict(test_data)
error_elm = 1.0 - elm_classifier.score(test_data, test_target)
print 'Predicted Class\t\t', predicted_class
print 'Original Class\t\t', test_target
print 'Error Score\t\t\t', error_elm
print ''

#print '##############################################################\nSINGLE GEN-ELM RESULT\n##############################################################'
from elm import GenELMClassifier
## Build GEN_ELM Classifier using default value
#genelm_classifier = GenELMClassifier()
## Train the ELM
#genelm_classifier.fit(train_data,train_target)
#
#predicted_class = genelm_classifier.predict(test_data)
#error_elm = 1.0 - genelm_classifier.score(test_data, test_target)
#print 'Predicted Class\t\t', predicted_class
# ----- Example 9 -----
from elm import ELMClassifier
from sklearn import datasets
from elm import elmUtils, BvsbUtils
import numpy as np

# Candidate datasets -- exactly one load should be active at a time.
#data = datasets.fetch_olivetti_faces()  # option 1
# data = sklearn.datasets.fetch_covtype()  # option 2
data = datasets.load_iris()  # option 3 -- the active dataset
# data = datasets.load_digits()  # option 4 -- comment/uncomment lines to switch
#data = datasets.load_wine()  # option 5
# data = datasets.load_breast_cancer()  # option 6

#data=elmUtils.coverDataFileToData("./data/abalone.data", targetIndex=-1, transformIndex=[0])
#data=elmUtils.readDataFileToData("data/balance-scale.data", targetIndex=0)
#data = datasets.fetch_olivetti_faces()  # sparse/high-dimensional: must be converted and reduced
# When features are not all numeric use needOneHot=True; when the target is not numeric use needLabelEncoder=True.
#data.data,data.target=elmUtils.processingData(data.data, data.target)
#data.data=BvsbUtils.dimensionReductionWithPCA(data.data,100)  # kddcup99: dimensionality too high, must reduce
# Hold out (1 - label_size) of the samples, train a tanh ELM on the labelled
# portion, and report accuracy on the held-out portion.
print(data.data.shape)
label_size = 0.3

(train_data, test_data) = elmUtils.splitData(data.data, data.target,
                                             1 - label_size)
elmc = ELMClassifier(n_hidden=1000, activation_func='tanh',
                     alpha=1.0, random_state=0)
elmc.fit(train_data[0], train_data[1])
print(elmc.score(test_data[0], test_data[1]))
# ----- Example 10 -----
    print "\nTime: %.3f secs" % (time() - start_time)

    print "Test Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (
        min(test_res), np.mean(test_res), max(test_res), np.std(test_res))
    print "Train Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (
        min(train_res), np.mean(train_res), max(train_res), np.std(train_res))
    print
    return (train_res, test_res)


stdsc = StandardScaler()

iris = load_iris()
irx, iry = stdsc.fit_transform(iris.data), iris.target
irx_train, irx_test, iry_train, iry_test = train_test_split(irx,
                                                            iry,
                                                            test_size=0.2)

srh = SimpleRandomHiddenLayer(activation_args='sigmoid', n_hidden=500)
elmc = ELMClassifier(hidden_layer=srh)
# elmc = ELMClassifier(SimpleRandomHiddenLayer(activation_func='sigmoid'))
# print "SimpleRandomHiddenLayer(activation_func='sigmoid')"
# tr,ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)
# plt.hist(tr),plt.hist(tr)
# plt.show()

elmc.fit(irx_train, iry_train)
r = elmc.predict(irx_test)
print r
res = elmc.score(irx_test, iry_test)
print res
# ----- Example 11 -----
from elm import ELMClassifier
from sklearn import linear_model


def load_mnist(path='../Data/mnist.pkl'):
    """Deserialise and return the pickled MNIST splits stored at *path*."""
    with open(path, 'rb') as pkl_file:
        return cPickle.load(pkl_file)


def get_datasets(data):
    """Unpack the (train, val, test) splits from *data*.

    Returns a 6-tuple (train_x, train_y, val_x, val_y, test_x, test_y)
    where each label array is reshaped into an (n, 1) numpy column vector.
    """
    unpacked = []
    for split in (data[0], data[1], data[2]):
        labels = split[1]
        unpacked.append(split[0])
        unpacked.append(np.array(labels).reshape(len(labels), 1))
    return tuple(unpacked)


if __name__ == '__main__':
    # Load data sets
    train_x, train_y, val_x, val_y, test_x, test_y = get_datasets(load_mnist())
    # Build ELM
    cls = ELMClassifier(n_hidden=7000,
                        alpha=0.93,
                        activation_func='multiquadric',
                        regressor=linear_model.Ridge(),
                        random_state=21398023)
    cls.fit(train_x, train_y)
    # Evaluate model
    print 'Validation error:', cls.score(val_x, val_y)
    print 'Test error:', cls.score(test_x, test_y)
# ----- Example 12 -----
#data=elmUtils.readDataFileToData("./data/glass.data", targetIndex=-1)  # marked as not working ("xxxx")
#data = datasets.fetch_olivetti_faces()  # OK
#--------------------------- datasets ---------------------------------

#data.target = data.target + 1  # (needed for some datasets but not others)
stdc = StandardScaler()
label_size = 0.1  # fraction of labelled samples; try 0.05 / 0.1 / 0.2 / 0.3 / 0.4
acc_rem = []  # accuracy collected for each repetition
for ii in range(1):
    # Use needOneHot=True when features are not all numeric, and
    # needLabelEncoder=True when the target is not numeric.
    data.data, data.target = elmUtils.processingData(data.data, data.target)
    print(data.data.shape)
    (train_data, test_data) = elmUtils.splitData(data.data, data.target,
                                                 1 - label_size)
    elmc = ELMClassifier(n_hidden=1000, activation_func='tanh',
                         alpha=1.0, random_state=0)
    elmc.fit(train_data[0], train_data[1])
    print(elmc.score(test_data[0], test_data[1]))

    # Remember this repetition's accuracy.
    acc_temp = elmc.score(test_data[0], test_data[1])
    acc_rem.append(acc_temp)

print("*****************************************************")
for i in acc_rem:
    print(f'{i*100:0.2f}', )  # per-run accuracy
acc_mean = np.mean(acc_rem)  # average accuracy over all runs
print("{:.2f}".format(acc_mean * 100))
print("-----------------以下为ELM算法(10次)---------------")
# ----- Example 13 -----
(ctrs, _, _) = k_means(xtoy_train, nh)
unit_rs = np.ones(nh)
rhl = RBFRandomHiddenLayer(n_hidden=nh, activation_func='poly_spline', gamma=3)
#rhl = RBFRandomHiddenLayer(n_hidden=nh, activation_func='multiquadric', gamma=1)
#rhl = RBFRandomHiddenLayer(n_hidden=nh, centers=ctrs, radii=unit_rs, gamma=4)
elmr = ELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

rbf_rhl = RBFRandomHiddenLayer(n_hidden=100, random_state=0, gamma=0.1)
elmc_rbf = ELMClassifier(hidden_layer=rbf_rhl)
elmc_rbf.fit(dgx_train, dgy_train)
print elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test)


def powtanh_xfer(activations, power=1.0):
    """Elementwise tanh(activations) raised to *power* (ELM transfer function)."""
    return np.power(np.tanh(activations), power)


#tanh_rhl = SimpleRandomHiddenLayer(n_hidden=5000, random_state=0)
tanh_rhl = SimpleRandomHiddenLayer(n_hidden=5000,
                                   activation_func=powtanh_xfer,
                                   activation_args={'power': 2.0})
elmc_tanh = ELMClassifier(hidden_layer=tanh_rhl)
elmc_tanh.fit(dgx_train, dgy_train)
print elmc_tanh.score(dgx_train,
                      dgy_train), elmc_tanh.score(dgx_test, dgy_test)