Example #1
def classfiers(X, Y, times, fold):
    # cross-validate X, Y with six classifiers: `times` repetitions of `fold`-fold stratified CV
    classifier = [
        SVC(kernel='linear'),
        GaussianNB(),
        KNeighborsClassifier(),
        DecisionTreeClassifier(random_state=10),
        GradientBoostingClassifier(random_state=10),
        ELMClassifier()
    ]
    acc_res = []
    for clf in classifier:
        if clf is classifier[-1]:
            # the ELM expects standardized inputs; stdsc is assumed to be a
            # module-level StandardScaler() defined elsewhere in the file
            X = stdsc.fit_transform(X)
        each_score = []
        for i in range(times):
            acc_temp = []
            skf = StratifiedKFold(n_splits=fold, random_state=i, shuffle=True)
            for train_index, test_index in skf.split(X, Y):
                # print('Train: ',train_index,'Test: ',test_index)
                X_train, X_test = X[train_index], X[test_index]
                Y_train, Y_test = Y[train_index], Y[test_index]
                clf.fit(X_train, Y_train)
                acc1 = accuracy_score(Y_test, clf.predict(X_test))
                acc_temp.append(acc1)
            each_score.append(np.mean(acc_temp))
        acc_res.append(np.mean(each_score))
    return acc_res
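A minimal invocation of this helper, assuming the scikit-learn estimators referenced above are imported and a module-level `stdsc = StandardScaler()` exists (the dataset choice here is illustrative):

from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
mean_accs = classfiers(X, y, times=5, fold=10)
for name, acc in zip(['SVC', 'NB', 'KNN', 'DT', 'GBDT', 'ELM'], mean_accs):
    print('%s: %.3f' % (name, acc))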
Example #2
def make_classifiers():

    names = [
        "ELM(10,tanh)", "ELM(10,tanh,LR)", "ELM(10,sinsq)", "ELM(10,tribas)",
        "ELM(hardlim)", "ELM(20,rbf(0.1))"
    ]

    nh = 10

    # pass user defined transfer func
    sinsq = (lambda x: np.power(np.sin(x), 2.0))
    srhl_sinsq = SimpleRandomHiddenLayer(n_hidden=nh,
                                         activation_func=sinsq,
                                         random_state=0)

    # use internal transfer funcs
    srhl_tanh = SimpleRandomHiddenLayer(n_hidden=nh,
                                        activation_func='tanh',
                                        random_state=0)

    srhl_tribas = SimpleRandomHiddenLayer(n_hidden=nh,
                                          activation_func='tribas',
                                          random_state=0)

    srhl_hardlim = SimpleRandomHiddenLayer(n_hidden=nh,
                                           activation_func='hardlim',
                                           random_state=0)

    # use gaussian RBF
    srhl_rbf = RBFRandomHiddenLayer(n_hidden=nh * 2, gamma=0.1, random_state=0)

    log_reg = LogisticRegression()

    classifiers = [
        ELMClassifier(srhl_tanh),
        ELMClassifier(srhl_tanh, regressor=log_reg),
        ELMClassifier(srhl_sinsq),
        ELMClassifier(srhl_tribas),
        ELMClassifier(srhl_hardlim),
        ELMClassifier(srhl_rbf)
    ]

    return names, classifiers
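A sketch of how these (name, classifier) pairs might be consumed, assuming the ELM classes imported at the top of the original file are available (the dataset and split below are illustrative):

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

X, y = load_digits(return_X_y=True)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=0)
names, classifiers = make_classifiers()
for name, clf in zip(names, classifiers):
    clf.fit(X_train, y_train)
    print('%-18s %.3f' % (name, clf.score(X_test, y_test)))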
Example #3
labelMat = mat(labelArr)
# labelMat[labelMat == -1] = 0
# labelArr = array(labelMat)
testDatArr, testLabelArr = loadDataSet(TESTDATA, low_limit, up_limit)
testLabelMat = mat(testLabelArr)
# testLabelMat[testLabelMat == -1] = 0
# testLabelArr = array(testLabelMat)
# print targets[:10]
y_train_pred = zeros(shape(labelArr))
y_preds = zeros(shape(testLabelArr))
X_test = testDatArr
y_test = testLabelArr
# clf = ELMClassifier(n_hidden=100, activation_func='gaussian', alpha=0.0, random_state=1)
# clf = ELMClassifier(n_hidden=100000, activation_func='hardlim', alpha=1.0, random_state=0)
clf = ELMClassifier(n_hidden=100,
                    activation_func='hardlim',
                    alpha=1.0,
                    random_state=1)
for iter in range(2):
    # y_target = mat(labelArr)
    y_target = sign(mat(labelArr) - mat(y_train_pred))
    print y_target
    print y_train_pred
    # print shape(y_target)
    # iterate over classifiers
    clf.fit(datArr, list(array(y_target).reshape(-1)))
    y_train_pred = clf.predict(datArr)
    y_pred = clf.predict(testDatArr)
    acc = calcAccurary(sign(y_train_pred), labelArr)
    print "Training accuracy:\n"
    print acc
    acc = calcAccurary(sign(y_pred), y_test)
Example #4
from sklearn.datasets import load_digits
from elm import ELMClassifier

# testing elm on the digits dataset
digits = load_digits()
X, y = digits.data, digits.target

# normalization is important
X /= 255
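# (note: load_digits pixel values range from 0 to 16, so X /= 16 would map
#  them onto [0, 1]; dividing by 255 merely shrinks them further)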

clf = ELMClassifier(n_hidden=30)
clf.fit(X, y)

print 'score:', clf.score(X, y)
Example #5
    train_data, unlabel_data = split_data.randomSplit(
        [0.01, 0.99], 7
    )  # train_data.first() [4.6, 3.1, 1.5, 0.2, 0]   unlabel_data.first() #[5.1, 3.5, 1.4, 0.2, 0]
    unlabel_data.persist()
    # training-set features without the label column, used for fitting
    train_array = train_data.map(
        lambda x: x[0:feature_num]).collect()  # [4.6, 3.1, 1.5, 0.2]
    # training-set labels
    train_label = train_data.map(lambda x: x[class_index]).collect()

    start = time()

    # create the hidden layer
    srh = SimpleRandomHiddenLayer(activation_args=activation,
                                  n_hidden=hiddenLayer_num)
    # create the ELM classifier
    elmc = ELMClassifier(hidden_layer=srh)
    for i in range(iter_num):
        print "-" * 20 + " %d train" % (i + 1) + "-" * 20
        ###############  ELM training  #############
        print "train_array_num:", len(train_array)
        # train the classifier
        elmc.fit(train_array, train_label)
        pred_class = elmc.predict_class(test_array)
        # classification accuracy
        soc = accuracy_score(pred_class, test_label)
        print "test_soc:", soc

        # predict the unlabeled set: each sample yields a score vector, which
        # is soft-maxed; compute its entropy, sort by entropy in descending
        # order, and take the first select_num samples
        select_result = unlabel_data.map(lambda x: (enry(x), x)).sortByKey(
            ascending=False).top(select_num)
        # print select_result  #[(1.017120190624998, [6.1, 3.0, 4.6, 1.4, 1]), (1.016313951000489, [7.4, 2.8, 6.1, 1.9, 2]),
Example #6
        x))  #.map(lambda x:[float_feature(x[0:4]),iris_label_transfor(x[4])])
    print split_data.collect()
    test_data = split_data.sample(False, 0.2, 87)
    test_array = test_data.map(lambda x: x[0:feature_num]).collect()
    test_label = test_data.map(lambda x: x[class_index]).collect()
    print "test_data 1%:", sum(test_label)
    # split the dataset into a training set and an unlabeled set (3:7 in the original comment; the weights below are 0.2/0.8)
    train_data, unlabel_data = split_data.randomSplit(
        [0.2, 0.8], 7
    )  # train_data.first() [4.6, 3.1, 1.5, 0.2, 0]   unlabel_data.first() #[5.1, 3.5, 1.4, 0.2, 0]
    print unlabel_data.collect()
    # create the hidden layer
    srh = SimpleRandomHiddenLayer(activation_args=activation,
                                  n_hidden=hiddenLayer_num)
    # create the ELM classifier
    elmc = ELMClassifier(hidden_layer=srh)
    for i in range(1):
        print "-" * 20 + "%d train" % (i + 1) + "-" * 20
        # training-set features without the label column, used for fitting
        train_ = train_data.map(
            lambda x: x[0:feature_num])  # [4.6, 3.1, 1.5, 0.2]
        # unlabeled-data features without the label column, used for prediction
        unlabel_ = unlabel_data.map(
            lambda x: x[0:feature_num])  # [5.1, 3.5, 1.4, 0.2]
        # training-set labels
        train_label = train_data.map(lambda x: x[class_index]).collect()
        print train_label
        ###############  ELM training  #############

        train_array = train_.collect()
        print "train_array_num:", len(train_array)
Example #7
File: elm_single.py  Project: cgq5/elm
up_limit = 1000
# Load training file..
datArr, labelArr = loadDataSet(INPUTDATA, low_limit, up_limit)
labelMat = mat(labelArr)
# labelMat[labelMat == -1] = 0
# labelArr = array(labelMat)
testDatArr, testLabelArr = loadDataSet(TESTDATA, low_limit, up_limit)
testLabelMat = mat(testLabelArr)
# testLabelMat[testLabelMat == -1] = 0
# testLabelArr = array(testLabelMat)
# print targets[:10]
y_train_pred = zeros(shape(labelArr))
y_preds = zeros(shape(testLabelArr))
X_test = testDatArr
y_test = testLabelArr
# clf = ELMClassifier(n_hidden=100, activation_func='gaussian', alpha=0.0, random_state=1)
# clf = ELMClassifier(n_hidden=100000, activation_func='hardlim', alpha=1.0, random_state=0)
clf = ELMClassifier(n_hidden=100, activation_func='hardlim', alpha=1.0, random_state=1)
for iter in range(2):
    # y_target = mat(labelArr)
    y_target = sign(mat(labelArr) - mat(y_train_pred))
    print y_target
    print y_train_pred
    # print shape(y_target)
    # iterate over classifiers
    clf.fit(datArr, list(array(y_target).reshape(-1)))
    y_train_pred = clf.predict(datArr)
    y_pred = clf.predict(testDatArr)
    acc = calcAccurary(sign(y_train_pred), labelArr)
    print "Training accuracy:\n"
    print acc
    acc = calcAccurary(sign(y_pred), y_test)
    print acc
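The loop in Examples #3 and #7 refits the classifier on the sign of the residual between the true ±1 labels and the previous round's training predictions, a crude boosting-style correction. The same idea in isolation, on synthetic data (an illustrative sketch; only the `elm` package used throughout these examples is assumed):

import numpy as np
from elm import ELMClassifier

rng = np.random.RandomState(0)
datArr = rng.randn(200, 5)
labelArr = np.sign(datArr[:, 0] + 0.1 * rng.randn(200))  # labels in {-1, +1}

clf = ELMClassifier(n_hidden=100, activation_func='hardlim', alpha=1.0,
                    random_state=1)
y_train_pred = np.zeros_like(labelArr)
for it in range(2):
    # round 0 sees the raw labels; later rounds see the residual sign
    # (samples already predicted correctly get target 0)
    y_target = np.sign(labelArr - y_train_pred)
    clf.fit(datArr, y_target)
    y_train_pred = clf.predict(datArr)
    print(np.mean(np.sign(y_train_pred) == labelArr))  # training accuracy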
Example #8
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
tr, ts = res_dist(dbx, dby, RandomForestRegressor(n_estimators=15), n_runs=100, random_state=0)
hist(tr), hist(ts)
print

rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
tr,ts = res_dist(dbx, dby, GenELMRegressor(rhl), n_runs=100, random_state=0)
hist(tr), hist(ts)
print
"""
# <codecell>

elmc = ELMClassifier(n_hidden=1000,
                     activation_func='multiquadric',
                     alpha=0.001,
                     random_state=0)
elmc.fit(cusx_train, cusy_train)
print elmc.score(cusx_train, cusy_train), elmc.score(cusx_test, cusy_test)

# <codecell>
"""
elmc = ELMClassifier(n_hidden=500, activation_func='hardlim', alpha=1.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

elmr = ELMRegressor(random_state=0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
Example #9
dbx_train, dbx_test, dby_train, dby_test = train_test_split(dbx, dby, test_size=0.2)

mrx, mry = make_regression(n_samples=2000, n_targets=4)
mrx_train, mrx_test, mry_train, mry_test = train_test_split(mrx, mry, test_size=0.2)

xtoy, ytoy = make_toy()
xtoy, ytoy = stdsc.fit_transform(xtoy), stdsc.fit_transform(ytoy)
xtoy_train, xtoy_test, ytoy_train, ytoy_test = train_test_split(xtoy, ytoy, test_size=0.2)
plot(xtoy, ytoy)

# <codecell>

# RBFRandomLayer tests
for af in RandomLayer.activation_func_names():
    print af,
    elmc = ELMClassifier(activation_func=af)
    tr,ts = res_dist(irx, iry, elmc, n_runs=200, random_state=0)

# <codecell>

elmc.classes_
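# classes_ holds the class labels seen during the most recent fit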

# <codecell>

for af in RandomLayer.activation_func_names():
    print af
    elmc = ELMClassifier(activation_func=af, random_state=0)
    tr,ts = res_dist(dgx, dgy, elmc, n_runs=100, random_state=0)

# <codecell>
Example #10
from elm import ELMClassifier
from sklearn import datasets
from elm import elmUtils, BvsbUtils
import numpy as np

# data = datasets.fetch_olivetti_faces()  # dataset option 1
# data = datasets.fetch_covtype()  # dataset option 2
data = datasets.load_iris()  # dataset option 3
# data = datasets.load_digits()  # dataset option 4; comment the active line and uncomment the one to run
# data = datasets.load_wine()  # dataset option 5
# data = datasets.load_breast_cancer()  # dataset option 6

# data = elmUtils.coverDataFileToData("./data/abalone.data", targetIndex=-1, transformIndex=[0])
# data = elmUtils.readDataFileToData("data/balance-scale.data", targetIndex=0)
# data = datasets.fetch_olivetti_faces()  # sparse matrix; must be converted and dimension-reduced
# when the data are not all numeric, set needOneHot=True; when the target is not numeric, set needLabelEncoder=True
# data.data, data.target = elmUtils.processingData(data.data, data.target)
# data.data = BvsbUtils.dimensionReductionWithPCA(data.data, 100)  # kddcup99 is too high-dimensional; reduce first
print(data.data.shape)
label_size = 0.3

(train_data, test_data) = elmUtils.splitData(data.data, data.target,
                                             1 - label_size)
elmc = ELMClassifier(n_hidden=1000,
                     activation_func='tanh',
                     alpha=1.0,
                     random_state=0)
elmc.fit(train_data[0], train_data[1])
print(elmc.score(test_data[0], test_data[1]))
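For reference, an equivalent labeled/unlabeled split with plain scikit-learn (a sketch; `elmUtils.splitData` is assumed to return `(features, labels)` tuples as used above):

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, test_size=1 - label_size, random_state=0)
elmc.fit(X_train, y_train)
print(elmc.score(X_test, y_test))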
Example #11
from sklearn.calibration import CalibratedClassifierCV
from elm import ELMClassifier, OSELM
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

data = load_digits()

x_train, x_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.7)
elmc = ELMClassifier(n_hidden=1000, activation_func='tanh', alpha=1.0, random_state=0)
oselm = OSELM(x_train, y_train, 1000, active_function="sigmoid")

# cccv = CalibratedClassifierCV(oselm, cv=2, method="isotonic")
cccv = CalibratedClassifierCV(elmc, cv=2, method="isotonic")
cccv.fit(x_train, y_train)
r = cccv.predict_proba(x_test)


#https://blog.csdn.net/yolohohohoho/article/details/99679680

print(r)
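`predict_proba` returns one row of calibrated class probabilities per test sample; hard labels and an accuracy can be recovered from it (a sketch using the names above):

import numpy as np

y_pred = cccv.classes_[np.argmax(r, axis=1)]
print('calibrated accuracy: %.3f' % np.mean(y_pred == y_test))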
Example #12
    print "\nTime: %.3f secs" % (time() - start_time)

    print "Test Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (
        min(test_res), np.mean(test_res), max(test_res), np.std(test_res))
    print "Train Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (
        min(train_res), np.mean(train_res), max(train_res), np.std(train_res))
    print
    return (train_res, test_res)


stdsc = StandardScaler()

iris = load_iris()
irx, iry = stdsc.fit_transform(iris.data), iris.target
irx_train, irx_test, iry_train, iry_test = train_test_split(irx,
                                                            iry,
                                                            test_size=0.2)

srh = SimpleRandomHiddenLayer(activation_args='sigmoid', n_hidden=500)
elmc = ELMClassifier(hidden_layer=srh)
# elmc = ELMClassifier(SimpleRandomHiddenLayer(activation_func='sigmoid'))
# print "SimpleRandomHiddenLayer(activation_func='sigmoid')"
# tr,ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)
# plt.hist(tr),plt.hist(tr)
# plt.show()

elmc.fit(irx_train, iry_train)
r = elmc.predict(irx_test)
print r
res = elmc.score(irx_test, iry_test)
print res
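`res_dist`, whose tail is shown above, returns the per-run train and test scores; it is called elsewhere in these examples like so (a usage sketch consistent with Examples #9 and #19):

tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)
print 'mean train: %.3f  mean test: %.3f' % (np.mean(tr), np.mean(ts))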
Example #13
def simulate(trn, tst):
    start = time.time()
    b_tp = b_fp = b_tn = b_fn = 0
    s_tp = s_fp = s_tn = s_fn = 0
    b_min = s_min = 1000000
    b_max = s_max = 0
    b_money = s_money = 0
    b_money_vec = [0]
    s_money_vec = [0]
    b_gain = s_gain = 0
    b_loss = s_loss = 0
    b_draw = s_draw = 0
    b_gain_vec = []
    s_gain_vec = []
    b_loss_vec = []
    s_loss_vec = []
    b_max_drawdown = s_max_drawdown = 0
    b_pos = s_pos = False
    time_vec = []
    aux_ii = len(tst) - 1

    for t, val in enumerate(tst):
        start_i = time.time()

        if t == 201:
            continue

        if t == 0:
            tst[0, 5] = id.log_return(tst[0, 0], tst[0, 3], trn[-1, 0],
                                      trn[-1, 3])
        else:
            tst[t, 5] = id.log_return(tst[t, 0], tst[t, 3], trn[t - 1, 0],
                                      trn[t - 1, 3])
        tst[t, 6] = mnm.get(val[5])
        tst[t, 7] = obv.get_obv(val[3], val[4])
        aux = bbs.sma(val[3])
        if aux is not None:
            tst[t, 8], tst[t, 9] = aux
        aux_9 = m_9.ema(val[3])
        aux12 = m12.ema(val[3])
        aux26 = m26.ema(val[3])
        tst[t, 10] = aux12 - aux26
        tst[t, 11] = tst[t, 10] - aux_9

        aux = trn[-1000:]
        aux_i = [(i[1] - mn[i[0]]) * mx[i[0]] for i in enumerate(tst[t, :12])]
        # aux_j = trn[-1000:, :]

        b_elm = ELMClassifier(random_state=0,
                              n_hidden=200,
                              activation_func='sigmoid',
                              alpha=0.0)
        b_elm.fit(aux[:, :12], aux[:, 12])
        b_res = b_elm.predict([aux_i[:12]])
        s_elm = ELMClassifier(random_state=0,
                              n_hidden=200,
                              activation_func='sigmoid',
                              alpha=0.0)
        s_elm.fit(aux[:, :12], aux[:, 13])
        s_res = s_elm.predict([aux_i[:12]])

        if b_res == 1.0:
            if val[12] == 1.0:
                b_tp += 1
            else:
                b_fp += 1
            if not b_pos:
                # enter the position
                b_money -= val[3]
                b_pos = True
        else:
            if val[12] == 0.0:
                b_tn += 1
            else:
                b_fn += 1
            if b_pos:
                # exit the position
                b_money += val[3]
                b_pos = False
                if b_money < b_money_vec[-1]:
                    b_loss += 1
                    b_loss_vec.append(b_money_vec[-1] - b_money)
                elif b_money > b_money_vec[-1]:
                    b_gain += 1
                    b_gain_vec.append(b_money - b_money_vec[-1])
                else:
                    b_draw += 1
        if val[14] == 1.0:
            # exit the position
            b_money += val[3]
            b_pos = False
            if b_money < b_money_vec[-1]:
                b_loss += 1
                b_loss_vec.append(b_money_vec[-1] - b_money)
            elif b_money > b_money_vec[-1]:
                b_gain += 1
                b_gain_vec.append(b_money - b_money_vec[-1])
            else:
                b_draw += 1

        if b_pos:
            b_money_vec.append(b_money_vec[-1])
        else:
            b_money_vec.append(b_money)
            if b_money > b_max:
                b_max = b_money
            if b_money < b_min:
                b_min = b_money

        if s_res == 1.0:
            if val[13] == 1.0:
                s_tp += 1
            else:
                s_fp += 1
            if not s_pos:
                # enter the position
                s_money += val[3]
                s_pos = True
        else:
            if val[13] == 0.0:
                s_tn += 1
            else:
                s_fn += 1
            if s_pos:
                # exit the position
                s_money -= val[3]
                s_pos = False
                if s_money < s_money_vec[-1]:
                    s_loss += 1
                    s_loss_vec.append(s_money_vec[-1] - s_money)
                elif s_money > s_money_vec[-1]:
                    s_gain += 1
                    s_gain_vec.append(s_money - s_money_vec[-1])
                else:
                    s_draw += 1
        if val[14] == 1.0:
            # exit the position
            s_money -= val[3]
            s_pos = False
            if s_money < s_money_vec[-1]:
                s_loss += 1
                s_loss_vec.append(s_money_vec[-1] - s_money)
            elif s_money > s_money_vec[-1]:
                s_gain += 1
                s_gain_vec.append(s_money - s_money_vec[-1])
            else:
                s_draw += 1

        if s_pos:
            s_money_vec.append(s_money_vec[-1])
        else:
            s_money_vec.append(s_money)
            if s_money > s_max:
                s_max = s_money
            if s_money < s_min:
                s_min = s_money

        # print(aux_i + list(tst[t, 12:]))
        trn = np.append(trn, [aux_i + list(tst[t, 12:])], axis=0)
        time_vec.append(time.time() - start_i)
        sys.stdout.write('\r' + '%6d / %d' % (t, aux_ii) + '\033[K')
    sys.stdout.write('\r' + '>> %6.2f: Simulation Done!\n\n' %
                     (time.time() - start) + '\033[K')

    print('#### ' + sys.argv[1] + ' ####')
    print('Mean time  : %f' % np.mean(time_vec))
    print('Final      : %5.5f | %5.5f' % (b_money, s_money))
    # print('Final      : %5.5f | %5.5f' % (b_money_vec[-1], s_money_vec[-1]))
    print('Minimum    : %5.5f | %5.5f' % (b_min, s_min))
    print('Maximum    : %5.5f | %5.5f' % (b_max, s_max))
    print('Gain count : %10d | %10d' % (b_gain, s_gain))
    print('Loss count : %10d | %10d' % (b_loss, s_loss))
    print('Draw count : %10d | %10d' % (b_draw, s_draw))
    print('Mean gain  : %5.5f | %5.5f' %
          (np.mean(b_gain_vec), np.mean(s_gain_vec)))
    print('Mean loss  : %5.5f | %5.5f' %
          (np.mean(b_loss_vec), np.mean(s_loss_vec)))
    print('TP         : %10d | %10d' % (b_tp, s_tp))
    print('FP         : %10d | %10d' % (b_fp, s_fp))
    print('TN         : %10d | %10d' % (b_tn, s_tn))
    print('FN         : %10d | %10d' % (b_fn, s_fn))

    plot(b_money_vec, s_money_vec, sys.argv[1], tst[:, 3])
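Each bar above retrains two fresh ELMs on a sliding window of the last 1000 feature rows (columns 0-11), one against the buy label (column 12) and one against the sell label (column 13). The retraining pattern in isolation (a sketch on synthetic data; the column layout is as assumed above):

import numpy as np
from elm import ELMClassifier

rng = np.random.RandomState(0)
trn = rng.randn(1500, 14)                  # 12 features + buy/sell labels
trn[:, 12:] = (trn[:, 12:] > 0).astype(float)

window = trn[-1000:]                       # most recent 1000 rows only
b_elm = ELMClassifier(random_state=0, n_hidden=200,
                      activation_func='sigmoid', alpha=0.0)
b_elm.fit(window[:, :12], window[:, 12])   # buy-signal model
print(b_elm.predict(window[-1:, :12]))     # predict the newest row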
Example #14
import cPickle

import numpy as np

from elm import ELMClassifier
from sklearn import linear_model


def load_mnist(path='../Data/mnist.pkl'):
    with open(path, 'rb') as f:
        return cPickle.load(f)


def get_datasets(data):
    _train_x, _train_y = data[0][0], np.array(data[0][1]).reshape(len(data[0][1]), 1)
    _val_x, _val_y = data[1][0], np.array(data[1][1]).reshape(len(data[1][1]), 1)
    _test_x, _test_y = data[2][0], np.array(data[2][1]).reshape(len(data[2][1]), 1)

    return _train_x, _train_y, _val_x, _val_y, _test_x, _test_y


if __name__ == '__main__':
    # Load data sets
    train_x, train_y, val_x, val_y, test_x, test_y = get_datasets(load_mnist())
    # Build ELM
    cls = ELMClassifier(n_hidden=7000,
                        alpha=0.93,
                        activation_func='multiquadric',
                        regressor=linear_model.Ridge(),
                        random_state=21398023)
    cls.fit(train_x, train_y)
    # Evaluate model
    print 'Validation accuracy:', cls.score(val_x, val_y)
    print 'Test accuracy:', cls.score(test_x, test_y)
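    # Hedged note: the labels above are (n, 1) column vectors, while
    # scikit-learn-style estimators generally expect 1-D label arrays;
    # flattening them avoids shape surprises:
    #     cls.fit(train_x, train_y.ravel())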
Example #15
    tmpX = np.array([ftr1, ftr2, ftr3, ftr4, ftr5, ftr6])
    X[i] = tmpX

#Make the output from training data
y = np.zeros(len(trainData) - 3)
for i in range(0, len(trainData) - 3):
    if trainData['changerange'][i+3] >= 0:
        y[i] = 1
    else:
        y[i] = -1
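# (features for sample i come from days i..i+2; the label is the sign of
#  day i+3's change, i.e. the direction of the day after the window)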

srhl_tanh = SimpleRandomHiddenLayer(n_hidden=10, activation_func='tanh', random_state=0)
srhl_rbf = RBFRandomHiddenLayer(n_hidden=200*2, gamma=0.1, random_state=0)
#create ELM instance
reg = ELMRegressor(srhl_tanh)
cla = ELMClassifier(srhl_tanh)
#fit the data
reg = reg.fit(X, y)
cla = cla.fit(X, y)

#Read testing data from testing.csv
testData = pd.read_csv('testing.csv')
pdt = np.zeros(len(testData) - 3)
pdt2 = np.zeros(len(testData) - 3)

for i in range(len(testData) - 3):
    ftr1 = float(testData['close'][i]) - float(testData['open'][i])
    ftr2 = float(testData['close'][i+1]) - float(testData['open'][i+1])
    ftr3 = float(testData['close'][i+2]) - float(testData['open'][i+2])
    ftr4 = float(testData['highest'][i]) - float(testData['lowest'][i])
    ftr5 = float(testData['highest'][i+1]) - float(testData['lowest'][i+1])
Example #17
nh = 10
(ctrs, _, _) = k_means(xtoy_train, nh)
unit_rs = np.ones(nh)
#rhl = RBFRandomLayer(n_hidden=nh, activation_func='poly_spline', gamma=3)
#rhl = RBFRandomLayer(n_hidden=nh, activation_func='multiquadric', gamma=1)
rhl = RBFRandomLayer(n_hidden=nh, centers=ctrs, radii=unit_rs)
elmr = ELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

rbf_rhl = RBFRandomLayer(n_hidden=100, random_state=0, gamma=0.1)
elmc_rbf = ELMClassifier(hidden_layer=rbf_rhl)
elmc_rbf.fit(dgx_train, dgy_train)
print elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test)

def powtanh_xfer(activations, power=1.0):
    return pow(np.tanh(activations), power)

#tanh_rhl = SimpleRandomLayer(n_hidden=5000, random_state=0)
tanh_rhl = SimpleRandomLayer(n_hidden=5000, activation_func=powtanh_xfer, activation_args={'power':2.0})
elmc_tanh = ELMClassifier(hidden_layer=tanh_rhl)
elmc_tanh.fit(dgx_train, dgy_train)
print elmc_tanh.score(dgx_train, dgy_train), elmc_tanh.score(dgx_test, dgy_test)
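# Hedged note on the mechanism: the layer appears to call a user-supplied
# activation_func with the keyword arguments given in activation_args, which
# is how power=2.0 reaches powtanh_xfer above. Another parametrized transfer
# in the same style (an illustrative sketch):
def scaled_sine_xfer(activations, scale=1.0):
    return np.sin(scale * activations)

sine_rhl = SimpleRandomLayer(n_hidden=500, random_state=0,
                             activation_func=scaled_sine_xfer,
                             activation_args={'scale': 0.5})
elmc_sine = ELMClassifier(hidden_layer=sine_rhl)
elmc_sine.fit(dgx_train, dgy_train)
print elmc_sine.score(dgx_train, dgy_train), elmc_sine.score(dgx_test, dgy_test)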

# <codecell>

rbf_rhl = RBFRandomLayer(n_hidden=100, gamma=0.1)
Example #18
loaded_data = load_iris()
# Generate test and train data
data_size = np.size(loaded_data.data,0)
rand_perm = np.random.permutation(data_size) # generate random permutation
# Shuffle target and data
shfl_data = loaded_data.data[rand_perm]
shfl_target = loaded_data.target[rand_perm]
# divide train data and test data
offset = int(data_size * 0.8)  # 80 percent of the data as training
train_data = shfl_data[:offset, :]
train_target = shfl_target[:offset]
test_data = shfl_data[offset:, :]
test_target = shfl_target[offset:]

# Build ELM Classifier using default value
elm_classifier = ELMClassifier()
# Train ELM
elm_classifier.fit(train_data,train_target)

predicted_class = elm_classifier.predict(test_data)
error_elm = 1.0 - elm_classifier.score(test_data, test_target)
print 'Predicted Class\t\t', predicted_class
print 'Original Class\t\t', test_target
print 'Error Score\t\t\t', error_elm
print ''

#print '##############################################################\nSINGLE GEN-ELM RESULT\n##############################################################'
from elm import GenELMClassifier
## Build GEN_ELM Classifier using default value
#genelm_classifier = GenELMClassifier()
## Train the ELM
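A sketch of how the commented-out GEN-ELM block presumably continued, mirroring the plain-ELM calls above (assuming GenELMClassifier exposes the same fit/score API):

#genelm_classifier.fit(train_data, train_target)
#print 'GenELM Error Score\t\t', 1.0 - genelm_classifier.score(test_data, test_target)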
Example #19
mrx_train, mrx_test, mry_train, mry_test = train_test_split(mrx,
                                                            mry,
                                                            test_size=0.2)

xtoy, ytoy = make_toy()
xtoy, ytoy = stdsc.fit_transform(xtoy), stdsc.fit_transform(ytoy)
xtoy_train, xtoy_test, ytoy_train, ytoy_test = train_test_split(xtoy,
                                                                ytoy,
                                                                test_size=0.2)
plot(xtoy, ytoy)

# <codecell>

# RBFRandomLayer tests
for af in RandomLayer.activation_func_names():
    print af,
    elmc = ELMClassifier(activation_func=af)
    tr, ts = res_dist(irx, iry, elmc, n_runs=200, random_state=0)

# <codecell>

elmc.classes_

# <codecell>

for af in RandomLayer.activation_func_names():
    print af
    elmc = ELMClassifier(activation_func=af, random_state=0)
    tr, ts = res_dist(dgx, dgy, elmc, n_runs=100, random_state=0)

# <codecell>
Example #20
    def run(self):

        rawDataset = open('../../selectedUserMore', 'r')

        # Value 0 represents max number of apps => It must be set
        maxValuesByFeature = [6, 4, 2, 2, 2, 0, 4, 5, 3]

        datasetFeatures = []
        datasetLabels = []

        maxApps = self.findMaxNumberOfApps(rawDataset)

        # Setting max number of apps
        maxValuesByFeature[5] = maxApps

        rawDataset.seek(0)
        for line in rawDataset:
            tuples = line.split(';')

            for t in range(len(tuples)):

                tup = tuples[t]
                vals = tup.split(',')
                label = vals[0]
                features = vals[1].split('_')

                datasetFeatures.append(features)
                datasetLabels.append(label)

        rawDataset.close()

        nTuples = len(datasetLabels)

        nTrain = int(0.7 * nTuples)

        nMaxHidden = 3005
        totalAccuracy = 0.0

        trainSetFeatures = np.array(datasetFeatures[:nTrain], dtype=np.int)
        trainSetLabels = np.array(datasetLabels[:nTrain], dtype=np.int)

        testSetFeatures = np.array(datasetFeatures[nTrain:], dtype=np.int)
        testSetLabels = np.array(datasetLabels[nTrain:], dtype=np.int)

        # for x in range(200,nMaxHidden):

        ffsn = ELMClassifier(n_hidden=1000, activation_func='sine')
        ffsn.fit(trainSetFeatures, trainSetLabels)

        results = ffsn.predict(testSetFeatures)

        nPredictedCorrectly = 0

        for test in range(len(testSetLabels)):
            realValue = testSetLabels[test]
            predictedValue = results[test]

            if (int(realValue) == int(predictedValue)):
                nPredictedCorrectly += 1

            # print "Real: " + str(realValue) + " / Predicted: " + str(predictedValue)

        totalTests = nTuples - nTrain
        accuracy = float(nPredictedCorrectly) / totalTests
        # print "N Hidden: " + str(x) + " / Accuracy: " + str(accuracy)
        print "Accuracy: " + str(accuracy)
        totalAccuracy += accuracy
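        # Note: the manual counting above reproduces ClassifierMixin.score;
        # assuming this ELMClassifier follows the scikit-learn API (as the
        # other examples here suggest), the same number is simply:
        #     accuracy = ffsn.score(testSetFeatures, testSetLabels)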
Example #21
	def run(self):

		rawDataset = open('../../selectedUserMore', 'r')

		# Value 0 represents max number of apps => It must be set
		maxValuesByFeature = [6,4,2,2,2,0,4,5,3]

		datasetFeatures = []
		datasetLabels = []

		maxApps = self.findMaxNumberOfApps(rawDataset)

		# Setting max number of apps
		maxValuesByFeature[5] = maxApps

		rawDataset.seek(0)
		for line in rawDataset:
			tuples = line.split(';')

			for t in range(len(tuples)):

				
				listFeatureNeurons = []
				listLabelNeurons = []

				tup = tuples[t]
				vals = tup.split(',')
				label = vals[0]
				features = vals[1].split('_')
				
				for feature in range(len(features)):
					val = features[feature]
					listFeatureNeurons += self.createNeuronInput(val, maxValuesByFeature[feature])
				
				listLabelNeurons += self.createNeuronInput(label, maxApps)

				datasetFeatures.append(listFeatureNeurons)
				datasetLabels.append(listLabelNeurons)

		rawDataset.close()		

		nTuples = len(datasetLabels)

		
		nTrain = int(0.7 * nTuples)
				
		nMaxHidden = 3005
		totalAccuracy = 0.0

		trainSetFeatures = datasetFeatures[:nTrain]
		trainSetLabels = datasetLabels[:nTrain]

		testSetFeatures = datasetFeatures[nTrain:]
		testSetLabels = datasetLabels[nTrain:]


		# for x in range(200,nMaxHidden):
		
		print testSetLabels
			
		ffsn = ELMClassifier(n_hidden=100, activation_func='sigmoid')
		ffsn.fit(trainSetFeatures, trainSetLabels)
Example #22
mrx, mry = make_regression(n_samples=2000)
mrx_train, mrx_test, mry_train, mry_test = train_test_split(mrx,
                                                            mry,
                                                            test_size=0.2)

xtoy, ytoy = make_toy()
xtoy, ytoy = stdsc.fit_transform(xtoy), stdsc.fit_transform(ytoy)
xtoy_train, xtoy_test, ytoy_train, ytoy_test = train_test_split(xtoy,
                                                                ytoy,
                                                                test_size=0.2)
plot(xtoy, ytoy)

# <codecell>

# RBF tests
elmc = ELMClassifier(RBFRandomHiddenLayer(activation_func='gaussian'))
tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)

elmc = ELMClassifier(
    RBFRandomHiddenLayer(activation_func='poly_spline', gamma=2))
tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)

elmc = ELMClassifier(RBFRandomHiddenLayer(activation_func='multiquadric'))
tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)

# Simple tests
elmc = ELMClassifier(SimpleRandomHiddenLayer(activation_func='sine'))
tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)

elmc = ELMClassifier(SimpleRandomHiddenLayer(activation_func='tanh'))
tr, ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)