random_state=0)
hist(tr), hist(ts)
print

# Distribution of regression scores over repeated splits (Python 2 notebook
# export).  res_dist presumably runs n_runs train/test splits of (dbx, dby)
# and returns per-run train/test scores -- TODO confirm against its definition.
rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
tr, ts = res_dist(dbx, dby, GenELMRegressor(rhl), n_runs=100, random_state=0)
hist(tr), hist(ts)
print

# <codecell>

# Digits classification: many Gaussian hidden units, no regularization (alpha=0.0).
elmc = ELMClassifier(n_hidden=1000,
                     activation_func='gaussian',
                     alpha=0.0,
                     random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

# Same task with fewer hard-limit units and full regularization (alpha=1.0).
elmc = ELMClassifier(n_hidden=500,
                     activation_func='hardlim',
                     alpha=1.0,
                     random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

# Default-configuration ELM regressor on the toy 1-D dataset.
elmr = ELMRegressor(random_state=0)
elmr.fit(xtoy_train, ytoy_train)
示例#2
0
# Minimal ELM demo on the scikit-learn digits dataset (Python 2 `print`).
from sklearn.datasets import load_digits
from elm import ELMClassifier

# testing elm on the digits dataset
digits = load_digits()
X, y = digits.data, digits.target

# normalization is important
# NOTE(review): load_digits pixel values span 0-16, not 0-255, so /=255 only
# rescales to ~[0, 0.063] -- confirm whether /=16 was intended.
X/=255

clf = ELMClassifier(n_hidden=30)
clf.fit(X,y)

# Training-set accuracy only (no held-out split in this snippet).
print 'score:', clf.score(X,y)
示例#3
0
# Score-distribution comparison: random forest vs. RBF-layer GenELM on the
# same (dbx, dby) data, then digits / toy-regression demos (Python 2).
# NOTE(review): SVR is imported but never used in this snippet.
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
tr, ts = res_dist(dbx, dby, RandomForestRegressor(n_estimators=15), n_runs=100, random_state=0)
hist(tr), hist(ts)
print

rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
tr,ts = res_dist(dbx, dby, GenELMRegressor(rhl), n_runs=100, random_state=0)
hist(tr), hist(ts)
print

# <codecell>

# Gaussian units, unregularized.
elmc = ELMClassifier(n_hidden=1000, activation_func='gaussian', alpha=0.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

# Hard-limit units, fully regularized.
elmc = ELMClassifier(n_hidden=500, activation_func='hardlim', alpha=1.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

# Toy 1-D regression: fit, score, and plot prediction against the data.
elmr = ELMRegressor(random_state=0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
示例#4
0
# Duplicate of the digits demo above (Python 2 `print`).
from sklearn.datasets import load_digits
from elm import ELMClassifier

# testing elm on the digits dataset
digits = load_digits()
X, y = digits.data, digits.target

# normalization is important
# NOTE(review): digits pixels range 0-16; dividing by 255 merely shrinks the
# scale -- verify /16 was not intended.
X /= 255

clf = ELMClassifier(n_hidden=30)
clf.fit(X, y)

# Training-set accuracy (no held-out split).
print 'score:', clf.score(X, y)
示例#5
0
    # --- Fragment: body of a boosting-style ELM routine; the enclosing def
    # --- is not visible in this chunk.  Each round fits the classifier to the
    # --- sign of the residual (label - current prediction) and accumulates
    # --- LRATE-weighted predictions, AdaBoost/gradient-boosting style.
    X_test = testDatArr
    y_test = testLabelArr
    # clf = ELMClassifier(n_hidden=100, activation_func='gaussian', alpha=0.0, random_state=1)
    # clf = ELMClassifier(n_hidden=100 000, activation_func='hardlim', alpha=1.0, random_state=0)
    clf = ELMClassifier(n_hidden=100,
                        activation_func='hardlim',
                        alpha=1.0,
                        random_state=1)
    # NOTE(review): `iter` shadows the builtin; two boosting rounds hard-coded.
    for iter in range(2):
        # y_target = mat(labelArr)
        # Residual target: +1/-1/0 sign of (true label - running prediction).
        y_target = sign(mat(labelArr) - mat(y_train_pred))
        print y_target
        print y_train_pred
        # print shape(y_target)
        # iterate over classifiers
        # Flatten the matrix target to a plain list for fit().
        clf.fit(datArr, list(array(y_target).reshape(-1)))
        y_train_pred = clf.predict(datArr)
        y_pred = clf.predict(testDatArr)
        acc = calcAccurary(sign(y_train_pred), labelArr)
        print "Training accurarcy:\n"
        print acc
        acc = calcAccurary(sign(y_pred), y_test)
        print acc
        # Accumulate learning-rate-weighted test predictions across rounds.
        y_preds = y_preds + LRATE * y_pred
    print mat(y_preds)
    # Final ensemble score: sign of the accumulated predictions vs. y_test.
    acc = calcAccurary(sign(y_preds), y_test)
    # print y_preds
    print "Final score:\n"
    print acc
    """
	for name, clf in zip(names, classifiers):
示例#6
0
File: elm_single.py  Project: cgq5/elm
	# testLabelMat[testLabelMat == -1] = 0 
	# testLabelArr = array(testLabelMat)
	# print targets[:10]
	y_train_pred = zeros(shape(labelArr)); y_preds = zeros(shape(testLabelArr))
	X_test = testDatArr; y_test = testLabelArr
	# clf = ELMClassifier(n_hidden=100, activation_func='gaussian', alpha=0.0, random_state=1)
	# clf = ELMClassifier(n_hidden=100 000, activation_func='hardlim', alpha=1.0, random_state=0)
	clf = ELMClassifier(n_hidden=100, activation_func='hardlim', alpha=1.0, random_state=1)
	for iter in range(2):
		# y_target = mat(labelArr)
		y_target = sign(mat(labelArr) - mat(y_train_pred));
		print y_target
		print y_train_pred
		# print shape(y_target)
		# iterate over classifiers
		clf.fit(datArr, list(array(y_target).reshape(-1)))
		y_train_pred = clf.predict(datArr)
		y_pred = clf.predict(testDatArr)
		acc = calcAccurary(sign(y_train_pred), labelArr)
		print "Training accurarcy:\n"
		print acc
		acc = calcAccurary(sign(y_pred), y_test)
		print acc
		y_preds = y_preds + LRATE * y_pred
	print mat(y_preds)
	acc = calcAccurary(sign(y_preds), y_test)
	# print y_preds
	print "Final score:\n"
	print acc
	"""
	for name, clf in zip(names, classifiers):
    #     # collect the training-set labels
    # --- Fragment: Spark-based active-learning loop (enclosing def not
    # --- visible).  Each iteration trains an ELM, scores the test set, then
    # --- pulls the select_num highest-entropy unlabeled samples into the
    # --- training set.
    train_label = train_data.map(lambda x: x[class_index]).collect()

    start = time()

    # build the random hidden layer
    srh = SimpleRandomHiddenLayer(activation_args=activation,
                                  n_hidden=hiddenLayer_num)
    # build the ELM classifier
    elmc = ELMClassifier(hidden_layer=srh)
    for i in range(iter_num):
        print "-" * 20 + " %d train" % (i + 1) + "-" * 20
        #     ###############  ELM training  #############
        print "train_array_num:", len(train_array)
        # train the classifier
        elmc.fit(train_array, train_label)
        pred_class = elmc.predict_class(test_array)
        # classification accuracy on the held-out test set
        soc = accuracy_score(pred_class, test_label)
        print "test_soc:", soc

        # Score each unlabeled sample by entropy of its (softmaxed) prediction
        # vector, sort descending, and take the top select_num samples.
        select_result = unlabel_data.map(lambda x: (enry(x), x)).sortByKey(
            ascending=False).top(select_num)
        # print select_result  #[(1.017120190624998, [6.1, 3.0, 4.6, 1.4, 1]), (1.016313951000489, [7.4, 2.8, 6.1, 1.9, 2]),
        # NOTE(review): comprehension variable `i` shadows the loop counter.
        train_array.extend([i[1][:feature_num] for i in select_result
                            ])  # append selected samples' features to train_array
        train_label.extend([i[1][class_index]
                            for i in select_result])  # append selected samples' labels
    end = time()
    print "训练 %d 次时间:" % iter_num, end - start
示例#8
0
# Seed an RBF hidden layer with k-means centers and unit radii, then fit the
# toy 1-D regression and a digits classifier (Python 2 notebook export).
nh = 10
(ctrs, _, _) = k_means(xtoy_train, nh)
unit_rs = np.ones(nh)
#rhl = RBFRandomLayer(n_hidden=nh, activation_func='poly_spline', gamma=3)
#rhl = RBFRandomLayer(n_hidden=nh, activation_func='multiquadric', gamma=1)
rhl = RBFRandomLayer(n_hidden=nh, centers=ctrs, radii=unit_rs)
elmr = ELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

# Digits classifier with 100 random RBF units.
rbf_rhl = RBFRandomLayer(n_hidden=100, random_state=0, gamma=0.1)
elmc_rbf = ELMClassifier(hidden_layer=rbf_rhl)
elmc_rbf.fit(dgx_train, dgy_train)
print elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test)

def powtanh_xfer(activations, power=1.0):
    """Custom transfer function: elementwise tanh raised to *power*.

    Works on scalars or NumPy arrays; power defaults to 1.0 (plain tanh).
    """
    return np.tanh(activations) ** power

#tanh_rhl = SimpleRandomLayer(n_hidden=5000, random_state=0)
# Hidden layer using the custom squared-tanh transfer defined above.
tanh_rhl = SimpleRandomLayer(n_hidden=5000, activation_func=powtanh_xfer, activation_args={'power':2.0})
elmc_tanh = ELMClassifier(hidden_layer=tanh_rhl)
elmc_tanh.fit(dgx_train, dgy_train)
print elmc_tanh.score(dgx_train, dgy_train), elmc_tanh.score(dgx_test, dgy_test)

# <codecell>

# Score distribution of the RBF-layer classifier over repeated digit splits.
rbf_rhl = RBFRandomLayer(n_hidden=100, gamma=0.1)
tr, ts = res_dist(dgx, dgy, ELMClassifier(hidden_layer=rbf_rhl), n_runs=100, random_state=0)
data_size = np.size(loaded_data.data,0)
rand_perm = np.random.permutation(data_size) # generate random permutation
# Shuffle target and data
shfl_data = loaded_data.data[rand_perm]
shfl_target = loaded_data.target[rand_perm]
# divide train data and test data
offset = np.floor(data_size*0.8) # 80 percent data as training
train_data = shfl_data[:offset,:]
train_target = shfl_target[:offset]
test_data = shfl_data[offset+1:,:]
test_target = shfl_target[offset+1:]

# Build ELM Classifier using default value
elm_classifier = ELMClassifier()
# Train ELM
elm_classifier.fit(train_data,train_target)

predicted_class = elm_classifier.predict(test_data)
error_elm = 1.0 - elm_classifier.score(test_data, test_target)
print 'Predicted Class\t\t', predicted_class
print 'Original Class\t\t', test_target
print 'Error Score\t\t\t', error_elm
print ''

#print '##############################################################\nSINGLE GEN-ELM RESULT\n##############################################################'
from elm import GenELMClassifier
## Build GEN_ELM Classifier using default value
#genelm_classifier = GenELMClassifier()
## Train the ELM
#genelm_classifier.fit(train_data,train_target)
#
示例#10
0
    print "\nTime: %.3f secs" % (time() - start_time)

    print "Test Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (
        min(test_res), np.mean(test_res), max(test_res), np.std(test_res))
    print "Train Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (
        min(train_res), np.mean(train_res), max(train_res), np.std(train_res))
    print
    return (train_res, test_res)


# Iris demo: standardize features, 80/20 split, fit a 500-unit ELM and print
# test predictions and accuracy (Python 2 `print`).
stdsc = StandardScaler()

iris = load_iris()
irx, iry = stdsc.fit_transform(iris.data), iris.target
irx_train, irx_test, iry_train, iry_test = train_test_split(irx,
                                                            iry,
                                                            test_size=0.2)

# NOTE(review): the commented-out variant below passes activation_func=...;
# here 'sigmoid' is passed as activation_args instead -- confirm which keyword
# SimpleRandomHiddenLayer actually expects.
srh = SimpleRandomHiddenLayer(activation_args='sigmoid', n_hidden=500)
elmc = ELMClassifier(hidden_layer=srh)
# elmc = ELMClassifier(SimpleRandomHiddenLayer(activation_func='sigmoid'))
# print "SimpleRandomHiddenLayer(activation_func='sigmoid')"
# tr,ts = res_dist(irx, iry, elmc, n_runs=100, random_state=0)
# plt.hist(tr),plt.hist(tr)
# plt.show()

elmc.fit(irx_train, iry_train)
r = elmc.predict(irx_test)
print r
# Mean accuracy on the held-out 20%.
res = elmc.score(irx_test, iry_test)
print res
示例#11
0
# Dataset-switchable ELM demo (Python 3 `print()` in this snippet): pick one
# dataset, split off a labeled fraction, fit a tanh ELM, report test accuracy.
from elm import ELMClassifier
from sklearn import datasets
from elm import elmUtils, BvsbUtils
import numpy as np

#data = datasets.fetch_olivetti_faces()  # dataset option 1
# data = sklearn.datasets.fetch_covtype()  # option 2
data = datasets.load_iris()  # option 3 (currently active)
# data = datasets.load_digits()  # option 4 -- comment/uncomment to switch datasets
#data = datasets.load_wine()  # option 5
# data = datasets.load_breast_cancer()  # option 6

#data=elmUtils.coverDataFileToData("./data/abalone.data", targetIndex=-1, transformIndex=[0])
#data=elmUtils.readDataFileToData("data/balance-scale.data", targetIndex=0)
#data = datasets.fetch_olivetti_faces()  # sparse matrix: must be converted and dimension-reduced
# When features are not all numeric use needOneHot=True; when the target is
# not numeric use needLabelEncoder=True.
#data.data,data.target=elmUtils.processingData(data.data, data.target)
#data.data=BvsbUtils.dimensionReductionWithPCA(data.data,100) # kddcup99 is too high-dimensional; PCA reduction required
print(data.data.shape)
# Fraction of the data treated as labeled; the rest becomes the test split.
label_size = 0.3

(train_data, test_data) = elmUtils.splitData(data.data, data.target,
                                             1 - label_size)
elmc = ELMClassifier(n_hidden=1000,
                     activation_func='tanh',
                     alpha=1.0,
                     random_state=0)
# splitData presumably returns (features, labels) pairs -- TODO confirm.
elmc.fit(train_data[0], train_data[1])
print(elmc.score(test_data[0], test_data[1]))
示例#12
0
def simulate(trn, tst):
    """Walk-forward trading simulation driven by two per-step ELM classifiers.

    For each test row: compute technical-indicator features in place
    (columns 5-11 of ``tst``), retrain a "buy" ELM on column 12 and a "sell"
    ELM on column 13 using the last 1000 training rows, trade on the
    predictions, then append the normalized row to ``trn``.  Prints summary
    statistics and plots the equity curves at the end.

    Mutates both ``trn`` and ``tst``; depends on module-level indicator
    objects (``id``, ``mnm``, ``obv``, ``bbs``, ``m_9``, ``m12``, ``m26``)
    and normalization vectors ``mn``/``mx`` defined elsewhere in the file.
    b_* trackers follow the buy strategy, s_* the sell strategy.
    """
    start = time.time()
    # Confusion-matrix counters per strategy.
    b_tp = b_fp = b_tn = b_fn = 0
    s_tp = s_fp = s_tn = s_fn = 0
    b_min = s_min = 1000000
    b_max = s_max = 0
    b_money = s_money = 0
    b_money_vec = [0]
    s_money_vec = [0]
    b_gain = s_gain = 0
    b_loss = s_loss = 0
    b_draw = s_draw = 0
    b_gain_vec = []
    s_gain_vec = []
    b_loss_vec = []
    s_loss_vec = []
    # NOTE(review): the drawdown trackers are assigned but never updated/used.
    b_max_drawdown = s_max_drawdown = 0
    b_pos = s_pos = False
    time_vec = []
    aux_ii = len(tst) - 1

    for t, val in enumerate(tst):
        start_i = time.time()

        # NOTE(review): magic skip of step 201 -- reason not visible here.
        if t == 201:
            continue

        # Column 5: log return vs. the previous bar (last training row for t=0).
        # `id` here is a module/object alias shadowing the builtin.
        if t == 0:
            tst[0, 5] = id.log_return(tst[0, 0], tst[0, 3], trn[-1, 0], trn[-1,
                                                                            3])
        else:
            tst[t, 5] = id.log_return(tst[t, 0], tst[t, 3], trn[t - 1, 0],
                                      trn[t - 1, 3])
        # Columns 6-11: momentum, OBV, SMA bands, and MACD-style EMA features.
        tst[t, 6] = mnm.get(val[5])
        tst[t, 7] = obv.get_obv(val[3], val[4])
        aux = bbs.sma(val[3])
        if aux is not None:
            tst[t, 8], tst[t, 9] = aux
        aux_9 = m_9.ema(val[3])
        aux12 = m12.ema(val[3])
        aux26 = m26.ema(val[3])
        tst[t, 10] = aux12 - aux26
        tst[t, 11] = tst[t, 10] - aux_9

        # Sliding training window (last 1000 rows); aux_i is the current row
        # normalized with the mn/mx vectors.
        aux = trn[-1000:]
        aux_i = [(i[1] - mn[i[0]]) * mx[i[0]] for i in enumerate(tst[t, :12])]
        # aux_j = trn[-1000:, :]

        # Retrain both classifiers from scratch every step.
        b_elm = ELMClassifier(random_state=0,
                              n_hidden=200,
                              activation_func='sigmoid',
                              alpha=0.0)
        b_elm.fit(aux[:, :12], aux[:, 12])
        b_res = b_elm.predict([aux_i[:12]])
        s_elm = ELMClassifier(random_state=0,
                              n_hidden=200,
                              activation_func='sigmoid',
                              alpha=0.0)
        s_elm.fit(aux[:, :12], aux[:, 13])
        s_res = s_elm.predict([aux_i[:12]])

        # ---- Buy strategy: enter on predicted 1, exit on predicted 0. ----
        if b_res == 1.0:
            if val[12] == 1.0:
                b_tp += 1
            else:
                b_fp += 1
            if not b_pos:
                # enter position (pay the close price)
                b_money -= val[3]
                b_pos = True
        else:
            if val[12] == 0.0:
                b_tn += 1
            else:
                b_fn += 1
            if b_pos:
                # exit position (receive the close price)
                b_money += val[3]
                b_pos = False
                if b_money < b_money_vec[-1]:
                    b_loss += 1
                    b_loss_vec.append(b_money_vec[-1] - b_money)
                elif b_money > b_money_vec[-1]:
                    b_gain += 1
                    b_gain_vec.append(b_money - b_money_vec[-1])
                else:
                    b_draw += 1
        # Column 14 presumably flags a forced session close -- TODO confirm.
        if val[14] == 1.0:
            # forced exit
            b_money += val[3]
            b_pos = False
            if b_money < b_money_vec[-1]:
                b_loss += 1
                b_loss_vec.append(b_money_vec[-1] - b_money)
            elif b_money > b_money_vec[-1]:
                b_gain += 1
                b_gain_vec.append(b_money - b_money_vec[-1])
            else:
                b_draw += 1

        # Record equity only when flat; track min/max while flat.
        if b_pos:
            b_money_vec.append(b_money_vec[-1])
        else:
            b_money_vec.append(b_money)
            if b_money > b_max:
                b_max = b_money
            if b_money < b_min:
                b_min = b_money

        # ---- Sell strategy: mirror of the buy logic on columns 13/14. ----
        if s_res == 1.0:
            if val[13] == 1.0:
                s_tp += 1
            else:
                s_fp += 1
            if not s_pos:
                # enter short (receive the close price)
                s_money += val[3]
                s_pos = True
        else:
            if val[13] == 0.0:
                s_tn += 1
            else:
                s_fn += 1
            if s_pos:
                # exit short (pay the close price)
                s_money -= val[3]
                s_pos = False
                if s_money < s_money_vec[-1]:
                    s_loss += 1
                    s_loss_vec.append(s_money_vec[-1] - s_money)
                elif s_money > s_money_vec[-1]:
                    s_gain += 1
                    s_gain_vec.append(s_money - s_money_vec[-1])
                else:
                    s_draw += 1
        if val[14] == 1.0:
            # forced exit
            s_money -= val[3]
            s_pos = False
            if s_money < s_money_vec[-1]:
                s_loss += 1
                s_loss_vec.append(s_money_vec[-1] - s_money)
            elif s_money > s_money_vec[-1]:
                s_gain += 1
                s_gain_vec.append(s_money - s_money_vec[-1])
            else:
                s_draw += 1

        if s_pos:
            s_money_vec.append(s_money_vec[-1])
        else:
            s_money_vec.append(s_money)
            if s_money > s_max:
                s_max = s_money
            if s_money < s_min:
                s_min = s_money

        # Grow the training window with the normalized current row + labels.
        # print(aux_i + list(tst[t, 12:]))
        trn = np.append(trn, [aux_i + list(tst[t, 12:])], axis=0)
        time_vec.append(time.time() - start_i)
        sys.stdout.write('\r' + '%6d / %d' % (t, aux_ii) + '\033[K')
    sys.stdout.write('\r' + '>> %6.2f: Simulation Done!\n\n' %
                     (time.time() - start) + '\033[K')

    # Summary report (labels are Portuguese; kept verbatim).
    print('#### ' + sys.argv[1] + ' ####')
    print('Tempo médio: %f' % np.mean(time_vec))
    print('Final      : %5.5f | %5.5f' % (b_money, s_money))
    # print('Final      : %5.5f | %5.5f' % (b_money_vec[-1], s_money_vec[-1]))
    print('Minimo     : %5.5f | %5.5f' % (b_min, s_min))
    print('Maximo     : %5.5f | %5.5f' % (b_max, s_max))
    print('Ganho qtd  : %10d | %10d' % (b_gain, s_gain))
    print('Perda qtd  : %10d | %10d' % (b_loss, s_loss))
    print('Empate qtd : %10d | %10d' % (b_draw, s_draw))
    print('Ganho medio: %5.5f | %5.5f' %
          (np.mean(b_gain_vec), np.mean(s_gain_vec)))
    print('Perda media: %5.5f | %5.5f' %
          (np.mean(b_loss_vec), np.mean(s_loss_vec)))
    print('TP         : %10d | %10d' % (b_tp, s_tp))
    print('FP         : %10d | %10d' % (b_fp, s_fp))
    print('TN         : %10d | %10d' % (b_tn, s_tn))
    print('FN         : %10d | %10d' % (b_fn, s_fn))

    plot(b_money_vec, s_money_vec, sys.argv[1], tst[:, 3])
示例#13
0
from elm import ELMClassifier
from sklearn import linear_model


def load_mnist(path='../Data/mnist.pkl'):
    """Load the pickled MNIST dataset from *path* and return it unchanged.

    NOTE(review): relies on `cPickle` (Python 2), which is not imported in
    this chunk -- presumably imported at the top of the original file.
    """
    with open(path, 'rb') as f:
        return cPickle.load(f)


def get_datasets(data):
    """Unpack (train, val, test) feature/label pairs from *data*.

    *data* is a sequence of three (features, labels) pairs.  Features are
    passed through untouched; each label sequence is converted to an (n, 1)
    NumPy column vector.  Returns the six values flattened into one tuple:
    (train_x, train_y, val_x, val_y, test_x, test_y).
    """
    flattened = []
    for features, labels in data[:3]:
        column = np.array(labels).reshape(len(labels), 1)
        flattened.extend((features, column))
    return tuple(flattened)


if __name__ == '__main__':
    # Load data sets
    train_x, train_y, val_x, val_y, test_x, test_y = get_datasets(load_mnist())
    # Build ELM: multiquadric units with a Ridge regressor as the solver.
    cls = ELMClassifier(n_hidden=7000,
                        alpha=0.93,
                        activation_func='multiquadric',
                        regressor=linear_model.Ridge(),
                        random_state=21398023)
    cls.fit(train_x, train_y)
    # Evaluate model
    # NOTE(review): sklearn-style score() returns accuracy, not error -- the
    # "error" labels below look inverted; confirm against elm's score().
    print 'Validation error:', cls.score(val_x, val_y)
    print 'Test error:', cls.score(test_x, test_y)
示例#14
0
#Make the output from training data
# Label each day by the sign of `changerange` three rows ahead: +1 if
# non-negative, else -1 (Python 2 fragment; X/trainData defined earlier).
y = np.zeros(len(trainData) - 3)
for i in range(0, len(trainData) - 3):
    if trainData['changerange'][i+3] >= 0:
        y[i] = 1
    else:
        y[i] = -1

srhl_tanh = SimpleRandomHiddenLayer(n_hidden=10, activation_func='tanh', random_state=0)
# NOTE(review): srhl_rbf is built but never used below.
srhl_rbf = RBFRandomHiddenLayer(n_hidden=200*2, gamma=0.1, random_state=0)
#create ELM instance
reg = ELMRegressor(srhl_tanh)
cla = ELMClassifier(srhl_tanh)
#fit the data
# fit() returns the estimator itself, so these rebinds are no-ops.
reg = reg.fit(X, y)
cla = cla.fit(X, y)

#Read testing data from testing.csv
testData = pd.read_csv('testing.csv')
pdt = np.zeros(len(testData) - 3)
pdt2 = np.zeros(len(testData) - 3)

# Six features per day: close-open deltas and high-low ranges over a
# three-day window.
for i in range(len(testData) - 3):
    ftr1 = float(testData['close'][i]) - float(testData['open'][i])
    ftr2 = float(testData['close'][i+1]) - float(testData['open'][i+1])
    ftr3 = float(testData['close'][i+2]) - float(testData['open'][i+2])
    # NOTE(review): ftr4-6 parenthesize differently from ftr1-3 (the outer
    # float() wraps the whole subtraction) -- numerically equivalent, but
    # likely unintended inconsistency.
    ftr4 = float(testData['highest'][i] - float(testData['lowest'][i]))
    ftr5 = float(testData['highest'][i+1] - float(testData['lowest'][i+1]))
    ftr6 = float(testData['highest'][i+2] - float(testData['lowest'][i+2]))
    # NOTE(review): predict() is given a 1-D list; modern sklearn-style
    # estimators require a 2-D array ([[...]]) -- confirm elm's API.
    pdt[i] = reg.predict([ftr1, ftr2, ftr3, ftr4, ftr5, ftr6])
    pdt2[i] = cla.predict([ftr1, ftr2, ftr3, ftr4, ftr5, ftr6])
示例#15
0
tr, ts = res_dist(dbx, dby, RandomForestRegressor(n_estimators=15), n_runs=100, random_state=0)
hist(tr), hist(ts)
print

rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
tr,ts = res_dist(dbx, dby, GenELMRegressor(rhl), n_runs=100, random_state=0)
hist(tr), hist(ts)
print
"""
# <codecell>

# Multiquadric ELM on the cusx/cusy data (presumably a custom dataset defined
# earlier -- TODO confirm) with light regularization.  This snippet sits
# between large triple-quoted (disabled) regions of the scraped file.
elmc = ELMClassifier(n_hidden=1000,
                     activation_func='multiquadric',
                     alpha=0.001,
                     random_state=0)
elmc.fit(cusx_train, cusy_train)
print elmc.score(cusx_train, cusy_train), elmc.score(cusx_test, cusy_test)

# <codecell>
"""
elmc = ELMClassifier(n_hidden=500, activation_func='hardlim', alpha=1.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

elmr = ELMRegressor(random_state=0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
示例#16
0
    def run(self):
        """Parse the selectedUserMore dataset, train a sine-activation ELM on
        a 70/30 split, and print the test accuracy.

        File format: each line holds ';'-separated tuples; each tuple is
        'label,feat1_feat2_..._featN'.
        """

        # NOTE(review): not closed via `with`, so the handle leaks on error.
        rawDataset = open('../../selectedUserMore', 'r')

        # Value 0 represents max number of apps => It must be set
        maxValuesByFeature = [6, 4, 2, 2, 2, 0, 4, 5, 3]

        datasetFeatures = []
        datasetLabels = []

        maxApps = self.findMaxNumberOfApps(rawDataset)

        # Setting max number of apps
        # NOTE(review): maxValuesByFeature is never read again in this method.
        maxValuesByFeature[5] = maxApps

        # Rewind after findMaxNumberOfApps consumed the file.
        rawDataset.seek(0)
        for line in rawDataset:
            tuples = line.split(';')

            for t in range(len(tuples)):

                # Each tuple is "label,f1_f2_...": split label from features.
                tup = tuples[t]
                vals = tup.split(',')
                label = vals[0]
                features = vals[1].split('_')

                datasetFeatures.append(features)
                datasetLabels.append(label)

        rawDataset.close()

        nTuples = len(datasetLabels)

        # 70% train / 30% test split, in file order (no shuffling).
        nTrain = int(0.7 * nTuples)

        # NOTE(review): nMaxHidden only matters for the commented-out sweep.
        nMaxHidden = 3005
        totalAccuracy = 0.0

        # NOTE(review): np.int is deprecated/removed in modern NumPy -- would
        # need plain `int` there.
        trainSetFeatures = np.array(datasetFeatures[:nTrain], dtype=np.int)
        trainSetLabels = np.array(datasetLabels[:nTrain], dtype=np.int)

        testSetFeatures = np.array(datasetFeatures[nTrain:], dtype=np.int)
        testSetLabels = np.array(datasetLabels[nTrain:], dtype=np.int)

        # for x in range(200,nMaxHidden):

        ffsn = ELMClassifier(n_hidden=1000, activation_func='sine')
        ffsn.fit(trainSetFeatures, trainSetLabels)

        results = ffsn.predict(testSetFeatures)

        # Manual accuracy count over the test split.
        nPredictedCorrectly = 0

        for test in range(len(testSetLabels)):
            realValue = testSetLabels[test]
            predictedValue = results[test]

            if (int(realValue) == int(predictedValue)):
                nPredictedCorrectly += 1

            # print "Real: " + str(realValue) + " / Predicted: " + str(predictedValue)

        totalTests = nTuples - nTrain
        accuracy = float(nPredictedCorrectly) / totalTests
        # print "N Hidden: " + str(x) + " / Accuracy: " + str(accuracy)
        print "Accuracy: " + str(accuracy)
        totalAccuracy += accuracy
示例#17
0
	def run(self):

		rawDataset = open('../../selectedUserMore', 'r')

		# Value 0 represents max number of apps => It must be set
		maxValuesByFeature = [6,4,2,2,2,0,4,5,3]

		datasetFeatures = []
		datasetLabels = []

		maxApps = self.findMaxNumberOfApps(rawDataset)

		# Setting max number of apps
		maxValuesByFeature[5] = maxApps

		rawDataset.seek(0)
		for line in rawDataset:
			tuples = line.split(';')

			for t in range(len(tuples)):

				
				listFeatureNeurons = []
				listLabelNeurons = []

				tup = tuples[t]
				vals = tup.split(',')
				label = vals[0]
				features = vals[1].split('_')
				
				for feature in range(len(features)):
					val = features[feature]
					listFeatureNeurons += self.createNeuronInput(val, maxValuesByFeature[feature])
				
				listLabelNeurons += self.createNeuronInput(label, maxApps)

				datasetFeatures.append(listFeatureNeurons)
				datasetLabels.append(listLabelNeurons)

		rawDataset.close()		

		nTuples = len(datasetLabels)

		
		nTrain = int(0.7 * nTuples)
				
		nMaxHidden = 3005
		totalAccuracy = 0.0

		trainSetFeatures = datasetFeatures[:nTrain]
		trainSetLabels = datasetLabels[:nTrain]

		testSetFeatures = datasetFeatures[nTrain:]
		testSetLabels = datasetLabels[nTrain:]


		# for x in range(200,nMaxHidden):
		
		print testSetLabels
			
		ffsn = ELMClassifier(n_hidden=100, activation_func='sigmoid')
		ffsn.fit(trainSetFeatures, trainSetLabels)
示例#18
0
# Variant of the k-means/RBF demo: here the active layer uses the
# 'poly_spline' transfer; the k-means centers (ctrs) and unit radii are
# computed but only used by the commented-out alternative below.
nh = 10
(ctrs, _, _) = k_means(xtoy_train, nh)
unit_rs = np.ones(nh)
rhl = RBFRandomHiddenLayer(n_hidden=nh, activation_func='poly_spline', gamma=3)
#rhl = RBFRandomHiddenLayer(n_hidden=nh, activation_func='multiquadric', gamma=1)
#rhl = RBFRandomHiddenLayer(n_hidden=nh, centers=ctrs, radii=unit_rs, gamma=4)
elmr = ELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

# Digits classifier with 100 random RBF units.
rbf_rhl = RBFRandomHiddenLayer(n_hidden=100, random_state=0, gamma=0.1)
elmc_rbf = ELMClassifier(hidden_layer=rbf_rhl)
elmc_rbf.fit(dgx_train, dgy_train)
print elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test)


def powtanh_xfer(activations, power=1.0):
    """Transfer function for the hidden layer: tanh(activations) ** power.

    Accepts scalars or NumPy arrays; with the default power of 1.0 this is a
    plain tanh squashing.
    """
    squashed = np.tanh(activations)
    return squashed ** power


#tanh_rhl = SimpleRandomHiddenLayer(n_hidden=5000, random_state=0)
# Hidden layer using the custom squared-tanh transfer defined above.
tanh_rhl = SimpleRandomHiddenLayer(n_hidden=5000,
                                   activation_func=powtanh_xfer,
                                   activation_args={'power': 2.0})
elmc_tanh = ELMClassifier(hidden_layer=tanh_rhl)
elmc_tanh.fit(dgx_train, dgy_train)
print elmc_tanh.score(dgx_train,
                      dgy_train), elmc_tanh.score(dgx_test, dgy_test)