Example #1
def addData(dimensions, startRow, numRows):
    model0 = hpelm.HPELM(dimensions[0], dimensions[1])
    model0.load('/Shared/bdagroup3/pmodelMaster1500.hf')
    model0.add_data('/Shared/bdagroup3/FaceSkinDataset2/XTrainUJ.h5',
                    '/Shared/bdagroup3/FaceSkinDataset2/TTrainUJ.h5',
                    istart=startRow,
                    icount=numRows,
                    fHH="/Shared/bdagroup3/DONOTTOUCH/HH.h5",
                    fHT="/Shared/bdagroup3/DONOTTOUCH/HT.h5")
Example #2
def setupHPELM():
    model0 = hpelm.HPELM(75,
                         75,
                         precision='double',
                         classification='c',
                         tprint=30)
    model0.add_neurons(1000, 'sigm')
    model0.save('/Shared/bdagroup3/model1000.hf')
    return model0
Example #3
def test_HPELM_Sine_BetterThanNaive(self):
    X = "sine/sine_x.h5"
    T = "sine/sine_t.h5"
    Y = "sine/sine_y.h5"
    elm = hpelm.HPELM(1, 1)
    elm.add_neurons(10, "sigm")
    elm.train(X, T)
    elm.predict(X, Y)
    err = elm.error(Y, T)
    self.assertLess(err, 1)
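If the sine HDF5 files do not exist yet, a data-preparation step along these lines could create them; this is a sketch that relies on hpelm's make_hdf5 helper, with a made-up sample count and noise level.

import os
import numpy as np
import hpelm

# Hypothetical data prep: write noisy sine data into the files the test expects.
os.makedirs("sine", exist_ok=True)
x = np.random.uniform(-np.pi, np.pi, (1000, 1))
t = np.sin(x) + np.random.normal(0, 0.1, size=x.shape)
hpelm.make_hdf5(x, "sine/sine_x.h5")
hpelm.make_hdf5(t, "sine/sine_t.h5")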
Example #4
def train():
    for k in range(K):
        X, Y = get_train_data(k)
        elm = hpelm.HPELM(inputs=input_size, outputs=7)
        elm.add_neurons(hidden_num, 'sigm')
        elm.train(X, Y)
        weight = test(elm, X_train, cad_y_train)
        test(elm, X_val, cad_y_val)
        elm_weight.append(weight)
        print('elm_' + str(k) + ' weight:' + str(weight))
        elm.save('./data/adapt_elm' + str(K) + '/elm_' + str(k))
Example #5
def setupHPELM():
    # TODO: Complete parallel trial run.
    # TODO: Complete 3x3, 5x5, 9x9 preprocessing.
    # TODO: Ensemble the above models.
    model0 = hpelm.HPELM(75,
                         75,
                         precision='double',
                         classification='c',
                         tprint=30)
    model0.add_neurons(1500, 'sigm')
    model0.save('/Shared/bdagroup3/pmodelMaster1500.hf')
Example #6
def train():
    for k in range(K):
        X, Y = get_train_data(k)
        elm = hpelm.HPELM(inputs=input_size, outputs=8)
        elm.add_neurons(hidden_num, 'sigm')
        elm.train(X, Y)
        weight = test(elm)
        elm_weight.append(weight)
        print('elm_' + str(k) + ' weight:' + str(weight))
        print('sum:' + str(np.sum(sample_weight)))
        elm.save('./data/adapt_elm/elm_' + str(k))
Example #7
def train():
    for ty in range(3):
        elm_weight = []
        for k in range(K):
            X, Y = get_train_data(k, ty)
            elm = hpelm.HPELM(inputs=features_train[ty].shape[1], outputs=8)
            elm.add_neurons(hidden_num, 'sigm')
            elm.train(X, Y)
            weight = test(elm, ty)
            elm_weight.append(weight)
            print('elm_' + str(k) + ' weight:' + str(weight))
            elm.save('./data/adapt_elm/elm_' + str(ty) + '_' + str(k))
        np.save('./data/adapt_elm/weight_' + str(ty) + '.npy', elm_weight)
Example #8
def predict_elm(X_matrix_train, train_labels, X_matrix_test):
    n_input_columns = len(X_matrix_train.columns)
    n_neurons = int(1 * n_input_columns)

    X_matrix_train = X_matrix_train.to_numpy()
    X_matrix_test = X_matrix_test.to_numpy()
    train_labels = train_labels.to_numpy()

    model = hpelm.HPELM(n_input_columns, 1)
    model.add_neurons(n_neurons, 'tanh')

    model.train(X_matrix_train, train_labels)

    return model.predict(X_matrix_test).flatten()
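A hypothetical call with made-up pandas data, showing the expected input types (DataFrames in, 1-D numpy array out); the column names and values are assumptions.

import pandas as pd

X_train = pd.DataFrame({"a": [0.1, 0.2, 0.3, 0.4], "b": [1.0, 0.9, 0.8, 0.7]})
y_train = pd.DataFrame({"y": [0.5, 0.7, 0.9, 1.1]})
X_test = pd.DataFrame({"a": [0.25], "b": [0.85]})

preds = predict_elm(X_train, y_train, X_test)
print(preds)  # one prediction per test row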
Example #9
def takeMeThurrr(dimensions):
    model0 = hpelm.HPELM(dimensions[0], dimensions[1])
    model0.load('/Shared/bdagroup3/pmodelMaster1500.hf')
    model0.solve_corr("/Shared/bdagroup3/DONOTTOUCH/HH.h5",
                      "/Shared/bdagroup3/DONOTTOUCH/HT.h5")
    print("Solution found.")
    model0.save('/Shared/bdagroup3/pmodelMaster1500.hf')
    print("Model saved.")
    finalModel = hpelm.HPELM(dimensions[0], dimensions[1])
    finalModel.load('/Shared/bdagroup3/pmodelMaster1500.hf')
    finalModel.predict('/Shared/bdagroup3/FaceSkinDataset2/XTrainUJ.h5',
                       '/Shared/bdagroup3/FaceSkinDataset2/pYTrainUJ.h5')
    err_train = finalModel.error(
        "/Shared/bdagroup3/FaceSkinDataset2/pYTrainUJ.h5",
        '/Shared/bdagroup3/FaceSkinDataset2/TTrainUJ.h5')
    print('Classification Training Error: ' + str(err_train))

    finalModel.predict('/Shared/bdagroup3/FaceSkinDataset2/XTestUJ.h5',
                       '/Shared/bdagroup3/FaceSkinDataset2/pYTestUJ.h5')
    err_test = finalModel.error(
        "/Shared/bdagroup3/FaceSkinDataset2/pYTestUJ.h5",
        '/Shared/bdagroup3/FaceSkinDataset2/TTestUJ.h5')
    print('Classification Test Error: ' + str(err_test))
Example #10
def train():
    now_time = 0
    for k in range(K):
        X, Y = get_train_data(k)
        elm = hpelm.HPELM(inputs=input_size, outputs=8)
        elm.add_neurons(hidden_num, 'sigm')
        starttime = datetime.datetime.now()
        elm.train(X, Y)
        endtime = datetime.datetime.now()
        now_time += (endtime - starttime).seconds
        weight = test(elm, X_train, Y_train)
        # test(elm, X_val, Y_val)
        elm_weight.append(weight)
        print('elm_' + str(k) + ' weight:' + str(weight))
        elm.save('./data/adapt_elm' + str(K) + '/elm_' + str(k))
    # return after the loop, so all K models are trained and timed
    return now_time
Example #11
def baggingELM(file1, file2, file3, file4, models):
	# load data
	Xtr = np.load(file1)
	Xts = np.load(file2)
	Ttr = np.load(file3)
	Tts = np.load(file4)

	# keep predictions
	YtsMaxList = []

	basemodels = models

	# train ELMs
	t1 = timeit.default_timer()
	for i in range(basemodels):
		print(i)
		# generate random dataset
		rXtr, rTtr = bootstrapAggregating(Xtr, Ttr)

		model = hpelm.HPELM(rXtr.shape[1], rTtr.shape[1])
		model.add_neurons(1000, 'sigm')

		# per-class weight vector for the weighted-classification solver
		w = np.zeros((rTtr.shape[1],))
		w[0] += 9
		w[1] += 1.125

		model.train(rXtr, rTtr, "wc", w=w)
		# model.save("ELMmodelWeighted_bagging_%d.h5" % i)

		# make prediction
		# print(Xts[0].reshape(1,-1).shape)
		Yts = model.predict(Xts)

		# evaluate classification results
		YtsMax = np.argmax(Yts, 1)
		YtsMaxList.append(list(YtsMax))

	# time
	t2 = timeit.default_timer()
	trainingTime = t2 - t1

	TtsMax = np.argmax(Tts, 1)

	# np.savetxt("baggingELM_predictions_1models.csv", numerator, delimiter=",")
	return YtsMaxList, TtsMax, trainingTime
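baggingELM returns one class-index list per base model and leaves the aggregation to the caller; a sketch of a majority vote over those lists follows (the file names and model count are assumptions).

# Hypothetical aggregation of the bagged predictions.
YtsMaxList, TtsMax, t = baggingELM("Xtr.npy", "Xts.npy", "Ttr.npy", "Tts.npy", 5)

votes = np.vstack(YtsMaxList)          # (n_models, n_samples) class indices
n_classes = votes.max() + 1
counts = np.apply_along_axis(
    lambda c: np.bincount(c, minlength=n_classes), 0, votes)
Yvoted = np.argmax(counts, axis=0)     # most-voted class per sample
print("bagging accuracy: %.3f (%.1fs training)" % (np.mean(Yvoted == TtsMax), t))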
Example #12
def test_adapt_elm(X, Y):
    result = []
    for k in range(K):
        elm = hpelm.HPELM(inputs=input_size, outputs=7)
        elm.add_neurons(hidden_num, 'sigm')
        elm.load('./data/adapt_elm' + str(K) + '/elm_' + str(k))

        predict = elm.predict(X) * elm_weight[k]
        if k == 0:
            result = predict
        else:
            result = result + predict
    counter = 0.0
    for r, t in zip(result, Y):
        if np.argmax(r) == np.argmax(t):
            counter += 1
    right_rate = counter / X.shape[0]
    print('adapt elm right rate:' + str(right_rate))
Example #13
def test_adapt_vote_elm(X, Y):
    result = np.zeros((X.shape[0], 8))
    for k in range(K):
        elm = hpelm.HPELM(inputs=input_size, outputs=8)
        elm.add_neurons(hidden_num, 'sigm')
        elm.load('./data/adapt_elm/elm_' + str(k))

        predict = elm.predict(X)
        max_idx = np.argmax(predict, axis=1)  # winning class per sample
        for n in range(X.shape[0]):
            result[n][max_idx[n]] += 1

    counter = 0.0
    for r, t in zip(result, Y):
        if np.argmax(r) == np.argmax(t):
            counter += 1
    right_rate = counter / X.shape[0]
    print('adapt elm right rate:' + str(right_rate))
Example #14
def multipleELM(file1, file2, file3, file4, models):
    # load data
    Xtr = np.load(file1)
    Xts = np.load(file2)
    Ttr = np.load(file3)
    Tts = np.load(file4)

    # keep predictions
    YtsMaxList = []

    basemodels = models

    # train ELMs
    t1 = timeit.default_timer()
    for i in range(basemodels):
        print(i)

        model = hpelm.HPELM(Xtr.shape[1], Ttr.shape[1])
        model.add_neurons(1000, 'sigm')

        # per-class weight vector for the weighted-classification solver
        w = np.zeros((Ttr.shape[1], ))
        w[0] += 9
        w[1] += 1.125

        model.train(Xtr, Ttr, "wc", w=w)
        # model.save("ELMmodelWeighted_bagging_%d.h5" % i)

        # make prediction
        Yts = model.predict(Xts)

        # evaluate classification results
        YtsMax = np.argmax(Yts, 1)
        YtsMaxList.append(list(YtsMax))

    # time
    t2 = timeit.default_timer()
    trainingTime = t2 - t1

    TtsMax = np.argmax(Tts, 1)

    return YtsMaxList, TtsMax, trainingTime
Example #15
def get_train_data(k):
    if k == 0:
        return cnn_X_train, Y_train
    else:
        elm = hpelm.HPELM(inputs=input_size, outputs=8)
        elm.add_neurons(hidden_num, 'sigm')
        elm.load('./data/adapt_elm/elm_' + str(k - 1))

        new_x, new_y = test_update_weight(elm)
        assert len(new_x) == len(new_y)
        wrong_num = len(new_x)
        rand = np.random.randint(0, train_sample_num, size=int(wrong_num))
        for i in rand:
            new_x.append(cnn_X_train[i])
            new_y.append(Y_train[i])

        new_x = np.array(new_x)
        new_y = np.array(new_y)
        return new_x, new_y
Example #16
def create_model(X_df, y):
    print("Building ELM models...", end=' ', flush=True)
    n_input_columns = len(X_df.columns)
    X_matrix = X_df.to_numpy()
    y = y.to_numpy()

    n_neurons = int(1 * n_input_columns)
    model = hpelm.HPELM(n_input_columns, 1)
    model.add_neurons(n_neurons, 'tanh')

    n_runs = 10
    mae = 0
    for i in range(n_runs):
        X_train, X_test, y_train, y_test = train_test_split(X_matrix, y, test_size=.2)
        model.train(X_train, y_train)
        preds = model.predict(X_test)
        mae += get_MAE(preds, y_test)
    mae /= n_runs

    print(f"MAE for n_neurons={n_neurons}: {mae:.3f}")

    print(colored('Done', 'green'))
Example #17
def test_adapt_elm(X, Y):
    result = []
    for k in range(K):
        elm = hpelm.HPELM(inputs=input_size, outputs=8)
        elm.add_neurons(hidden_num, 'sigm')
        elm.load('./data/adapt_elm' + str(K) + '/elm_' + str(k))

        predict = elm.predict(X) * elm_weight[k]
        if k == 0:
            result = predict
        else:
            result = result + predict
    counter = 0.0
    eachClassRight = [.0 for i in range(8)]
    for r, t in zip(result, Y):
        if np.argmax(r) == np.argmax(t):
            counter += 1
            eachClassRight[np.argmax(r)] += 1
    # confusion_matrix expects 1-D label vectors, so convert the
    # one-hot targets and summed scores to class indices first
    con_mat = confusion_matrix(np.argmax(Y, axis=1), np.argmax(result, axis=1))
    print(con_mat)
    np.save('./con_mat3.npy', con_mat)
    right_rate = counter / X.shape[0]
    print('adapt elm right rate:' + str(right_rate))
Example #18
def get_train_data(k, ty):
    X_train = features_train[ty]
    if k == 0:
        return X_train, Y_train
    else:
        elm = hpelm.HPELM(inputs=X_train.shape[1], outputs=8)
        elm.add_neurons(hidden_num, 'sigm')
        elm.load('./data/adapt_elm/elm_' + str(ty) + '_' + str(k - 1))

        new_x, new_y = test_update_weight(elm, ty)
        assert len(new_x) == len(new_y)
        wrong_num = len(new_x)
        s_num = 0.8 * train_sample_num - wrong_num
        if s_num > 0:
            rand = np.random.randint(0, train_sample_num, size=int(s_num))
            for i in rand:
                new_x.append(X_train[i])
                new_y.append(Y_train[i])

        new_x = np.array(new_x)
        new_y = np.array(new_y)
        return new_x, new_y
Example #19
def addData(dimensions, startRow, numRows):
    # DO NOT PROVIDE numRows arg to model.add_data, so that it uses all the rest of the data!
    model0 = hpelm.HPELM(dimensions[0], dimensions[1])
    model0.load('/Shared/bdagroup3/pmodelMaster1500.hf')
    model0.add_data('/Shared/bdagroup3/FaceSkinDataset2/XTrainUJ.h5', '/Shared/bdagroup3/FaceSkinDataset2/TTrainUJ.h5',
                    istart=startRow, fHH="/Shared/bdagroup3/DONOTTOUCH/HH.h5", fHT="/Shared/bdagroup3/DONOTTOUCH/HT.h5")
Example #20
import hpelm
import numpy as np
import hpelm.modules
import csv

# FUNCTION ELM hyperparameter tuning

model = hpelm.HPELM(2240, 2, classification='wc')  # wc uses [9. 1.125] as w
model.add_neurons(500, 'sigm')

# compute HH and HT matrices
model.add_data('train_x_0.hdf5',
               'train_t_0.hdf5',
               fHH='HH0.hdf5',
               fHT='HT0.hdf5')
l, e, m = model.validation_corr('HH0.hdf5',
                                'HT0.hdf5',
                                'validation_x_0.hdf5',
                                'validation_t_0.hdf5',
                                steps=100)

with open('confusion0.txt', 'w', newline='') as f:
    csvwriter = csv.writer(f)
    csvwriter.writerows(m)

csvData = []
csvData.append(l)
csvData.append(e)
with open('dataset0.csv', 'w', newline='') as csvFile:
    writer = csv.writer(csvFile)
    writer.writerows(csvData)
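Since validation_corr scans neuron counts against the validation set from the cached HH/HT matrices, a short follow-up could report the best count from the returned lists; this assumes, per the unpacking above, that l holds the scanned neuron counts and e the matching errors.

best = int(np.argmin(e))
print('best neuron count: %d (validation error %.4f)' % (l[best], e[best]))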
Example #21
    hog_elm = hpelm.HPELM(inputs=hog_feature_size, outputs=category_num)
    hog_elm.add_neurons(hidden_num, 'sigm')
    # hog_elm.add_neurons(512, 'rbf2')
    hog_elm.train(hog_X_train, Y_train)

    test(hog_elm, hog_X_train, Y_train, type='hog_train')
    test(hog_elm, cad_hog_X_val, cad_y_val, type='hog_val')

    # ------------------- hist elm (disabled) -------------------
    # print('hist_X_train.shape:', cad_hist_X_train.shape)
    # hist_elm = hpelm.HPELM(inputs=hist_size, outputs=category_num)
    # hist_elm.add_neurons(hidden_num, 'sigm')
    # hist_elm.train(cad_hist_X_train, cad_y_train)
    # test(hist_elm, cad_hist_X_train, cad_y_train, type='hist_train')
    # test(hist_elm, cad_hist_X_val, cad_y_val, type='hist_val')

    # ------------------- cnn elm -------------------
    cnn_elm = hpelm.HPELM(inputs=cnn_size, outputs=category_num)
    cnn_elm.add_neurons(hidden_num, 'sigm')
    cnn_elm.train(cnn_X_train, Y_train)
    test(cnn_elm, cnn_X_train, Y_train, type='cnn_train')
    # test(cnn_elm, cnn_X_val, Y_val, type='cnn_val')
Example #22
def boostingELM(file1, file2, file3, file4, model):
    # load data
    Xtr = np.load(file1)
    Xts = np.load(file2)
    Ttr = np.load(file3)
    Tts = np.load(file4)

    # initialize weights
    instanceNumber = Xtr.shape[0]
    uniformDist = 1 / instanceNumber
    weights = np.ones((1, instanceNumber)) * uniformDist

    # keep
    alphaList = []
    YtrMaxList = []
    YtsMaxList = []

    basemodels = model

    t1 = timeit.default_timer()
    for i in range(basemodels):
        # get weighted instances data
        rXtr, rTtr = instanceWeighting(Xtr, Ttr, weights)

        #train model
        model = hpelm.HPELM(rXtr.shape[1], rTtr.shape[1])
        model.add_neurons(1000, 'sigm')

        # per-class weight vector for the weighted-classification solver
        w = np.zeros((rTtr.shape[1], ))
        w[0] += 9
        w[1] += 1.125

        model.train(rXtr, rTtr, "wc", w=w)
        # model.save("ELMmodelWeighted_boosting_%d.h5" % i)

        # make prediction
        Ytr = model.predict(Xtr)
        Yts = model.predict(Xts)

        # evaluate classification results
        YtrMax = np.argmax(Ytr, 1)
        listYtr = list(YtrMax)
        YtrMaxList.extend([listYtr])
        YtsMax = np.argmax(Yts, 1)
        listYts = list(YtsMax)
        YtsMaxList.extend([listYts])
        TtrMax = np.argmax(Ttr, 1)

        # majority vote over the rounds so far (binary 0/1 labels)
        join = np.vstack(YtrMaxList)
        vote_sum = np.sum(join, axis=0)
        n_rounds = join.shape[0]
        YtrVoted = np.array([1 if n_rounds - e < n_rounds / 2 else 0
                             for e in vote_sum])

        # update weights
        weights = np.ones((1, instanceNumber)) / instanceNumber
        totalError = np.sum(np.multiply((YtrVoted != TtrMax), weights))
        if totalError < 0.5:
            print('pass')
            alpha = (1 / 2) * np.log((1 - totalError) / totalError)
            alphaList.append(alpha)
            seq = [
                1 if YtrVoted[i] == TtrMax[i] else -1
                for i in range(instanceNumber)
            ]
            weights = np.multiply(np.exp(np.multiply(-alpha, seq)), weights)
            weightsum = np.sum(weights)
            weights = weights / weightsum

    TtsMax = np.argmax(Tts, 1)
    t2 = timeit.default_timer()
    trainingTime = t2 - t1

    return YtsMaxList, alphaList, TtsMax, trainingTime
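The per-round predictions could then be combined with the returned alphas as a signed, alpha-weighted vote. This sketch assumes binary 0/1 classes and that every round passed the error check, so YtsMaxList and alphaList align; the file names and round count are also assumptions.

# Hypothetical boosted decision from the outputs of boostingELM.
YtsMaxList, alphaList, TtsMax, t = boostingELM("Xtr.npy", "Xts.npy",
                                               "Ttr.npy", "Tts.npy", 10)

rounds = np.vstack(YtsMaxList)    # (n_rounds, n_samples) 0/1 labels
alphas = np.array(alphaList).reshape(-1, 1)
score = np.sum(alphas * np.where(rounds == 1, 1.0, -1.0), axis=0)
Yboosted = (score > 0).astype(int)
print("boosting accuracy:", float(np.mean(Yboosted == TtsMax)))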
Example #23
def auxELMStdReduced(folder1, hiddenNeurons, neuronType):
    # def auxELMStdReduced(folder1, folder2, hiddenNeurons, neuronType):
    # Xts = np.load(os.path.join(folder2, "Xts_reducedStd.npy"))
    # Tts = np.load(os.path.join(folder2, "Tts_reducedStd.npy"))
    folder1 = os.path.join(os.path.dirname(__file__), folder1)
    # folder2 = os.path.join(os.path.dirname(__file__), folder2)
    accuracy = []
    recall = []
    precision = []
    trainingTimeList = []

    for i in range(20):
        Xtr = np.load(os.path.join(folder1, "reducedStd_train_x_%d.npy" % i))
        Xts = np.load(os.path.join(folder1, "reducedStd_test_x_%d.npy" % i))
        Ttr = np.load(os.path.join(folder1, "reducedStd_train_t_%d.npy" % i))
        Tts = np.load(os.path.join(folder1, "reducedStd_test_t_%d.npy" % i))

        # prep weight matrix
        w = np.zeros((Ttr.shape[1], ))
        w[0] += 9
        w[1] += 1.125

        # train model
        model = hpelm.HPELM(Xtr.shape[1], Ttr.shape[1])
        model.add_neurons(hiddenNeurons, neuronType)

        # training speed
        t1 = timeit.default_timer()

        model.train(Xtr, Ttr, 'wc', w=w)
        t2 = timeit.default_timer()
        trainingTime = t2 - t1
        trainingTimeList.append(trainingTime)

        # model.save("ELMmodelWeighted_1000N_reducedStd_%d.h5" % i)

        # make prediction
        Yts = model.predict(Xts)

        # evaluate classification results
        TtsMax = np.argmax(Tts, 1)
        YtsMax = np.argmax(Yts, 1)

        accuracy.append(float(np.sum(YtsMax == TtsMax)) / TtsMax.shape[0])
        # class 0 is treated as the positive class here, hence the
        # tp, fn, fp, tn unpacking order of sklearn's raveled matrix
        tp, fn, fp, tn = confusion_matrix(TtsMax, YtsMax).ravel()
        recall.append(float(tp / (tp + fn)))
        precision.append(float(tp / (tp + fp)))

    # save metrics vectors
    df_accuracy = pd.DataFrame(data=accuracy, columns=["accuracy"])
    df_recall = pd.DataFrame(data=recall, columns=["recall"])
    df_precision = pd.DataFrame(data=precision, columns=["precision"])
    df_time = pd.DataFrame(data=trainingTimeList, columns=["training time"])
    concatenated_metrics = pd.concat(
        [df_accuracy, df_recall, df_precision, df_time], axis=1)
    df_metrics = pd.DataFrame(
        data=concatenated_metrics,
        columns=["accuracy", "recall", "precision", "training time"])

    df_metrics.to_csv("reducedStd_CV_metrics.csv", index=False)
    return accuracy, recall, precision
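A hypothetical invocation of the cross-validation routine above; the folder name, neuron count, and neuron type are assumptions.

acc, rec, prec = auxELMStdReduced("folds", 1000, "sigm")
print("mean accuracy over 20 folds: %.3f" % np.mean(acc))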
Example #24
# --------------------- cnn hist elm -------------------------

cnn_hist_elm = hpelm.HPELM(inputs=late_fusion_cnn_hist_train.shape[1], outputs=category_num)
cnn_hist_elm.add_neurons(hidden_num, 'sigm')
cnn_hist_elm.train(late_fusion_cnn_hist_train, Y_train)
test(cnn_hist_elm, late_fusion_cnn_hist_train, Y_train, type='cnn+hist train')
test(cnn_hist_elm, late_fusion_cnn_hist_val, Y_val, type='cnn+hist val')
# --------------------- cnn hog hist elm -------------------------

hidden_num_s = [4096]
times = []
for hidden_num in hidden_num_s:
    cnn_hist_elm = hpelm.HPELM(inputs=late_fusion_cnn_hog_hist_train.shape[1],
                               outputs=category_num)
    cnn_hist_elm.add_neurons(hidden_num, 'sigm')

    starttime = datetime.datetime.now()
    cnn_hist_elm.train(late_fusion_cnn_hog_hist_train, Y_train)
    endtime = datetime.datetime.now()
    now_time = (endtime - starttime).seconds
    times.append(now_time)
    test(cnn_hist_elm,
         late_fusion_cnn_hog_hist_train,
         Y_train,
         type='cnn+hog+hist train')
    test(cnn_hist_elm,
         late_fusion_cnn_hog_hist_val,
         Y_val,
         type='cnn+hog+hist val')
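The loop above collects per-size training times in times but never reports them; a closing line such as this (hypothetical) would surface them:

print('training seconds per hidden size:', dict(zip(hidden_num_s, times)))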