    m = model_setup.modelDict[model](input_shape, rreg, breg, kreg)
    histories = list()
    testData = list()

    batch_size = 10
    epochs = 400
    for currData in Data:
        X_ts, labels = pp.balanceSlicedData(currData[0], currData[1], target=50, distributed_Output=True, COLUMN_ID='stopId')
        TrainData, TestData = pp.splitDataPandasFormat(X_ts, labels, split=0.3)
        X = pp.shape_Data_to_LSTM_format(TrainData[0], dropChannels, scale=DataScaling)
        y = pp.shape_Labels_to_LSTM_format(TrainData[1])
        callback = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=StopPatience, verbose=1,
                                           mode='auto')
        if X.shape[0] >= batch_size:
            testData.append(TestData)
            histories.append(m.fit(X, y, validation_split=0.2, epochs=epochs, batch_size=batch_size, verbose=2, callbacks=[callback]))

    print('\n results of ' + RunName + '  on Model ' + model + '  with data set ' + filename)
    print('\n epochs: ' + str(epochs) + '\n batch size: ' + str(batch_size) + '\n stop patience: ' + str(
        StopPatience) + '\n scaling: ' + str(DataScaling))
    print('\n bias-regu: ' + 'l1 %.2f, l2 %.2f' % (breg.l1, breg.l2))
    print('\n kernel-regu: ' + 'l1 %.2f, l2 %.2f' % (kreg.l1, kreg.l2))
    print('\n recu-regu: ' + 'l1 %.2f, l2 %.2f' % (rreg.l1, rreg.l2))

    d_Eval.get_overall_results(testData, m, data_pd=True, dropChannels=dropChannels)

    m_Eval.eval_all(histories, epochs, RunName, m, Savepath, testData)
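The `model_setup.modelDict[model](input_shape, rreg, breg, kreg)` call above assumes a dictionary of model factories. A minimal sketch of what such a factory could look like, with illustrative layer size and loss (the real `model_setup` module is not shown here):

from keras.layers import LSTM, Dense, TimeDistributed
from keras.models import Sequential

def lstm_factory(input_shape, rreg, breg, kreg):
    # single recurrent layer with the passed-in regularizers, followed by a
    # per-timestep binary output to match the distributed labels used above
    m = Sequential()
    m.add(LSTM(16, input_shape=input_shape, return_sequences=True,
               kernel_regularizer=kreg, recurrent_regularizer=rreg,
               bias_regularizer=breg))
    m.add(TimeDistributed(Dense(1, activation='sigmoid')))
    m.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return m

# hypothetical registration, e.g. model_setup.modelDict = {'lstm16': lstm_factory}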



Example #2
    callback = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=StopPatience, verbose=1,
                                       mode='auto')
    history = m.fit(X_train,
                    y_train,
                    validation_split=0.2,
                    epochs=epochs,
                    batch_size=batch_size,
                    verbose=2,
                    callbacks=[callback])

    print('\n results of ' + RunName + '  on Model ' + model +
          '  with data set ' + Testfilename)
    print('\n epochs: ' + str(epochs) + '\n batch size: ' + str(batch_size) +
          '\n stop patience: ' + str(StopPatience) + '\n scaling: ' +
          str(DataScaling))

    FP, FN, TP, TN = d_Eval.get_overall_results([(X_test, y_test)], m)
    m_Eval.eval_all([history], epochs, RunName, m, Savepath, TestData)
    MCC = d_Eval.get_MCC(FP, FN, TP, TN)
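    # emit the results as one LaTeX table row: & y & MCC & TP & TN & FP & FN \\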
    print('&y&' + str(MCC)[0:4] + '&' + str(TP) + '&' + str(TN) + '&' +
          str(FP) + '&' + str(FN) + '\\' + '\\')

    SaveInfo.loc[RunName, 'MCC'] = MCC
    SaveInfo.loc[RunName, 'TP'] = TP
    SaveInfo.loc[RunName, 'TN'] = TN
    SaveInfo.loc[RunName, 'FP'] = FP
    SaveInfo.loc[RunName, 'FN'] = FN
    SaveInfo.loc[RunName, 'model'] = model

###### find best Models
NumberOfModels = 10
ModelNameList = []
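The selection of the best runs is cut off above; a plausible continuation, assuming `SaveInfo` is a pandas DataFrame indexed by `RunName` with the columns filled in Example #2:

# rank the finished runs by MCC and keep the top NumberOfModels (illustrative)
BestRuns = SaveInfo.sort_values('MCC', ascending=False).head(NumberOfModels)
ModelNameList = list(BestRuns.index)
print(BestRuns[['model', 'MCC', 'TP', 'TN', 'FP', 'FN']])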
Example #3
import pickle
import numpy as np
import pandas as pd
from keras.models import load_model
from Libraries import data_preprocessing as pp
from Libraries import data_evaluation as d_Eval

ModelPath = '/media/computations/DATA/ExperimentalData/Runs/156417/'
ModelName = 'cross4L_16model'
m = load_model(ModelPath+ModelName+'.h5')

DataSetPath = '/media/computations/DATA/ExperimentalData/DataFiles/systemABCD/'
#TestDataSets = ['center8s_pad_B_TestDataPandas', 'center8s_pad_B_TrainDataPandas', 'center8s_pad_D_TestDataPandas', 'center8s_pad_D_TrainDataPandas', 'center8s_pad_C_TestDataPandas', 'center8s_pad_C_TrainDataPandas']
#TestDataSets = ['center8s_pad_TestDataPandas']
TestDataSets = ['center8s_pad_D_TestDataPandas']

TestData = pd.DataFrame()
TestLabel = pd.DataFrame()

dropChannels = ['time', 'stopId']

for name in TestDataSets:
    Data = pickle.load(open(DataSetPath + name + '.p', 'rb'))
    TestData = pd.concat([TestData, Data[0]])
    TestLabel = pd.concat([TestLabel, Data[1]])

TestData = pp.shape_Data_to_LSTM_format(TestData, dropChannels=dropChannels)
TestLabel = pp.reduceNumpyTD(pp.shape_Labels_to_LSTM_format(TestLabel))

FP, FN, TP, TN = d_Eval.get_overall_results([(TestData, TestLabel)], m)
MCC = d_Eval.get_MCC(FP, FN, TP, TN)
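For reference, `d_Eval.get_MCC` presumably follows the standard definition of the Matthews correlation coefficient over the four counts; a self-contained sketch (not the library code):

import math

def mcc_from_counts(TP, TN, FP, FN):
    # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN));
    # by convention the result is 0 when the denominator would be 0
    denom = math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
    return (TP * TN - FP * FN) / denom if denom else 0.0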
Example #4
    dropChannels = ['time', 'stopId', 'trg1', 'n1', 'trot1', 'tlin1', 'tlin2', 'tamb1']
    X, y = pp.balanceSlicedData(Data[0], Data[1], target=50, distributed_Output=False)

    X = pp.shape_Data_to_LSTM_format(X, dropChannels, scale=DataScaling)
    # y = pp.reduceLabel(y).values           ## not needed if distributed Output in balance sliced Data == false
    y = y.values
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
    class_weight = pp.getClassWeight_Dict(y)

    epochs = 1
    batch_size = 1

    input_shape = (X.shape[1], X.shape[2])
    m = model_setup.modelDict[model](input_shape)

    callback = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=StopPatience, verbose=1, mode='auto')

    history = m.fit(X_train, y_train, validation_split=0.2, epochs=epochs, batch_size=batch_size, verbose=2,
                    class_weight=class_weight, callbacks=[callback])

    print('\n results of ' + RunName + '  on Model ' + model + '  with data set ' + filename)
    print('\n epochs: ' + str(epochs) + '\n batch size: ' + str(batch_size) + '\n stop patience: ' + str(
        StopPatience) + '\n scaling: ' + str(DataScaling))
    print('\n squeals are weighted with ' + str(class_weight[1]))
    d_Eval.get_overall_results([(X_test, y_test)], m)
    m_Eval.eval_all([history], epochs, RunName, m, Savepath, (X_test, y_test))
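`pp.getClassWeight_Dict(y)` is project code that is not shown; a minimal inverse-frequency sketch that produces a dictionary of the same shape ({0: w0, 1: w1}, class 1 being the squeal class), under that assumption:

import numpy as np

def inverse_frequency_weights(y):
    # weight each class by total / (2 * count) so the rarer squeal class
    # contributes more to the loss during fit(..., class_weight=...)
    y = np.asarray(y).ravel().astype(int)
    counts = np.bincount(y, minlength=2)
    total = counts.sum()
    return {cls: total / (2.0 * counts[cls]) for cls in (0, 1) if counts[cls] > 0}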



Example #5
test_data = list()
epochs = 100
for currData in Data:
    seed = 0
    X = pp.shape_Data_to_LSTM_format(currData[0], dropChannels)
    y = pp.shape_Labels_to_LSTM_format(currData[1])
    #y = np.reshape(pp.reduceLabel(y).values, (X.shape[0], 1, 1))
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=seed)
    batch_size = 5
    if X_train.shape[0] >= 2:
        hist = m.fit(X_train,
                     y_train,
                     validation_split=0.2,
                     epochs=epochs,
                     batch_size=batch_size,
                     verbose=2)
        test_data.append((X_test, y_test))

m.save('my_model.h5')
json_string = m.to_json()
FP, FN, TP, TN = eval.get_overall_results(test_data, m)
print('\nMCC: ' + str(eval.get_MCC(FP, FN, TP, TN)))
print('\n' + str(TP) + '  ' + str(FN))
print('\n' + str(FP) + '  ' + str(TN))

#print('\nMCC (sklearn): %.2f' % matthews_corrcoef(y_true, y_pred))
#print(confusion_matrix(y_true, y_pred))
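The model saved in Example #5 can be restored later with the standard Keras loaders; a short sketch (reading the weights back from the same .h5 file for the JSON route is an assumption, not shown in the original):

from keras.models import load_model, model_from_json

# full model (architecture, weights and optimizer state) from the HDF5 file
m_restored = load_model('my_model.h5')

# or rebuild the architecture from the JSON string and load weights separately
m_arch = model_from_json(json_string)
m_arch.load_weights('my_model.h5')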