StopPatience = 30

for i in range(len(RunNames)):
    RunName = RunNames[i]
    # note: every run reuses models[0] and the first train/test files below;
    # only the additional drop channel varies with i
    model = models[0]

    # load the pickled (data, labels) tuples for the training and test runs
    with open(path + TrainNames[0], 'rb') as TrainFile:
        TrainData = pickle.load(TrainFile)
    with open(path + TestNames[0], 'rb') as TestFile:
        TestData = pickle.load(TestFile)

    # channels excluded from the feature matrix; one extra channel is dropped per run
    dropChannels = ['time', 'stopId', 'rh1', 'tempg', 'n1', 'tfld1', 'frc1', 'v1', 'trg1', 'trot1', 'dec1', 'tlin1', 'tlin2', 'tamb1']
    dropChannels.append(additionalDropChannel[i])
    # reshape the data into the (samples, timesteps, features) layout expected by the LSTM
    # and reduce the time-distributed label arrays accordingly
    X_train = pp.shape_Data_to_LSTM_format(TrainData[0], dropChannels, scale=DataScaling)
    y_train = pp.reduceNumpyTD(pp.shape_Labels_to_LSTM_format(TrainData[1]))
    X_test = pp.shape_Data_to_LSTM_format(TestData[0], dropChannels, scale=DataScaling)
    y_test = pp.reduceNumpyTD(pp.shape_Labels_to_LSTM_format(TestData[1]))

    epochs = 300
    batch_size = 10

    class_weight = {0: 1.,
                    1: 1.
                    }

    # model_setup.modelDict maps the model name to a builder that returns the Keras model
    input_shape = (X_train.shape[1], X_train.shape[2])  # (timesteps, features)
    m = model_setup.modelDict[model](input_shape)

    callback = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=StopPatience, verbose=1, mode='auto')
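    # --- The original snippet breaks off after the callback definition. What follows is
    # --- a minimal, hypothetical sketch of how such a setup is typically completed in
    # --- Keras; the fit/evaluate/save calls are assumptions based on the variables
    # --- defined above, not part of the original run script.
    m.fit(X_train, y_train,
          epochs=epochs,
          batch_size=batch_size,
          validation_data=(X_test, y_test),
          class_weight=class_weight,
          callbacks=[callback],
          verbose=1)
    score = m.evaluate(X_test, y_test, batch_size=batch_size)
    m.save(path + RunName + 'model.h5')  # file name pattern assumed from Example #3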
Example #2
DataScaling = True
StopPatience = 15
for i in range(len(RunNames)):
    RunName = RunNames[i]
    Trainfilename = TrainfileNames[0]
    Testfilename = TestfileNames[0]
    model = models[i]

    with open(path + Trainfilename, 'rb') as file:
        TrainData = pickle.load(file)
    with open(path + Testfilename, 'rb') as file:
        TestData = pickle.load(file)

    X_train = TrainData[0]
    y_train = pp.reduceNumpyTD(TrainData[1])

    X_test = TestData[0]
    y_test = pp.reduceNumpyTD(TestData[1])

    epochs = 1
    batch_size = 10

    input_shape = (X_train.shape[1], X_train.shape[2])
    m = model_setup.modelDict[model](input_shape)

    callback = callbacks.EarlyStopping(monitor='val_loss',
                                       min_delta=0,
                                       patience=StopPatience,
                                       verbose=1,
                                       mode='auto')
Example #3
import pickle
import numpy as np
import pandas as pd
from keras.models import load_model
from Libraries import data_preprocessing as pp
from Libraries import data_evaluation as d_Eval

ModelPath = '/media/computations/DATA/ExperimentalData/Runs/156417/'
ModelName = 'cross4L_16model'
m = load_model(ModelPath+ModelName+'.h5')

DataSetPath = '/media/computations/DATA/ExperimentalData/DataFiles/systemABCD/'
#TestDataSets = ['center8s_pad_B_TestDataPandas', 'center8s_pad_B_TrainDataPandas', 'center8s_pad_D_TestDataPandas', 'center8s_pad_D_TrainDataPandas', 'center8s_pad_C_TestDataPandas', 'center8s_pad_C_TrainDataPandas']
#TestDataSets = ['center8s_pad_TestDataPandas']
TestDataSets = ['center8s_pad_D_TestDataPandas']

TestData = pd.DataFrame()
TestLabel = pd.DataFrame()

dropChannels = ['time', 'stopId']

for name in TestDataSets:
    with open(DataSetPath + name + '.p', 'rb') as f:
        Data = pickle.load(f)
    # DataFrame.append is removed in recent pandas versions; pd.concat is the equivalent
    TestData = pd.concat([TestData, Data[0]])
    TestLabel = pd.concat([TestLabel, Data[1]])

TestData = pp.shape_Data_to_LSTM_format(TestData, dropChannels=dropChannels)
TestLabel = pp.reduceNumpyTD(pp.shape_Labels_to_LSTM_format(TestLabel))

FP, FN, TP, TN = d_Eval.get_overall_results([(TestData, TestLabel)], m)
MCC = d_Eval.get_MCC(FP, FN, TP, TN)
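
# For reference: d_Eval.get_MCC presumably implements the standard Matthews correlation
# coefficient. A hypothetical cross-check computed directly from the counts above:
import math
denom = math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
MCC_check = (TP * TN - FP * FN) / denom if denom != 0 else 0.0
print('FP:', FP, 'FN:', FN, 'TP:', TP, 'TN:', TN, 'MCC:', MCC)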