# NOTE(review): this line is a whitespace-mangled fragment, truncated at both
# ends — it starts mid-way through the argument list of an m.fit(...) call whose
# opening lies outside this chunk, and it ends at the header of a
# `while len(ModelNameList) < NumberOfModels:` loop whose body is also outside
# this chunk. It is preserved byte-for-byte; do not attempt to run it as-is.
#
# Visible behavior (from the tokens alone):
#  * fits the model with validation_split=0.2 and an early-stop callback,
#    then prints a run summary (run name, model, dataset, epochs, batch size,
#    stop patience, scaling flag);
#  * computes confusion-matrix counts FP/FN/TP/TN via d_Eval.get_overall_results
#    on the single held-out split (X_test, y_test) and the Matthews correlation
#    coefficient via d_Eval.get_MCC;
#  * prints a LaTeX-style table row ('&y&' ... '\\') with MCC truncated to 4 chars;
#  * records MCC/TP/TN/FP/FN/model into the SaveInfo DataFrame row keyed by
#    RunName (SaveInfo is presumably a pandas DataFrame — confirm);
#  * then begins a "find best Models" phase: coerces SaveInfo's MCC column to
#    numeric and starts collecting the top NumberOfModels (=10) model names.
y_train, validation_split=0.2, epochs=epochs, batch_size=batch_size, verbose=2, callbacks=[callback]) print('\n results of ' + RunName + ' on Model ' + model + ' with data set ' + Testfilename) print('\n epochs: ' + str(epochs) + '\n batch size: ' + str(batch_size) + '\n stop patience:' + str(StopPatience) + ' \n scaling: ' + str(DataScaling)) FP, FN, TP, TN = d_Eval.get_overall_results([(X_test, y_test)], m) m_Eval.eval_all([history], epochs, RunName, m, Savepath, TestData) MCC = d_Eval.get_MCC(FP, FN, TP, TN) print('&y&' + str(MCC)[0:4] + '&' + str(TP) + '&' + str(TN) + '&' + str(FP) + '&' + str(FN) + '\\' + '\\') SaveInfo.loc[RunName, 'MCC'] = MCC SaveInfo.loc[RunName, 'TP'] = TP SaveInfo.loc[RunName, 'TN'] = TN SaveInfo.loc[RunName, 'FP'] = FP SaveInfo.loc[RunName, 'FN'] = FN SaveInfo.loc[RunName, 'model'] = model ###### find best Models NumberOfModels = 10 ModelNameList = [] MCC_list = pd.to_numeric(SaveInfo.loc[:, 'MCC']) while len(ModelNameList) < NumberOfModels:
# Train the model `m` on every dataset in `Data`, keep each held-out test
# split, persist the trained model, and report confusion-matrix counts and
# the Matthews correlation coefficient (MCC).
#
# External names (defined elsewhere in this file/project):
#   Data         - iterable of (features, labels) pairs -- TODO confirm
#   pp           - preprocessing module (LSTM reshaping helpers)
#   dropChannels - channels to drop during reshaping -- TODO confirm
#   m            - a compiled Keras model
#   eval         - evaluation module alias (shadows the builtin; left as-is
#                  because renaming would break its import elsewhere)
#
# NOTE(review): this block was whitespace-mangled onto a single line in the
# source; the indentation below is a reconstruction from token order — confirm
# the loop/if nesting against the original file.
test_data = list()
epochs = 100
for currData in Data:
    # Fixed seed so the train/test split is reproducible across runs.
    seed = 0
    X = pp.shape_Data_to_LSTM_format(currData[0], dropChannels)
    y = pp.shape_Labels_to_LSTM_format(currData[1])
    #y = np.reshape(pp.reduceLabel(y).values, (X.shape[0], 1, 1))
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)
    batch_size = 5
    # Skip datasets too small to train on (need at least 2 training samples
    # so the 0.2 validation_split inside fit() is non-degenerate).
    if X_train.shape[0] >= 2:
        hist = m.fit(X_train, y_train, validation_split=0.2, epochs=epochs, batch_size=batch_size, verbose=2)
        # Keep the held-out split for the overall evaluation below.
        # NOTE(review): placed inside the size guard so only datasets the model
        # actually trained on are evaluated — confirm against original nesting.
        test_data.append((X_test, y_test))
# Persist weights+architecture, and keep a JSON copy of the architecture.
m.save('my_model.h5')
json_string = m.to_json()
# Aggregate confusion-matrix counts over all collected test splits.
FP, FN, TP, TN = eval.get_overall_results(test_data, m)
print('\nMCC: ' + str(eval.get_MCC(FP, FN, TP, TN)))
# Print the 2x2 confusion matrix: row 1 = TP FN, row 2 = FP TN.
print('\n' + str(TP) + ' ' + str(FN))
print('\n' + str(FP) + ' ' + str(TN))
#print('\n%s: %.2f%%' % matthews_corrcoef(y_true, y_pred))
#print('\n%s: %.2f%%' % confusion_matrix(y_true, y_pred))