def run_network(model=None, data=None, data_file='df_dh.csv', isload_model=False, testonly=False):
    """Train (or only evaluate) an LSTM model on the dataset in *data_file*.

    Parameters
    ----------
    model : compiled Keras model or None; when None one is built via ``LSTM2(X_train)``.
    data : optional pre-split ``(X_train, y_train, X_test, y_test, X_val, Y_val)``
        tuple; when None the split is produced by ``get_data()``.
    data_file : CSV file name forwarded to ``get_data()`` and embedded in output names.
    isload_model : when True, load weights from ``./lstm.h5`` before training
        and save them back after a successful fit.
    testonly : when True, only predict/plot on ``X_test`` and return early.

    Returns
    -------
    ``(model, y_test, predicted)`` on success; ``(model, y_test, 0)`` when
    training is interrupted with Ctrl-C; ``None`` in testonly mode.
    """
    epochs = 3
    sequence_length = SEQ_LENGTH
    if data is None:
        X_train, y_train, X_test, y_test, X_val, Y_val = get_data(
            sequence_length=sequence_length, stateful=STATEFUL, path_to_dataset=data_file)
    else:
        X_train, y_train, X_test, y_test, X_val, Y_val = data
    print('##################################################################')
    print(X_train[..., 1])
    print(X_test.shape)
    if STATEFUL:
        # Stateful LSTMs require the sample count to be an exact multiple of
        # batch_size, so trim the tail of the test split.
        X_test = X_test[:int(X_test.shape[0] / batch_size) * batch_size]
        y_test = y_test[:int(y_test.shape[0] / batch_size) * batch_size]
        print(X_test.shape)
        print(y_test.shape)
        print(X_test[:, :, 1])
    if model is None:
        model = LSTM2(X_train)
    if isload_model:
        # Best effort: continue with fresh weights if the checkpoint file is
        # missing or incompatible with the current architecture.
        try:
            model.load_weights("./lstm.h5")
        except Exception as exc:
            print(str(exc))
    if testonly:
        predicted = model.predict(X_test, verbose=1, batch_size=batch_size)
        predicted_arr = predicted.T.tolist()
        stat_metrics(X_test, y_test, predicted)
        draw_scatter(predicted_arr[0], y_test, X_test, X_train, y_train, data_file)
        return
    try:
        print("###################### fit ######################")
        early_stop = EarlyStopping(monitor='val_loss', patience=20)
        hist = model.fit(
            X_train, y_train,
            batch_size=batch_size,
            # was nb_epoch= (deprecated Keras 1 spelling); the rest of this
            # file already uses the Keras 2 epochs= keyword.
            epochs=epochs,
            validation_data=(X_val, Y_val),
            callbacks=[early_stop],
            shuffle=False)  # keep temporal order of the sequence samples
        print(model.get_config())
        if isload_model:
            model.save_weights("./lstm.h5")
        predicted = model.predict(X_test, verbose=1, batch_size=batch_size)
        stat_metrics(X_test, y_test, predicted)
    except KeyboardInterrupt as ke:
        # Allow Ctrl-C to abort training but still hand back the partly
        # trained model; 0 stands in for the missing predictions.
        print(str(ke))
        return model, y_test, 0
    try:
        predicted_df = pd.DataFrame(predicted)
        y_test_df = pd.DataFrame(y_test)
        predicted_df.to_csv(DATAPATH + str(prefix) + data_file +
                            str(batch_size) + str(sequence_length) + "predicted_df.csv")
        y_test_df.to_csv(DATAPATH + str(prefix) + data_file +
                         str(batch_size) + str(sequence_length) + "y_test_df.csv")
    except Exception as e:
        print("failed save predicted_df")
        raise e
    try:
        print("##############################################")
        print(predicted.shape)
        predicted_arr = predicted.T.tolist()
        draw_scatter(predicted_arr[0], y_test, X_test, X_train, y_train, data_file)
        his_figures(hist)
    except Exception as e:
        print("failed draw picture")
        raise e
    return model, y_test, predicted
def run_regressor(model=LSTM2, sequence_length=SEQ_LENGTH, data=None, data_file='df_dh.csv', isload_model=True, testonly=False):
    """Train a scikit-learn ``KerasRegressor`` wrapper around *model* and
    persist/plot its test-set predictions.

    Parameters
    ----------
    model : callable building a compiled Keras model from ``X_train``.
    sequence_length : window length forwarded to ``get_data()``.
    data : optional pre-split ``(X_train, y_train, X_test, y_test, X_val, Y_val)``
        tuple; when None the split is produced by ``get_data()``.
    data_file : CSV file name forwarded to ``get_data()`` and used in output names.
    isload_model, testonly : accepted for interface compatibility; currently
        unused (the testonly branch is commented out below).

    Returns
    -------
    ``(predicted_df, y_test_df)`` — inverse-scaled predictions and targets
    as DataFrames. Side effects: appends the test MSE to the global ``mses``,
    writes two CSV files, and draws scatter/history/line plots.
    """
    epochs = 20000
    global mses
    if data is None:
        X_train, y_train, X_test, y_test, X_val, Y_val = get_data(
            sequence_length=sequence_length, stateful=STATEFUL, path_to_dataset=data_file)
    else:
        X_train, y_train, X_test, y_test, X_val, Y_val = data
    if STATEFUL:
        # Stateful training needs sample counts that divide evenly by batch_size.
        X_test = X_test[:int(X_test.shape[0] / batch_size) * batch_size]
        y_test = y_test[:int(y_test.shape[0] / batch_size) * batch_size]
    # Default-argument trick binds X_train now so build_fn needs no arguments.
    estimator = KerasRegressor(build_fn=lambda x=X_train: model(x))
    # if testonly == True:
    #     # predicted = model.predict(X_test, verbose=1,batch_size=batch_size)
    #     prediction = estimator.predict(X_test)
    #     stat_metrics(X_test, y_test, prediction)
    #     draw_scatter(predicted_arr[0], y_test, X_test, X_train, y_train, data_file)
    #     return
    early_stopping = EarlyStopping(monitor='val_loss', verbose=1, patience=40)
    checkpoint = ModelCheckpoint("./lstm.h5", monitor='val_loss', verbose=1,
                                 save_best_only=True, save_weights_only=True)
    # BUG FIX: early_stopping was constructed but never passed to fit(), so
    # training always ran all 20000 epochs regardless of val_loss.
    hist = estimator.fit(X_train, y_train,
                         validation_data=(X_val, Y_val),
                         callbacks=[early_stopping, checkpoint],
                         epochs=epochs, batch_size=batch_size, verbose=1)
    # Predict once and reuse; the original called estimator.predict(X_test)
    # three separate times (mean_squared_error, estimator.score, prediction).
    prediction = estimator.predict(X_test)
    score = mean_squared_error(y_test, prediction)
    print(score)
    mses.append(score)
    print(prediction)
    print(X_test)
    print("##############################################")
    global scaler
    # Map predictions/targets back to the original (pre-scaling) units.
    prediction_, y_test_, y_train_ = inverse_xy_transform(
        scaler, prediction, y_test, y_train)
    predicted_df = pd.DataFrame(prediction_)
    y_test_df = pd.DataFrame(y_test_)
    predicted_df.to_csv(DATAPATH + str(prefix) + data_file +
                        str(batch_size) + str(sequence_length) + "predicted_df.csv")
    y_test_df.to_csv(DATAPATH + str(prefix) + data_file +
                     str(batch_size) + str(sequence_length) + "y_test_df.csv")
    draw_scatter(prediction, y_test, X_test, X_train, y_train, data_file)
    his_figures(hist)
    draw_line(prediction, y_test, X_test, X_train, y_train, data_file)
    return predicted_df, y_test_df
# NOTE(review): this second definition of run_regressor silently shadows the
# earlier one (different signature: no sequence_length parameter, fewer
# epochs, no CSV export, returns None). Consider renaming or deleting one.
def run_regressor(model=LSTM2, data=None, data_file='df_dh.csv', isload_model=True, testonly=False):
    """Train a ``KerasRegressor`` around *model* and plot its test predictions.

    Parameters
    ----------
    model : callable building a compiled Keras model from ``X_train``.
    data : optional pre-split ``(X_train, y_train, X_test, y_test, X_val, Y_val)``
        tuple; when None the split is produced by ``get_data()``.
    data_file : CSV file name forwarded to ``get_data()``.
    isload_model, testonly : accepted for interface compatibility; unused.

    Returns
    -------
    None (unlike the earlier run_regressor, which returns DataFrames).
    Side effects: prints the test MSE, draws scatter and history plots.
    """
    epochs = 8000
    sequence_length = SEQ_LENGTH
    if data is None:
        X_train, y_train, X_test, y_test, X_val, Y_val = get_data(
            sequence_length=sequence_length, stateful=STATEFUL, path_to_dataset=data_file)
    else:
        X_train, y_train, X_test, y_test, X_val, Y_val = data
    if STATEFUL:
        # Stateful training needs sample counts that divide evenly by batch_size.
        X_test = X_test[:int(X_test.shape[0] / batch_size) * batch_size]
        y_test = y_test[:int(y_test.shape[0] / batch_size) * batch_size]
    # Default-argument trick binds X_train now so build_fn needs no arguments.
    estimator = KerasRegressor(build_fn=lambda x=X_train: model(x))
    # if testonly == True:
    #     # predicted = model.predict(X_test, verbose=1,batch_size=batch_size)
    #     prediction = estimator.predict(X_test)
    #     stat_metrics(X_test, y_test, prediction)
    #     draw_scatter(predicted_arr[0], y_test, X_test, X_train, y_train, data_file)
    #     return
    early_stopping = EarlyStopping(monitor='val_loss', verbose=1, patience=20)
    checkpoint = ModelCheckpoint("./lstm.h5", monitor='val_loss', verbose=1,
                                 save_best_only=True, save_weights_only=True)
    # BUG FIX: early_stopping was constructed but never passed to fit(), so
    # training always ran all 8000 epochs regardless of val_loss.
    hist = estimator.fit(X_train, y_train,
                         validation_data=(X_val, Y_val),
                         callbacks=[early_stopping, checkpoint],
                         epochs=epochs, batch_size=batch_size, verbose=1)
    # Predict once and reuse; the original called estimator.predict(X_test)
    # three separate times (mean_squared_error, estimator.score, prediction).
    prediction = estimator.predict(X_test)
    score = mean_squared_error(y_test, prediction)
    print(score)
    # NOTE(review): the five inverse-transformed arrays below are never used;
    # the plots are drawn from the scaled values. Also, sklearn scalers expect
    # 2-D input, so inverse_transform on a 3-D X_test/X_train presumably
    # raises — confirm scaler's type and whether this block is dead code.
    prediction_trans = scaler.inverse_transform(prediction)
    X_test_trans = scaler.inverse_transform(X_test)
    y_test_trans = scaler.inverse_transform(y_test)
    X_train_trans = scaler.inverse_transform(X_train)
    y_train_trans = scaler.inverse_transform(y_train)
    print(prediction)
    print(X_test)
    print("##############################################")
    draw_scatter(prediction, y_test, X_test, X_train, y_train, data_file)
    his_figures(hist)