def lstm(self, path=None, name=None):
    """Train a stateful LSTM regressor and evaluate it on the test split.

    The feature matrices are reshaped to (samples, 1, features) so each
    sample is presented as a single-timestep sequence. Training runs 50
    manual epochs with batch size 1, resetting the LSTM state between
    epochs.

    Args:
        path: unused; kept for interface compatibility.
        name: unused; kept for interface compatibility.

    Returns:
        Tuple (rmse, mae, mape) computed on ``self.y_test``.
    """
    train_seq = np.reshape(self.X_train,
                           (self.X_train.shape[0], 1, self.X_train.shape[1]))
    test_seq = np.reshape(self.X_test,
                          (self.X_test.shape[0], 1, self.X_test.shape[1]))

    net = Sequential()
    # Stateful LSTM: the batch size is fixed at 1 so state carries
    # across consecutive samples within an epoch.
    net.add(LSTM(32,
                 batch_input_shape=(1, train_seq.shape[1], train_seq.shape[2]),
                 stateful=True))
    net.add(Dense(1))
    net.compile(loss='mean_squared_error', optimizer='adam')

    # Manual epoch loop: state is reset between epochs but preserved
    # within one unshuffled pass over the training data.
    for _ in range(50):
        net.fit(train_seq, self.y_train, epochs=1, batch_size=1,
                verbose=self.VERBOSE, shuffle=False)
        net.reset_states()

    y_pred = net.predict(test_seq, batch_size=1).reshape(-1)

    loss_mse = MeanSquaredError()(self.y_test, y_pred).numpy()
    loss_rmse = np.sqrt(loss_mse)
    loss_mae = MeanAbsoluteError()(self.y_test, y_pred).numpy()
    loss_mape = MeanAbsolutePercentageError()(self.y_test, y_pred).numpy()
    return loss_rmse, loss_mae, loss_mape
def dnn(self, path=None, name=None):
    """Train a deep feed-forward regressor and report test-set losses.

    Side effects: prints the training-set size and the model summary,
    saves '<name>.png' with a real-vs-prediction plot, shows the plot,
    and dumps '<name>-real.xlsx' / '<name>-prediction.xlsx'.

    Args:
        path: unused; kept for interface compatibility.
        name: basename for the saved plot and Excel files.

    Returns:
        Tuple (rmse, mae, mape) computed on ``self.y_test``.
    """
    print(len(self.X_train))

    net = Sequential()
    net.add(Dense(self.INPUT_DIM, input_shape=(self.INPUT_DIM, )))
    net.add(Activation('relu'))
    # Seven hidden layers of equal width.
    for _ in range(7):
        net.add(Dense(self.N_HIDDEN))
        net.add(Activation('relu'))
    net.add(Dense(1))
    net.add(Activation('linear'))
    net.summary()
    net.compile(loss='mse', optimizer=self.OPTIMIZER, metrics=['accuracy'])

    net.fit(self.X_train, self.y_train, epochs=self.NB_EPOCH,
            verbose=self.VERBOSE)
    y_pred = net.predict(self.X_test).reshape(-1)

    plt.plot(self.y_test)
    plt.plot(y_pred)
    plt.legend(['real', 'prediction'])
    plt.savefig(name + '.png')
    plt.show()

    # Persist ground truth and predictions for offline inspection.
    pd.DataFrame(self.y_test).to_excel(name + '-real.xlsx')
    pd.DataFrame(y_pred).to_excel(name + '-prediction.xlsx')

    loss_mse = MeanSquaredError()(self.y_test, y_pred).numpy()
    loss_rmse = np.sqrt(loss_mse)
    loss_mae = MeanAbsoluteError()(self.y_test, y_pred).numpy()
    loss_mape = MeanAbsolutePercentageError()(self.y_test, y_pred).numpy()
    return loss_rmse, loss_mae, loss_mape
def CNN_LSTM(self, name=None):
    """Train a 1-D CNN + LSTM hybrid on sequences of 64 timesteps.

    Converts the feature DataFrames to arrays reshaped as
    (samples, 64, -1), trains on CPU for 2000 epochs, saves a
    real-vs-prediction plot to './results/<name>.png', and returns the
    test-set losses.

    Args:
        name: basename for the saved plot file.

    Returns:
        Tuple (rmse, mse, mae, mape) computed on ``self.y_test``.
    """
    # NOTE: mutates self.X_train / self.X_test from DataFrame to ndarray.
    self.X_train = self.X_train.values
    self.X_test = self.X_test.values
    trainX = np.reshape(self.X_train, (self.X_train.shape[0], 64, -1))
    testX = np.reshape(self.X_test, (self.X_test.shape[0], 64, -1))
    with tf.device('/device:CPU:0'):
        # Functional-API model. BUG FIX: the original also instantiated an
        # unused Sequential() here that was immediately shadowed by the
        # functional Model below; the dead object has been removed.
        input_layer = Input(shape=(64, 1))
        conv1 = Conv1D(filters=32, kernel_size=8, strides=1,
                       activation='relu', padding='same')(input_layer)
        lstm1 = LSTM(32, return_sequences=True)(conv1)
        output_layer = Dense(1, activation='linear')(lstm1)
        model = Model(inputs=input_layer, outputs=output_layer)
        model.compile(loss='mse', optimizer='adam')
        model.fit(trainX, self.y_train, epochs=2000, batch_size=32,
                  verbose=self.VERBOSE)
        y_pred = model.predict(testX, batch_size=1)
        y_pred = y_pred.reshape(-1)
        print(self.y_test.shape)
        print(y_pred.shape)
        plt.plot(self.y_test)
        plt.plot(y_pred)
        plt.legend(['real', 'prediction'])
        plt.savefig(f'./results/{name}.png')
        plt.clf()
        mse = MeanSquaredError()
        loss_mse = mse(self.y_test, y_pred).numpy()
        loss_rmse = np.sqrt(loss_mse)
        mae = MeanAbsoluteError()
        loss_mae = mae(self.y_test, y_pred).numpy()
        mape = MeanAbsolutePercentageError()
        loss_mape = mape(self.y_test, y_pred).numpy()
        return loss_rmse, loss_mse, loss_mae, loss_mape
def CNN(self, name=None):
    """Train a 1-D convolutional regressor on 9-step sequences.

    Converts the feature DataFrames to arrays reshaped as
    (samples, 9, -1), trains on CPU for 2000 epochs, saves a
    real-vs-prediction plot to './results/<name>.png', and returns the
    test-set losses.

    Args:
        name: basename for the saved plot file.

    Returns:
        Tuple (rmse, mse, mae, mape) computed on ``self.y_test``.
    """
    # Mutates self.X_train / self.X_test from DataFrame to ndarray.
    self.X_train = self.X_train.values
    self.X_test = self.X_test.values
    train_seq = np.reshape(self.X_train, (self.X_train.shape[0], 9, -1))
    test_seq = np.reshape(self.X_test, (self.X_test.shape[0], 9, -1))

    with tf.device('/device:CPU:0'):
        net = Sequential()
        net.add(Conv1D(filters=64, kernel_size=3, activation='relu',
                       input_shape=(train_seq.shape[1], train_seq.shape[2])))
        # Heavy dropout after the conv layer to curb overfitting.
        net.add(Dropout(0.5))
        net.add(Flatten())
        net.add(Dense(100, activation='relu'))
        net.add(Dense(1, activation='linear'))
        net.compile(loss='mse', optimizer='adam')
        net.fit(train_seq, self.y_train, epochs=2000, batch_size=32,
                verbose=self.VERBOSE)

        y_pred = net.predict(test_seq, batch_size=1).reshape(-1)
        print(self.y_test.shape)
        print(y_pred.shape)

        plt.plot(self.y_test)
        plt.plot(y_pred)
        plt.legend(['real', 'prediction'])
        plt.savefig(f'./results/{name}.png')
        plt.clf()

        loss_mse = MeanSquaredError()(self.y_test, y_pred).numpy()
        loss_rmse = np.sqrt(loss_mse)
        loss_mae = MeanAbsoluteError()(self.y_test, y_pred).numpy()
        loss_mape = MeanAbsolutePercentageError()(self.y_test, y_pred).numpy()
        return loss_rmse, loss_mse, loss_mae, loss_mape
def lstm(self, path=None, name=None):
    """Train a stateful 128-unit LSTM and evaluate it on the test split.

    Converts the feature DataFrames to arrays, reshapes them to
    (samples, 1, features), runs 2000 manual epochs with batch size 1
    (state reset between epochs), saves a real-vs-prediction plot to
    './results/<name>.png', and returns the test-set losses.

    Args:
        path: unused; kept for interface compatibility.
        name: basename for the saved plot file.

    Returns:
        Tuple (rmse, mse, mae, mape) computed on ``self.y_test``.
    """
    # Mutates self.X_train / self.X_test from DataFrame to ndarray.
    self.X_train = self.X_train.values
    self.X_test = self.X_test.values
    train_seq = np.reshape(self.X_train,
                           (self.X_train.shape[0], 1, self.X_train.shape[1]))
    test_seq = np.reshape(self.X_test,
                          (self.X_test.shape[0], 1, self.X_test.shape[1]))

    net = Sequential()
    net.add(LSTM(128,
                 batch_input_shape=(1, train_seq.shape[1], train_seq.shape[2]),
                 stateful=True))
    net.add(Dense(1))
    net.compile(loss='mean_squared_error', optimizer='adam')
    net.summary()

    # Manual epoch loop so the LSTM state can be reset between epochs
    # while persisting within one unshuffled pass over the data.
    for _ in range(2000):
        net.fit(train_seq, self.y_train, epochs=1, batch_size=1,
                verbose=self.VERBOSE, shuffle=False)
        net.reset_states()

    y_pred = net.predict(test_seq, batch_size=1).reshape(-1)

    plt.plot(self.y_test)
    plt.plot(y_pred)
    plt.legend(['real', 'prediction'])
    plt.savefig(f'./results/{name}.png')
    plt.clf()

    loss_mse = MeanSquaredError()(self.y_test, y_pred).numpy()
    loss_rmse = np.sqrt(loss_mse)
    loss_mae = MeanAbsoluteError()(self.y_test, y_pred).numpy()
    loss_mape = MeanAbsolutePercentageError()(self.y_test, y_pred).numpy()
    return loss_rmse, loss_mse, loss_mae, loss_mape
def lstm_with_sequence(self, path=None, name=None):
    """Train a stateless LSTM on 3-feature sequences and report losses.

    The feature matrices are reshaped to (samples, timesteps, 3) —
    the timestep count is inferred from the feature width. Trains on
    CPU for 300 epochs, saves a real-vs-prediction plot to
    './results/<name>.png', and returns the test-set losses.

    Args:
        path: unused; kept for interface compatibility.
        name: basename for the saved plot file.

    Returns:
        Tuple (rmse, mse, mae, mape) computed on ``self.y_test``.
    """
    # Mutates self.X_train / self.X_test from DataFrame to ndarray.
    self.X_train = self.X_train.values
    self.X_test = self.X_test.values
    train_seq = np.reshape(self.X_train, (self.X_train.shape[0], -1, 3))
    test_seq = np.reshape(self.X_test, (self.X_test.shape[0], -1, 3))

    with tf.device('/device:CPU:0'):
        net = Sequential()
        net.add(LSTM(128, activation='relu',
                     input_shape=(train_seq.shape[1], train_seq.shape[2])))
        net.add(Dense(1))
        net.compile(loss='mean_squared_error', optimizer='adam')
        # Chronological data: keep shuffle disabled.
        net.fit(train_seq, self.y_train, epochs=300, batch_size=10,
                verbose=self.VERBOSE, shuffle=False)

        y_pred = net.predict(test_seq, batch_size=1).reshape(-1)
        print(self.y_test.shape)
        print(y_pred.shape)

        plt.plot(self.y_test)
        plt.plot(y_pred)
        plt.legend(['real', 'prediction'])
        plt.savefig(f'./results/{name}.png')
        plt.clf()

        loss_mse = MeanSquaredError()(self.y_test, y_pred).numpy()
        loss_rmse = np.sqrt(loss_mse)
        loss_mae = MeanAbsoluteError()(self.y_test, y_pred).numpy()
        loss_mape = MeanAbsolutePercentageError()(self.y_test, y_pred).numpy()
        return loss_rmse, loss_mse, loss_mae, loss_mape
def getErrors(self, predictions, test_data):
    """Return (mse, mae, mape) between *predictions* and *test_data*.

    NOTE(review): both arrays get ``self.batch_size`` prepended via
    ``np.insert`` before the metrics are evaluated. The extra element is
    identical in both arrays so it contributes zero error, but it still
    dilutes the averages by one sample — confirm this is intentional.
    """
    padded_pred = np.insert(predictions, 0, self.batch_size, axis=0)
    padded_true = np.insert(test_data, 0, self.batch_size, axis=0)

    mse_outcome = MeanSquaredError()(padded_pred, padded_true).numpy()
    mae_outcome = MeanAbsoluteError()(padded_pred, padded_true).numpy()
    mape_outcome = MeanAbsolutePercentageError()(padded_pred, padded_true).numpy()
    return (mse_outcome, mae_outcome, mape_outcome)
def dnn(self, path=None, name=None):
    """Train a fully-connected regressor on GPU and report test losses.

    Builds an MLP with an input-width first layer, three hidden layers
    of ``self.N_HIDDEN`` units, and a single linear output unit; trains
    for ``self.NB_EPOCH`` epochs, saves a real-vs-prediction plot to
    './results/<name>.png', and returns the test-set losses.

    Args:
        path: unused; kept for interface compatibility.
        name: basename for the saved plot file.

    Returns:
        Tuple (rmse, mse, mae, mape) computed on ``self.y_test``.
    """
    with tf.device('/device:GPU:0'):
        model = Sequential()
        model.add(Dense(self.X_train.shape[1],
                        input_shape=(self.X_train.shape[1],)))
        model.add(Activation('relu'))
        for i in range(3):
            model.add(Dense(self.N_HIDDEN))
            model.add(Activation('relu'))
        model.add(Dense(1))
        # BUG FIX: the original used Activation('soft-max'), which is not
        # a valid Keras activation identifier and raises at model build.
        # A single-unit regression head trained with MSE uses a linear
        # activation, matching the other dnn() variant in this project.
        model.add(Activation('linear'))
        model.summary()
        model.compile(loss='mse', optimizer=self.OPTIMIZER,
                      metrics=['accuracy'])
        history = model.fit(self.X_train, self.y_train,
                            epochs=self.NB_EPOCH, verbose=self.VERBOSE)
        print(self.X_train)
        y_pred = model.predict(self.X_test)
        y_pred = y_pred.reshape(-1)
        plt.plot(self.y_test)
        plt.plot(y_pred)
        plt.legend(['real', 'prediction'])
        plt.savefig(f'./results/{name}.png')
        plt.clf()
        mse = MeanSquaredError()
        loss_mse = mse(self.y_test, y_pred).numpy()
        loss_rmse = np.sqrt(loss_mse)
        mae = MeanAbsoluteError()
        loss_mae = mae(self.y_test, y_pred).numpy()
        mape = MeanAbsolutePercentageError()
        loss_mape = mape(self.y_test, y_pred).numpy()
        return loss_rmse, loss_mse, loss_mae, loss_mape
def svr(self):
    """Fit a support-vector regressor and return test (rmse, mae, mape).

    NOTE(review): the original imported StandardScaler but never added
    it to the pipeline; the unused import has been removed rather than
    changing the model's behavior. If feature scaling inside the
    pipeline was the intent, add StandardScaler() as the first pipeline
    step — confirm against how callers pre-scale the data.

    Returns:
        Tuple (rmse, mae, mape) computed on ``self.y_test``.
    """
    from sklearn.svm import SVR
    from sklearn.pipeline import make_pipeline

    regr = make_pipeline(SVR(C=1.0, epsilon=0.2))
    regr.fit(self.X_train, self.y_train)
    y_pred = regr.predict(self.X_test)
    y_pred = y_pred.reshape(-1)

    mse = MeanSquaredError()
    loss_mse = mse(self.y_test, y_pred).numpy()
    loss_rmse = np.sqrt(loss_mse)
    mae = MeanAbsoluteError()
    loss_mae = mae(self.y_test, y_pred).numpy()
    mape = MeanAbsolutePercentageError()
    loss_mape = mape(self.y_test, y_pred).numpy()
    return loss_rmse, loss_mae, loss_mape
model.add(LSTM(50, return_sequences=True, activation=activation_funct)) if useDropout: model.add(Dropout(dropout)) model.add(LSTM(50, return_sequences=False, activation=activation_funct)) model.add(Dense(1, activation=activation_funct)) optimizer = RMSprop(learning_rate=learning_rate) model.compile(optimizer=optimizer, loss=loss) return model, scaler learning_rate = 0.001 momentum = 0.9 # optimizer = RMSprop(learning_rate=learning_rate) created in each RNN class loss = 'mean_absolute_percentage_error' mape = MeanAbsolutePercentageError() activation_funct = 'elu' epochs = 100 batch_size = 150 dropout = 0.2 train_data_columns = ['high'] test_data_column = ['high'] day_ranges = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60] def get_data(day_range): full_apple_train = get_stock_data("AAPL", "2003-02-10", "2004-09-12") last_59_train = full_apple_train[-(day_range - 1):] part_apple_test = get_stock_data("AAPL", "2004-09-13", "2005-01-22") full_apple_test = last_59_train.append(part_apple_test) full_ibm_train = get_stock_data("IBM", "2003-02-10", "2004-09-12")
def main():
    """Run the month-by-month LSTM+MVO experiment for one stack/gas pair.

    Reads the stack's Excel sheet, splits the gas series (plus the W_S
    and T columns) into five monthly segments, trains an LSTM whose
    weights are optimised by MVO for each month, and writes the error
    table and the real/predicted series to Excel files.
    """
    # Identifiers that parameterise the output file names.
    table = 'Table2'
    stack = 'Stack2'
    gas = 'SO2'
    NN = 'LSTM'
    OP = 'MVO'
    data = pd.read_excel(stack + '.xlsx')
    # One-off cleaning steps, kept for reference:
    # data = data.set_index(data.iloc[:, 0])
    # data = data.iloc[:, 1:]
    # data = data.dropna()
    # data = data.iloc[1:]
    # data.to_excel('Stack2.xlsx')
    W_S = data['W_S']  # presumably wind speed — TODO confirm column meaning
    T = data['T']      # presumably temperature — TODO confirm column meaning
    data = data[gas]
    # Monthly segment boundaries (row indices): 250 490 737 985
    may = data[:250]
    june = data[250:490]
    july = data[490:737]
    agust = data[737:985]
    september = data[985:]
    may_w_s = W_S[:250]
    june_w_s = W_S[250:490]
    july_w_s = W_S[490:737]
    agust_w_s = W_S[737:985]
    september_w_s = W_S[985:]
    may_t = T[:250]
    june_t = T[250:490]
    july_t = T[490:737]
    agust_t = T[737:985]
    september_t = T[985:]
    d = [may, june, july, agust, september]
    d_w_s = [may_w_s, june_w_s, july_w_s, agust_w_s, september_w_s]
    d_t = [may_t, june_t, july_t, agust_t, september_t]
    dd = ['may', 'june', 'july', 'agust', 'september']
    BS = 3      # window size passed to prepare_data_window
    TS = None
    p = dict()   # month name -> [rmse, mae, mape]
    pp = dict()  # output file name -> real / predicted series
    # Earlier SVR baseline, kept for reference:
    # for i in range(5):
    #     dnn = DNN(d[i], BS, TS)
    #     rmse, mae, mape = dnn.svr()
    #     p[dd[i]] = [rmse, mae, mape]
    # pp = pd.DataFrame(p)
    # pp.to_excel('2so.xlsx')
    for i in range(5):
        X = d[i]
        # Build windowed features augmented with wind speed and temperature.
        X, y = prepare_data_window(X, BS, d_w_s[i], d_t[i])
        y = np.array(y)
        y = y.reshape(-1, 1)
        # Scale features and target independently to [0, 1].
        scaler_X = MinMaxScaler()
        scaler_X = scaler_X.fit(X)
        X = scaler_X.transform(X)
        scaler_y = MinMaxScaler()
        scaler_y = scaler_y.fit(y)
        y = scaler_y.transform(y)
        y = y.reshape(-1, 1)
        # LSTM************************
        # Chronological split (no shuffling) for time-series data.
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.33, shuffle=False)
        X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
        X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
        y_train = np.array(y_train)
        y_test = np.array(y_test)
        y_train = y_train.flatten()
        y_test = y_test.flatten()
        input_shape = X_train.shape
        model = Model().get_lstm(input_shape)
        # Flatten the network parameters into one vector so the
        # metaheuristic optimiser can search over it.
        Wb = get_weights_and_biases(model)
        Wb_flatten = flatten_weights_and_biases(Wb)
        dimensions = len(Wb_flatten)
        # # PSO (alternative optimiser, kept for reference)
        # pso = PSO(model, dimensions, X_train, y_train, None,
        #           init_weights=None, n_iteration=50)
        # cost, pos = pso.PSO()
        # MVO: search the flattened weight space for this month's model.
        mvo = MVO(model, dimensions, X_train, y_train)
        cost, pos = mvo.MVO()
        # Rebuild a fresh model and load the optimised weights back in.
        model = Model().get_lstm(input_shape)
        Wb_model = unflatten_weights_and_biases(Wb, pos)
        model = put_weights_and_biases(model, Wb_model)
        y_pred = model.predict(X_test)
        # # mse = MeanSquaredError()
        # # loss = mse(y_test, y_pred).numpy()
        # #
        # # with open(f'./Results/{file_name}', 'w') as f:
        # #     f.write(str(loss))
        # # LSTM--------------------------------
        # # ENN+++++++++++++++++++++++++++++++++ (Elman-network variant, kept for reference)
        # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.33, shuffle=False)
        # y_train = y_train.reshape(-1, 1)
        #
        # y_train = np.array(y_train)
        # y_test = np.array(y_test)
        #
        # # y_train = y_train.flatten()
        # # y_test = y_test.flatten()
        #
        # net = Model().get_elman(3)
        # w, s = get_elman_weights(net)
        #
        # dimensions = len(w)
        #
        # # error = net.train(X_train, y_train, epochs=500, show=100, goal=0.01)
        #
        # # PSO
        # pso_elman = PSO_ENN.PSO(net, dimensions, X_train, y_train, None,
        #                         init_weights=None, n_iteration=50)
        # cost, pos = pso_elman.PSO()
        #
        # # # MVO
        # # mvo = MVO_ENN.MVO(net, dimensions, X_train, y_train)
        # # cost, pos = mvo.MVO()
        #
        # net = set_elman_weights(net, pos)
        # y_pred = net.sim(X_test)
        #
        # # model = Model().get_lstm(input_shape)
        # # Wb_model = unflatten_weights_and_biases(Wb, pos)
        # # model = put_weights_and_biases(model, Wb_model)
        # # y_pred = model.predict(X_test)
        # # ENN---------------------------------
        # Test-set losses for this month (computed on scaled values).
        mse = MeanSquaredError()
        loss_mse = mse(y_test, y_pred).numpy()
        loss_rmse = np.sqrt(loss_mse)
        mae = MeanAbsoluteError()
        loss_mae = mae(y_test, y_pred).numpy()
        mape = MeanAbsolutePercentageError()
        loss_mape = mape(y_test, y_pred).numpy()
        p[dd[i]] = [loss_rmse, loss_mae, loss_mape]
        file_name_real = f'{stack}_{gas}_{dd[i]}_{NN}_{OP}_real.xlsx'
        file_name_pred = f'{stack}_{gas}_{dd[i]}_{NN}_{OP}_pred.xlsx'
        pp[file_name_real] = y_test
        pp[file_name_pred] = y_pred
        # plt.plot(y_pred)
        # plt.plot(y_test)
        # plt.show()
    # Persist the per-month error table and the real/predicted series.
    # NOTE(review): the flattened source makes the indentation of these
    # final writes ambiguous; placed after the loop so each file is
    # written once with all months' results — confirm against the
    # original layout.
    p2 = pd.DataFrame(p)
    p2.to_excel('2so.xlsx')
    pp2 = pd.DataFrame.from_dict(pp, orient='index')
    pp2 = pp2.transpose()
    pp2.to_excel(f'{table}_{stack}_{gas}_{NN}_{OP}.xlsx')