# Build, train and persist the BCH daily-interval price model:
# a single small LSTM -> light dropout -> scalar regression output.
# NOTE(review): the leading ``model.add(`` is reconstructed — the statement
# was cut off at the start of this chunk; confirm against the full file.
model.add(
    LSTM(units=10, return_sequences=False,
         input_shape=(X_TRAIN.shape[1], 1)))
model.add(Dropout(0.1))
model.add(Dense(units=1))

# Compile and fit; shuffle=False keeps the chronological order of the
# time-series windows intact during training.
model.compile(optimizer='adam', loss='mean_squared_error')
history = model.fit(X_TRAIN, Y_TRAIN,
                    validation_data=(X_TEST, Y_TEST),
                    epochs=EPOCHS,
                    batch_size=BATCH_SIZE,
                    shuffle=False)

loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))

# Persist the trained model and the fitted scaler, then record the run
# metadata and plot the loss curves.
# Fix: the two path literals carried an ``f`` prefix with no placeholders —
# dropped; the string contents are unchanged.
model.save('../saved_model/BCH_model_for_days_new')
dump(sc, '../saved_model/BCH_scaler_for_model_for_days_new')
save_info(ONE_BATCH_SIZE, BATCH_SIZE, EPOCHS, TRAIN_TEST_SPLIT_POINT, loss,
          val_loss, model, 'BCH', 'days')
draw_training_and_validation_lost_plot(epochs, loss, val_loss)
# Build, train and persist the LTC minute-interval price model:
# a single small LSTM -> light dropout -> scalar regression output.
# (Dead commented-out layer experiments from the original were removed.)
model.add(
    LSTM(units=12, return_sequences=False,
         input_shape=(X_TRAIN.shape[1], 1)))
model.add(Dropout(0.1))
model.add(Dense(units=1))

# Compile and fit; shuffle=False keeps the chronological order of the
# time-series windows intact during training.
model.compile(optimizer='adam', loss='mean_squared_error')
history = model.fit(X_TRAIN, Y_TRAIN,
                    validation_data=(X_TEST, Y_TEST),
                    epochs=EPOCHS,
                    batch_size=BATCH_SIZE,
                    shuffle=False)

loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))

# Persist the trained model and the fitted scaler (final validation loss is
# embedded in the filenames), then record run metadata and plot the curves.
model.save(f'../saved_model/LTC_model_for_minutes_val_lose_{val_loss[-1]}')
dump(
    sc,
    f'../saved_model/LTC_scaler_for_model_for_minutes_val_lose_{val_loss[-1]}')
save_info(ONE_BATCH_SIZE, BATCH_SIZE, EPOCHS, TRAIN_TEST_SPLIT_POINT, loss,
          val_loss, model, 'LTC', 'minutes')
draw_training_and_validation_lost_plot(epochs, loss, val_loss)
# Train and persist the LTC daily-interval model fitted on the full dataset.
# NOTE(review): the LSTM layer(s) for this run are added outside this chunk —
# only the dense output head is visible here; confirm against the full file.
# (Dead commented-out layer experiments from the original were removed.)
model.add(Dense(units=1))

# Compile and fit; shuffle=False keeps the chronological order of the
# time-series windows intact during training.
model.compile(optimizer='adam', loss='mean_squared_error')
history = model.fit(X_TRAIN, Y_TRAIN,
                    validation_data=(X_TEST, Y_TEST),
                    epochs=EPOCHS,
                    batch_size=BATCH_SIZE,
                    shuffle=False)

loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))

# Persist the trained model and the fitted scaler (final validation loss is
# embedded in the filenames), then record run metadata and plot the curves.
model.save(
    f'../saved_model/LTC_model_for_days_from_full_data__val_lose_{val_loss[-1]}'
)
dump(
    sc,
    f'../saved_model/LTC_scaler_for_model_for_full_data_days_val_lose_{val_loss[-1]}'
)
save_info(ONE_BATCH_SIZE, BATCH_SIZE, EPOCHS, TRAIN_TEST_SPLIT_POINT, loss,
          val_loss, model, 'LTC', 'full_data_days')
draw_training_and_validation_lost_plot(epochs, loss, val_loss)
# Train and persist the ETH minute-interval model fitted on the full dataset.
# NOTE(review): the LSTM layer(s) for this run are added outside this chunk —
# only the dense output head is visible here; confirm against the full file.
# (Dead commented-out layer experiments from the original were removed.)
model.add(Dense(units=1))

# Compile and fit; shuffle=False keeps the chronological order of the
# time-series windows intact during training.
model.compile(optimizer='adam', loss='mean_squared_error')
history = model.fit(X_TRAIN, Y_TRAIN,
                    validation_data=(X_TEST, Y_TEST),
                    epochs=EPOCHS,
                    batch_size=BATCH_SIZE,
                    shuffle=False)

loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))

# Persist the trained model and the fitted scaler (final validation loss is
# embedded in the filenames), then record run metadata and plot the curves.
model.save(
    f'../saved_model/ETH_model_for_minutes_from_full_data__val_lose_{val_loss[-1]}'
)
dump(
    sc,
    f'../saved_model/ETH_scaler_for_model_for_full_data_minutes_val_lose_{val_loss[-1]}'
)
save_info(ONE_BATCH_SIZE, BATCH_SIZE, EPOCHS, TRAIN_TEST_SPLIT_POINT, loss,
          val_loss, model, 'ETH', 'full_data_minutes')
draw_training_and_validation_lost_plot(epochs, loss, val_loss)