import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.layers import (GRU, LSTM, Activation, BatchNormalization,
                                     Conv1D, Dense, Dropout, Flatten, LeakyReLU)
from tensorflow.keras.losses import mean_absolute_error
from tensorflow.keras.metrics import MAE, RootMeanSquaredError
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2


def free_attn_lstm(dataset_object: LSTM_data):
    X_train, X_test, Y_train, Y_test = dataset_object.get_memory()
    # Drop the last 12 features (the sentiment columns) from every timestep.
    X_train, X_test = X_train[:, :, :-12], X_test[:, :, :-12]

    regressor = Sequential()
    # First LSTM layer with Dropout regularisation.
    regressor.add(LSTM(units=NEURONS,
                       return_sequences=True,
                       activation=ACTIVATION,
                       recurrent_activation="sigmoid",
                       input_shape=(X_train.shape[1], X_train.shape[2]),
                       bias_regularizer=regularizers.l2(BIAIS_REG),
                       activity_regularizer=regularizers.l2(L2)))
    regressor.add(Dropout(DROPOUT))

    # Second LSTM layer, still returning the full sequence.
    regressor.add(LSTM(units=NEURONS,
                       activation=ACTIVATION,
                       recurrent_activation="sigmoid",
                       return_sequences=True,
                       bias_regularizer=regularizers.l2(BIAIS_REG),
                       activity_regularizer=regularizers.l2(L2)))
    regressor.add(Dropout(DROPOUT))

    # Third LSTM layer collapses the sequence into a single vector.
    regressor.add(LSTM(units=NEURONS,
                       activation=ACTIVATION,
                       recurrent_activation="sigmoid",
                       bias_regularizer=regularizers.l2(BIAIS_REG),
                       activity_regularizer=regularizers.l2(L2)))
    regressor.add(Dropout(DROPOUT))

    # Output layer: a single non-negative prediction.
    regressor.add(Dense(units=1,
                        activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)))

    optim = Adam()
    # Compiling the RNN
    regressor.compile(optimizer=optim, loss='mean_squared_error')

    # Fitting the RNN to the training set; the callbacks watch the held-out set.
    history = regressor.fit(X_train, Y_train,
                            epochs=EPOCHS,
                            batch_size=BATCH_SIZE,
                            validation_data=(X_test, Y_test),
                            callbacks=[REDUCE_LR, EARLY_STOP])

    regressor.save("data/weights/free_attn_lstm_no_senti")
    plot_train_loss(history)
    evaluate(regressor, X_test, Y_test, dataset_object,
             name="free_attn_lstm", senti="no")
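
# The functions in this file rely on module-level hyperparameters and
# callbacks (NEURONS, ACTIVATION, DROPOUT, BIAIS_REG, L2, EPOCHS, BATCH_SIZE,
# REDUCE_LR, EARLY_STOP) defined elsewhere in the project. The values below
# are a minimal, hypothetical sketch of that configuration -- the real
# settings may differ -- included only so this file runs in isolation.
from tensorflow.keras.callbacks import EarlyStopping

NEURONS = 64          # hypothetical LSTM width
ACTIVATION = "tanh"   # hypothetical cell activation
DROPOUT = 0.2         # hypothetical dropout rate
BIAIS_REG = 1e-4      # hypothetical bias-regulariser strength
L2 = 1e-4             # hypothetical activity-regulariser strength
EPOCHS = 100          # hypothetical epoch budget
BATCH_SIZE = 32       # hypothetical batch size
REDUCE_LR = ReduceLROnPlateau(monitor="val_loss", factor=0.5,
                              patience=10, mode="min")
EARLY_STOP = EarlyStopping(monitor="val_loss", patience=20,
                           restore_best_weights=True)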
def Train(self, input, target):
    # Hold out 25% of the samples for validation.
    X_train, X_test, Y_train, Y_test = train_test_split(input, target,
                                                        train_size=0.75)
    Y_train = np.asarray(Y_train)
    Y_test = np.asarray(Y_test)
    # Stack the per-sample 2-D arrays into (batch, timesteps, features);
    # every sample shares the shape of the first training sample.
    X_train = np.reshape(X_train, [-1, X_train[0].shape[0], X_train[0].shape[1]])
    X_test = np.reshape(X_test, [-1, X_train[0].shape[0], X_train[0].shape[1]])

    model = Sequential()
    model.add(Conv1D(16, 3, padding='same', input_shape=input[0].shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization())
    model.add(GRU(16, return_sequences=True))
    model.add(Flatten())
    model.add(Dense(8, activity_regularizer=l2(0.001)))
    model.add(Activation("relu"))
    # Two outputs: the estimated x and y positions.
    model.add(Dense(2))
    model.compile(loss=mean_absolute_error,
                  optimizer='nadam',
                  metrics=[RootMeanSquaredError(), MAE])
    model.summary()

    batch_size = 12
    epochs = 100
    # val_loss must be minimised, so the plateau detector runs in 'min' mode
    # (the original 'max' would never trigger a learning-rate reduction).
    reduce_lr_acc = ReduceLROnPlateau(monitor='val_loss', factor=0.9,
                                      patience=epochs // 10, verbose=1,
                                      min_delta=1e-4, mode='min')
    model.fit(X_train, Y_train,
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(X_test, Y_test),
              callbacks=[reduce_lr_acc])
    model.save("PositionEstimation.h5", overwrite=True)

    # Compare held-out predictions with the ground truth and export them.
    predicted = model.predict(X_test, batch_size=batch_size)
    res = pd.DataFrame({"predicted_x": predicted[:, 0],
                        "predicted_y": predicted[:, 1],
                        "original_x": Y_test[:, 0],
                        "original_y": Y_test[:, 1]})
    res.to_excel("res.xlsx")
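
# A minimal usage sketch for the saved position-estimation model. The names
# `load_trained` and `samples` are illustrative, not part of the original
# code; `samples` is assumed to be a list of 2-D arrays shaped like the
# training input.
def load_trained(path="PositionEstimation.h5"):
    from tensorflow.keras.models import load_model
    # Re-load the trained Conv1D/GRU regressor from disk.
    return load_model(path)

# model = load_trained()
# batch = np.reshape(samples, [-1, samples[0].shape[0], samples[0].shape[1]])
# xy = model.predict(batch)  # column 0: x estimate, column 1: y estimate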
def dense_net(dataset_object: LSTM_data):
    X_train, X_test, Y_train, Y_test = dataset_object.get_memory()
    print(X_test.shape, X_train.shape)
    # Collapse the windows to 2-D input for the dense net; this reshape
    # assumes a single timestep per sample.
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[2])
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[2])
    # Drop the last 12 features (the sentiment columns).
    X_train, X_test = X_train[:, :-12], X_test[:, :-12]
    print(X_test.shape, X_train.shape)

    regressor = Sequential()
    # Three dense hidden layers. The layer width reuses the EPOCHS constant,
    # as in the original code.
    for _ in range(3):
        regressor.add(Dense(units=EPOCHS,
                            activation='relu',
                            bias_regularizer=regularizers.l2(BIAIS_REG),
                            activity_regularizer=regularizers.l2(L2)))
    regressor.add(Dropout(DROPOUT))
    regressor.add(Dense(units=1,
                        activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)))

    optim = Adam()
    # Compiling the network
    regressor.compile(optimizer=optim, loss='mean_squared_error')

    # Fitting the network to the training set.
    history = regressor.fit(X_train, Y_train,
                            epochs=EPOCHS,
                            batch_size=BATCH_SIZE,
                            validation_data=(X_test, Y_test),
                            callbacks=[EARLY_STOP, REDUCE_LR])

    regressor.save("data/weights/dense_no_senti")
    plot_train_loss(history)
    # The sentiment features were sliced off above, so this run is tagged
    # senti="no" (the original passed "yes", inconsistent with the slicing
    # and the "dense_no_senti" weights path).
    evaluate(regressor, X_test, Y_test, dataset_object,
             name="dense", senti="no")
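
# `plot_train_loss` and `evaluate` are project helpers not shown in this
# file. Below is a minimal sketch of what the plotting helper presumably
# does, assuming a standard Keras History object and matplotlib; the name
# `plot_train_loss_sketch` is illustrative so it does not shadow the real
# helper.
def plot_train_loss_sketch(history):
    import matplotlib.pyplot as plt
    # Plot training vs. validation loss per epoch from the fit history.
    plt.plot(history.history["loss"], label="train loss")
    plt.plot(history.history["val_loss"], label="validation loss")
    plt.xlabel("epoch")
    plt.ylabel("MSE loss")
    plt.legend()
    plt.show()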