Example #1
def _train(self):
    # Assumes: from keras.layers import Input, LSTM, Dense
    #          from keras.models import Model
    #          from keras.optimizers import RMSprop
    #          from keras import callbacks
    # Load the raw data and split it into train / validation / test sets.
    data, label = self._load_data()
    train_data, train_label, validate_data, validate_label, test_data, test_label = split_data(
        data, label, to_categorical=True)

    # Two stacked LSTM layers followed by a 5-class softmax classifier.
    network_input = Input(shape=(100, 3))
    network = LSTM(32, return_sequences=True)(network_input)
    network = LSTM(32)(network)
    network = Dense(5, activation="softmax")(network)
    network = Model(inputs=[network_input], outputs=[network])
    network.compile(optimizer=RMSprop(lr=0.01),
                    loss="categorical_crossentropy",
                    metrics=["categorical_accuracy"])
    network.summary()

    # Reduce the learning rate when categorical accuracy stops improving.
    callback = [
        callbacks.ReduceLROnPlateau(monitor="categorical_accuracy", factor=0.1, patience=3)
    ]
    self.train_history = network.fit(train_data, train_label,
                                     validation_data=(validate_data, validate_label),
                                     batch_size=self.BATCH_SIZE,
                                     epochs=self.EPOCHS, callbacks=callback)
    self.evaluate_history = network.evaluate(test_data, test_label, batch_size=self.BATCH_SIZE)
    return network
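
The method above depends on a split_data helper that is not shown in the example. Below is a minimal sketch of one plausible implementation, assuming NumPy array inputs, an 80/10/10 train/validation/test split, and five classes to match the Dense(5) head; the ratios, shuffling, and keyword defaults are assumptions, not the original author's code.

import numpy as np
from keras.utils import to_categorical as _to_categorical

def split_data(data, label, to_categorical=False,
               val_frac=0.1, test_frac=0.1, num_classes=5):
    # Hypothetical splitter: shuffles the samples, optionally one-hot encodes
    # the labels, and returns the six arrays in the order the caller expects.
    idx = np.random.permutation(len(data))
    data, label = data[idx], label[idx]
    if to_categorical:
        label = _to_categorical(label, num_classes=num_classes)

    n = len(data)
    n_test = int(n * test_frac)
    n_val = int(n * val_frac)
    n_train = n - n_val - n_test

    return (data[:n_train], label[:n_train],
            data[n_train:n_train + n_val], label[n_train:n_train + n_val],
            data[n_train + n_val:], label[n_train + n_val:])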
Example #2
def run_LSTM(X_train, X_test, y_train, y_test, symbol, price):
    # Assumes: import numpy as np; import matplotlib.pyplot as plt;
    #          from keras.utils import to_categorical;
    #          create_LSTM and Generate_nav are defined elsewhere.
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Build and train the classifier (named 'model' to avoid shadowing the Keras LSTM layer).
    model = create_LSTM(X_train)
    history = model.fit(X_train, y_train, epochs=100, batch_size=32)
    model.summary()

    results = model.evaluate(X_test, y_test, batch_size=32)
    print("test loss, test acc:", results)

    # Plot the training accuracy curve.
    plt.plot(history.history['accuracy'], label='train')
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(loc='upper left')
    plt.show()

    # Predicted class per test sample: argmax over the class probabilities.
    prediction = model.predict(X_test)
    y_prediction = np.argmax(prediction, axis=1)
    print(y_prediction)

    NAV_history = Generate_nav(10000, symbol, price, y_prediction)
    return NAV_history
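
Example #2 calls a create_LSTM helper that is not included. A minimal sketch of what such a builder might look like, assuming X_train is shaped (samples, timesteps, features) and the labels are one-hot encoded into num_classes columns, is shown below; the layer width, the two-class default, and the choice of optimizer are assumptions rather than the original implementation.

from keras.models import Sequential
from keras.layers import LSTM, Dense

def create_LSTM(X_train, num_classes=2):
    # Hypothetical builder: a single LSTM layer followed by a softmax
    # classifier sized to the number of label classes.
    model = Sequential()
    model.add(LSTM(64, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model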
Example #3
# Shared trunk on top of the merged branch, then two separate regression heads.
merge = Dense(8)(merge)
output = Dense(1)(merge)

output_1 = Dense(4)(output)
output_1 = Dense(1)(output_1)

output_2 = Dense(4)(output)
output_2 = Dense(1)(output_2)

# Two-input, two-output model; input, input2, x, x2, y, y2 are defined earlier.
model = Model(inputs=[input, input2], outputs=[output_1, output_2])
model.summary()

model.compile(loss='mse', optimizer='adam', metrics=['mse'])
model.fit([x, x2], [y, y2], epochs=100, batch_size=1)
res = model.evaluate([x, x2], [y, y2], batch_size=1)

from keras.callbacks import EarlyStopping

# Leftover experiment with a second model and early stopping, kept commented out:
# callback = EarlyStopping(monitor='loss', patience=20, mode='auto')
# callback = EarlyStopping(monitor='acc', patience=20, mode='max')
# model2.compile(loss='mse', optimizer='adam', metrics=['mse'])
# model2.fit(x_, y, epochs=1000, batch_size=1, callbacks=[callback])
# loss2, mse2 = model2.evaluate(x_, y, batch_size=1, verbose=1)

# x_input = np.array([[6.5, 7.5, 8.5], [50, 60, 70], [70, 80, 90], [100, 110, 120]])
# x = x_input.reshape(-1, 3, 1)

# y_pred = model.predict(x)
print(res)
# print(loss2, mse2)
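
Example #3 starts after the two input branches have already been built and merged, so input, input2, merge, x, x2, y and y2 come from code that is not shown. One way that preamble could look, assuming two univariate 3-step sequence inputs processed by separate LSTM branches and joined with concatenate (the toy data and layer sizes are assumptions), is:

import numpy as np
from keras.models import Model
from keras.layers import Input, LSTM, Dense, concatenate

# Toy data: two parallel 3-step univariate sequences and two scalar targets.
x = np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5]]).reshape(-1, 3, 1)
x2 = np.array([[10, 20, 30], [20, 30, 40], [30, 40, 50]]).reshape(-1, 3, 1)
y = np.array([4.0, 5.0, 6.0])
y2 = np.array([40.0, 50.0, 60.0])

input = Input(shape=(3, 1))
input2 = Input(shape=(3, 1))

branch1 = LSTM(16)(input)
branch2 = LSTM(16)(input2)

# 'merge' is the tensor that the Dense layers in the snippet above keep refining.
merge = concatenate([branch1, branch2])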