Example #1
def inference(args, cnn_features):
    # Reset TensorFlow 1.x's default graph and run inference inside a fresh session
    tf.reset_default_graph()
    with tf.Session() as sess:
        net = LSTM(sess, max_seq_len=25, h_dim=1024)
        net.build_model()
        net.inference(cnn_features=cnn_features,
                      label_file=args.label_file,
                      gen_from=args.gen_from,
                      out_path=args.output_folder,
                      bsize=args.bsize)
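The `args` object is presumably produced by a command-line parser. A minimal sketch of such an entry point, assuming argparse (the flag names simply mirror the attributes read above; defaults are assumptions), could look like this:

import argparse

def parse_args():
    # Flags mirror the attributes accessed in inference(); defaults are assumptions.
    parser = argparse.ArgumentParser(description='Run LSTM inference on CNN features')
    parser.add_argument('--label_file', required=True,
                        help='Label file passed to net.inference')
    parser.add_argument('--gen_from', required=True,
                        help='Checkpoint to restore/generate from')
    parser.add_argument('--output_folder', default='output',
                        help='Folder where inference results are written')
    parser.add_argument('--bsize', type=int, default=32,
                        help='Batch size used during inference')
    return parser.parse_args()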
Example #2
def main():
    np.random.seed(RANDOM_SEED)  # fixed seed so results are reproducible
    # Load the raw price history and normalise the feature columns
    data = pd.read_csv(DATA_PATH)
    scaler, data = preprocessing_data.process_normalization(
        data=data,
        useful_colmns=[
            'Open', 'High', 'Low', 'Close', 'Volume', 'Market Cap', 'Weekday',
            'Increase'
        ])
    # Split into training/test sets and build batched inputs and targets
    training_set, test_set = generate_batch.split_data(data)
    training_inputs, train_outputs = generate_batch.generate_batchs(
        training_set)
    test_inputs, test_outputs = generate_batch.generate_batchs(test_set)
    # Build and fit the LSTM model
    model = LSTM.build_model(inputs=training_inputs,
                             output_size=1,
                             LSTM_units=20)
                             output_size=1,
                             LSTM_units=20)
    history = model.fit(training_inputs,
                        train_outputs,
                        batch_size=1,
                        epochs=30,
                        verbose=2,
                        shuffle=True)
    # Plot the training loss curve
    fig, ax1 = plt.subplots(1, 1)
    ax1.plot(history.epoch, history.history['loss'])
    ax1.set_title('Training loss')
    ax1.set_ylabel('Mean Absolute Error (MAE)', fontsize=12)
    ax1.set_xlabel('# Epochs', fontsize=12)
    plt.show()

    # Predict on the test set and undo the min-max scaling of the target column
    h = model.predict(test_inputs)
    dat = np.concatenate((test_outputs, h), axis=1)
    dat = dat * scaler.data_range_[-1] + scaler.data_min_[-1]
    df = pd.DataFrame(dat, columns=['actual', 'prediction'])
    df.to_csv('prediction.csv', index=False)
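Both this example and the next rely on a helper `LSTM.build_model(inputs, output_size, LSTM_units, ...)` whose body is not shown here. A minimal sketch consistent with how it is called, assuming a Keras backend (the single-layer architecture and the default loss are assumptions), would be:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM as KerasLSTM

def build_model(inputs, output_size, LSTM_units,
                loss='mean_absolute_error', optimizer='adam'):
    # inputs has shape (samples, window_len, n_features); only the last two
    # dimensions are needed to define the input shape of the recurrent layer.
    model = Sequential()
    model.add(KerasLSTM(LSTM_units, input_shape=(inputs.shape[1], inputs.shape[2])))
    model.add(Dense(output_size, activation='linear'))
    model.compile(loss=loss, optimizer=optimizer)
    return model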
Example #3
    data = HistoryData(DATA_PATH)
    data.date2weekday().add_increase_col().drop_columns(['Date']).scalar(
        ['Open*', 'High', 'Close**'], 0.01).split(0.8)
    train_x, train_y, test_x, test_y = data.generate_train_test_data(
        x_columns=['Open*', 'High', 'Close**', 'Weekday'],
        y_columns=['Close**'],
        window_len=7,
        flatten=False)
    print(train_x.shape)
    print(test_x.shape)
    print(train_y.shape)
    print(test_y.shape)

    # Train the model
    model = LSTM.build_model(inputs=train_x,
                             output_size=7,
                             LSTM_units=20,
                             loss='mean_squared_error')
    history = model.fit(train_x,
                        train_y,
                        batch_size=32,
                        epochs=200,
                        verbose=1,
                        shuffle=True)

    # Visualize the training loss
    fig, ax1 = plt.subplots(1, 1)
    ax1.plot(history.epoch, history.history['loss'])
    ax1.set_title('Training loss')
    ax1.set_ylabel('Mean Squared Error (MSE)', fontsize=12)
    ax1.set_xlabel('# Epochs', fontsize=12)
    plt.show()
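The snippet stops after plotting the loss. Following the prediction step of Example #2, a held-out evaluation could continue the same function body as sketched below; it assumes `model`, `test_x`, and `test_y` are still in scope and leaves out inverse scaling, since the HistoryData scaling interface is not shown here:

    # Evaluate on the held-out windows and inspect the 7-step forecasts
    test_loss = model.evaluate(test_x, test_y, verbose=0)
    print('Test MSE:', test_loss)
    predictions = model.predict(test_x)
    print(predictions.shape)  # one 7-step forecast per test window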