# Example 1 (snippet separator)
# Prepare data: drop rows containing NaNs across all three arrays in lockstep.
# NOTE(review): `input_data_dummy` is passed in and rebound by the same call;
# example 2 below passes `input_data_dummy_raw` instead -- confirm which is intended.
input_data, price_data, input_data_dummy = remove_nan_rows(
    [input_data_raw, price_data_raw, input_data_dummy])
# Min-max scale the continuous features: scaled = (x - min) / (max - min).
# Assumes min_max_scaling row 0 holds per-column maxima and row 1 minima -- TODO confirm.
input_data_scaled_no_dummies = (input_data - min_max_scaling[1, :]) / (
    min_max_scaling[0, :] - min_max_scaling[1, :])
# Re-attach the (unscaled) dummy/indicator columns after scaling.
input_data_scaled = np.concatenate(
    [input_data_scaled_no_dummies, input_data_dummy], axis=1)
# Window the scaled features into LSTM sequences; the output side of the
# helper is unused here (fed zeros, result discarded).
input_data_lstm, _ = get_lstm_input_output(input_data_scaled,
                                           np.zeros_like(input_data),
                                           time_steps=time_steps)
# Windowing shortens the series; align the prices to the remaining rows.
price_data = price_data[-len(input_data_lstm):]

# Split into train, test and cross-validation sets (ratios come from `split`).
input_train, input_test, input_cv, price_train, price_test, price_cv = \
    train_test_validation_split([input_data_lstm, price_data], split=split)

# Feature dimensionality: train input is 3-D (samples, time_steps, features).
_, _, input_dim = np.shape(input_train)

# Forward-propagation: build the LSTM graph -- 3 output classes, one hidden
# layer of 3 units.
x, y, logits, y_, learning_r, drop_out = lstm_nn(input_dim,
                                                 3,
                                                 time_steps=time_steps,
                                                 n_hidden=[3])

# TF (v1 graph-mode) cost and optimizer.
price_h = tf.placeholder(tf.float32, [None, 1])  # realized price change per sample
# Per-class position signals: long (+1), short (-1), and a near-zero value for
# the third class. NOTE(review): -1e-10 appears to stand in for 0 ("stay out"),
# presumably to keep a non-degenerate gradient -- confirm intent.
signals = tf.constant([[1., -1., -1e-10]])
# Expected profit: class probabilities weighted by signal and price move (x100).
cost = (tf.reduce_mean(y_ * signals * price_h * 100))  # profit function
# Maximize profit by minimizing its negative.
train_step = tf.train.AdamOptimizer(learning_r).minimize(-cost)
# Example 2 (snippet separator)
# Extract continuous and dummy/indicator features from the raw OANDA candles.
input_data_raw, input_data_dummy_raw = get_features(oanda_data)
# Convert absolute prices to one-step relative changes; the leading [[0]] pads
# the first step and 1e-10 guards against division by zero.
# NOTE(review): the denominator is the *current* price (price[1:]), not the
# previous one (price[:-1]) -- confirm this return definition is intended.
price_data_raw = np.concatenate([[[0]],
                                 (price_data_raw[1:] - price_data_raw[:-1]) / (price_data_raw[1:] + 1e-10)], axis=0)

# Prepare data: drop NaN rows across all four arrays in lockstep.
input_data, output_data, input_data_dummy, price_data = \
    remove_nan_rows([input_data_raw, output_data_raw,
                     input_data_dummy_raw, price_data_raw])
# Min-max scale the continuous features (assumes row 0 = max, row 1 = min --
# TODO confirm) and re-attach the unscaled dummy columns.
input_data_scaled_no_dummies = (
    input_data - min_max_scaling[1, :]) / (min_max_scaling[0, :] - min_max_scaling[1, :])
input_data_scaled = np.concatenate(
    [input_data_scaled_no_dummies, input_data_dummy], axis=1)

# Split into train, test and cross-validation sets.
input_train, input_test, input_cv, output_train, output_test, output_cv, price_train, price_test, price_cv = \
    train_test_validation_split(
        [input_data_scaled, output_data, price_data], split=split)

# Dimensions: inputs and outputs here are 2-D (samples, features/classes).
_, input_dim = np.shape(input_train)
_, output_dim = np.shape(output_train)

# Forward-propagation: build the logistic-regression graph.
x, y, logits, y_, learning_r, drop_out = logistic_regression(
    input_dim, output_dim)

# TF (v1 graph-mode) cost and optimizer: softmax cross-entropy of raw logits
# against labels y, minimized with Adam.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
train_step = tf.train.AdamOptimizer(learning_r).minimize(cost)

# Load the most recent 50k hourly EUR/USD candles and build targets/features.
# (Header previously said "init session", but this section loads and prepares
# data; no TF session is created here.)
oanda_data = np.load('data\\EUR_USD_H1.npy')[-50000:]
# Binary up/down target with a 0.000275 price-move dead zone (per helper's delta arg).
y_data = price_to_binary_target(oanda_data, delta=0.000275)
# Feature DataFrame over several lookback periods (return_numpy=False keeps
# the pandas object so named columns can be separated below).
x_data = get_features_v2(oanda_data,
                         time_periods=[10, 25, 50, 120, 256],
                         return_numpy=False)

# Separate the price columns, convert to plain numpy arrays, and drop NaN rows
# across all arrays in lockstep.
# Fix: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in 1.0;
# .values is the long-supported, behaviorally identical replacement.
price = x_data['price'].values.reshape(-1, 1)
price_change = x_data['price_delta'].values.reshape(-1, 1)
x_data = x_data.drop(['price', 'price_delta'], axis=1).values
price, price_change, x_data, y_data = remove_nan_rows(
    [price, price_change, x_data, y_data])

# Split into train, test and cross-validation sets.
input_train, input_test, input_cv, output_train, output_test, output_cv, price_train, price_test, price_cv = \
    train_test_validation_split([x_data, y_data, price_change], split=split)

# Pre-process data: scale, pca, polynomial.
# NOTE(review): std_dev_threshold=2.5 semantics aren't visible here --
# presumably an outlier/constant-column filter; confirm in min_max_scale.
input_train, input_test, input_cv = min_max_scale(input_train,
                                                  input_test,
                                                  input_cv,
                                                  std_dev_threshold=2.5)
# PCA step currently disabled.
# input_train, input_test, input_cv = get_pca(input_train, input_test, input_cv, threshold=0.01)
# Expand features with degree-2 polynomial terms ("poloynomials" [sic] is the
# project helper's actual name).
input_train, input_test, input_cv = get_poloynomials(input_train,
                                                     input_test,
                                                     input_cv,
                                                     degree=2)

# Feature/class dimensionality after pre-processing.
_, input_dim = np.shape(input_train)
_, output_dim = np.shape(output_train)