Example #1
# Assumes a project-local `deribit_wrapper` module and the `multivariate_data`
# helper (a sketch of it follows this example).
def run_live_predict(trained_model, step=1, future_target=0):
  hist_wrapper = deribit_wrapper.DeribitWrapper()
  df = hist_wrapper.fetch_chart_data(days_lookback=1)
  # Engineer the same spread features the model was trained on.
  df["spread"] = df["high"] - df["low"]
  df["spread_2"] = df["open"] - df["close"]
  feature_cols = ["volume", "spread_2", "spread"]
  features = df[feature_cols].values
  # Z-score normalize with this window's statistics; column 2 is the target ("spread").
  means = features.mean(axis=0)
  stds = features.std(axis=0)
  dataset = (features - means) / stds
  past_history = 144
  # Build the ten most recent 144-step windows.
  start_idx = len(df) - (past_history + 10)
  x_val_single, y_val_single = multivariate_data(dataset, dataset[:, 2],
                                                 start_idx, None, past_history,
                                                 future_target, step,
                                                 single_step=True)
  # Predict on the most recent window so the prediction and the latest target
  # refer to the same time step, then undo the normalization.
  prediction = trained_model.predict(x_val_single[-1:])[0, 0]
  denormed = (prediction * stds[2]) + means[2]
  target_denorm = (y_val_single[-1] * stds[2]) + means[2]
  print(denormed, target_denorm)
  # Relative gap between the latest actual spread and the prediction.
  return (target_denorm - denormed) / target_denorm
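
All three examples call a `multivariate_data` helper that is not shown on this page. A minimal sketch, assuming the snippets follow the standard TensorFlow time-series tutorial, whose helper of the same name matches the call signature used here:

import numpy as np

def multivariate_data(dataset, target, start_index, end_index, history_size,
                      target_size, step, single_step=False):
  # Slice `dataset` into sliding windows of `history_size` rows (subsampled
  # every `step` rows) and pair each window with the target value
  # `target_size` steps past the window's end.
  data = []
  labels = []
  start_index = start_index + history_size
  if end_index is None:
    end_index = len(dataset) - target_size
  for i in range(start_index, end_index):
    indices = range(i - history_size, i, step)
    data.append(dataset[indices])
    if single_step:
      labels.append(target[i + target_size])
    else:
      labels.append(target[i:i + target_size])
  return np.array(data), np.array(labels)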
Example #2
# Assumes `import tensorflow as tf`, a project-local `deribit_wrapper` module,
# and the `multivariate_data` and `show_plot` helpers from the TensorFlow
# time-series tutorial this snippet appears to follow.
def get_predictor(STEP=1, future_target=5):
  # Training hyperparameters.
  TRAIN_SPLIT = 6000
  BUFFER_SIZE = 455
  BATCH_SIZE = 255
  EVALUATION_INTERVAL = 20
  EPOCHS = 21
  tf.random.set_seed(13)  # make shuffling and weight init reproducible

  feature_cols = ["volume", "spread_2", "spread"]
  hist_wrapper = deribit_wrapper.DeribitWrapper()

  df = hist_wrapper.fetch_chart_data()
  # Engineer the spread features used as model inputs.
  df["spread"] = df["high"] - df["low"]
  df["spread_2"] = df["open"] - df["close"]
  features = df[feature_cols].values
  # Z-score normalize each column; column 2 is the target ("spread").
  means = features.mean(axis=0)
  stds = features.std(axis=0)
  dataset = (features - means) / stds

  past_history = 144  # each sample sees the previous 144 time steps

  # Window the series into (144-step history, single target) pairs.
  x_train_single, y_train_single = multivariate_data(dataset, dataset[:, 2], 0,
                                                     TRAIN_SPLIT, past_history,
                                                     future_target, STEP,
                                                     single_step=True)
  x_val_single, y_val_single = multivariate_data(dataset, dataset[:, 2],
                                                 TRAIN_SPLIT, None, past_history,
                                                 future_target, STEP,
                                                 single_step=True)

  # Shuffle and cache the training pipeline; keep validation order intact.
  train_data_single = tf.data.Dataset.from_tensor_slices((x_train_single, y_train_single))
  train_data_single = train_data_single.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()

  val_data_single = tf.data.Dataset.from_tensor_slices((x_val_single, y_val_single))
  val_data_single = val_data_single.batch(BATCH_SIZE).repeat()

  # A single LSTM layer feeding a one-unit regression head.
  single_step_model = tf.keras.models.Sequential()
  single_step_model.add(tf.keras.layers.LSTM(89,
                                             input_shape=x_train_single.shape[-2:]))
  single_step_model.add(tf.keras.layers.Dense(1))

  single_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae')

  single_step_model.fit(train_data_single, epochs=EPOCHS,
                        steps_per_epoch=EVALUATION_INTERVAL,
                        validation_data=val_data_single,
                        validation_steps=50)
  
  # Plot one validation batch: input history, true target, and prediction.
  for x, y in val_data_single.take(1):
    predicted = single_step_model.predict(x)[0]
    plot = show_plot([x[0][:, 1].numpy(), y[0].numpy(),
                      predicted], 1,
                     'Single Step Prediction')
    plot.show()
    
  return single_step_model
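
A minimal sketch of how the two functions might be wired together, assuming both are defined in the same module; this wiring is not shown in the source:

# Hypothetical wiring: train a model once, then score the most recent market
# window. Note the source defaults differ (the predictor trains with
# future_target=5, while the live call defaults to future_target=0).
model = get_predictor()
factor = run_live_predict(model)
print("predicted spread factor:", factor)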
Example #3
# Assumes a project-local `deribit_wrapper` module and the `multivariate_data`
# helper sketched after Example #1.
def run_live_predict(trained_model, step=1, future_target=0):
    hist_wrapper = deribit_wrapper.DeribitWrapper()
    df = hist_wrapper.fetch_chart_data(days_lookback=1)
    # This variant smooths the spread with a 55-period rolling mean.
    df["spread"] = df["high"] - df["low"]
    df["spread_roc"] = df["spread"].rolling(window=55).mean()
    df.dropna(inplace=True)  # the first 54 rows have no rolling mean yet
    print(df.iloc[-1])  # log the most recent candle for inspection
    feature_cols = ["volume", "spread_roc", "spread"]
    features = df[feature_cols].values
    # Z-score normalize with this window's statistics; column 2 is the target ("spread").
    means = features.mean(axis=0)
    stds = features.std(axis=0)
    dataset = (features - means) / stds
    past_history = 144
    # Build the ten most recent 144-step windows.
    start_idx = len(df) - (past_history + 10)
    x_val_single, y_val_single = multivariate_data(dataset,
                                                   dataset[:, 2],
                                                   start_idx,
                                                   None,
                                                   past_history,
                                                   future_target,
                                                   step,
                                                   single_step=True)
    # Predict on the most recent window and undo the normalization.
    prediction = trained_model.predict(x_val_single[-1:])[0, 0]
    denormed = (prediction * stds[2]) + means[2]
    # Walk backwards to the most recent nonzero observed spread.
    last_spread = 0.0
    i = 1
    while last_spread == 0.0:
        last_spread = df["spread"].iloc[-i]
        i += 1
    # Absolute relative gap between the last observed spread and the
    # prediction, damped by a factor of ten.
    factor = abs((last_spread - denormed) / last_spread)
    return factor / 10
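
Each example denormalizes model output with the per-column statistics of its own input window (column index 2 holds the target feature). A small self-contained illustration of that round trip, using made-up numbers:

import numpy as np

# Hypothetical feature matrix: columns are [volume, spread_2, spread].
features = np.array([[10.0, 0.2, 1.0],
                     [12.0, -0.1, 1.4],
                     [11.0, 0.3, 1.2]])
means = features.mean(axis=0)
stds = features.std(axis=0)
normalized = (features - means) / stds

# Column 2 holds the target ("spread"); undoing the z-score per column
# recovers the original values.
restored = normalized[:, 2] * stds[2] + means[2]
assert np.allclose(restored, features[:, 2])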