Example #1
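All five examples target the TensorFlow 1.x `tf.contrib.timeseries` API (removed in TensorFlow 2.0) and reference module-level names the snippets don't define: `numpy`, `tf`, `ts_estimators`, `_LSTMModel`, `NumpyReader`, and `_DATA_FILE`. A minimal sketch of the assumed context follows; the `_LSTMModel` import assumes the class from TensorFlow's bundled example `contrib/timeseries/examples/lstm.py` (many projects copy that class inline instead), and the `_DATA_FILE` path is taken from the script at the end of this page:

# Assumed context for the examples below (TensorFlow 1.x only).
import numpy
import tensorflow as tf
import matplotlib.pyplot as plt  # used by the plotting snippets
from tensorflow.contrib.timeseries.python.timeseries import NumpyReader
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.examples.lstm import _LSTMModel

_DATA_FILE = './data/multivariate_periods.csv'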
def train_and_predict(csv_file_name=_DATA_FILE, training_steps=200):
    """Train and predict using a custom time series model."""
    # Construct an Estimator from our LSTM model.
    estimator = ts_estimators._TimeSeriesRegressor(
        model=_LSTMModel(num_features=5, num_units=128),
        optimizer=tf.train.AdamOptimizer(0.001))
    # Read a headerless CSV whose first column is the time index and whose
    # remaining five columns are the observed features.
    reader = tf.contrib.timeseries.CSVReader(
        csv_file_name,
        column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES, ) +
                      (tf.contrib.timeseries.TrainEvalFeatures.VALUES, ) * 5))
    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader,
                                                               batch_size=4,
                                                               window_size=32)
    estimator.train(input_fn=train_input_fn, steps=training_steps)
    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
    # Predict starting after the evaluation
    (predictions, ) = tuple(
        estimator.predict(
            input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                evaluation, steps=100)))
    times = evaluation["times"][0]
    observed = evaluation["observed"][0, :, :]
    # Stitch the in-sample means and the 100-step forecast into one series.
    predicted_mean = numpy.squeeze(
        numpy.concatenate([evaluation["mean"][0], predictions["mean"]],
                          axis=0))
    all_times = numpy.concatenate([times, predictions["times"]], axis=0)
    return times, observed, all_times, predicted_mean
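The tuple returned by `train_and_predict` plots directly; a minimal usage sketch (the plotting code below is not part of the original example, and it shows only the first of the five features):

times, observed, all_times, predicted_mean = train_and_predict()

plt.figure(figsize=(15, 5))
plt.axvline(times[-1], linestyle='dotted', label='forecast start')
plt.plot(times, observed[:, 0], label='observed')
plt.plot(all_times, predicted_mean[:, 0], label='evaluated + predicted mean')
plt.legend(loc='upper left')
plt.show()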
Example #2
def train_and_predict(csv_file_name=_DATA_FILE, training_steps=200):
  """Train and predict using a custom time series model."""
  # Construct an Estimator from our LSTM model.
  estimator = ts_estimators._TimeSeriesRegressor(
      model=_LSTMModel(num_features=5, num_units=128),
      optimizer=tf.train.AdamOptimizer(0.001))
  reader = tf.contrib.timeseries.CSVReader(
      csv_file_name,
      column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                    + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      reader, batch_size=4, window_size=32)
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  # Predict starting after the evaluation
  (predictions,) = tuple(estimator.predict(
      input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
          evaluation, steps=100)))
  times = evaluation["times"][0]
  observed = evaluation["observed"][0, :, :]
  predicted_mean = numpy.squeeze(numpy.concatenate(
      [evaluation["mean"][0], predictions["mean"]], axis=0))
  all_times = numpy.concatenate([times, predictions["times"]], axis=0)
  return times, observed, all_times, predicted_mean
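Both variants above read the same file layout: a headerless CSV with six comma-separated columns, a time index followed by five feature values. An illustrative row (the values here are invented):

0,0.93,1.99,2.57,3.08,4.05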
Example #3
    # ... (the data-loading loop is elided in the original snippet; each
    # iteration appends one observation `tmp` to train_y)
        train_y.append(tmp)
    train_y_all = np.array(train_y)
    # The first 900 points train the model; the remaining 182 are held out.
    train_y = train_y_all[0:900]
    train_x = np.array(range(900))
    label_y = train_y_all[900:]
    label_x = np.array(range(900, 1082))
    data = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: train_x,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: train_y,
    }
    reader = NumpyReader(data)
    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader,
                                                               batch_size=8,
                                                               window_size=180)
    estimator = ts_estimators._TimeSeriesRegressor(
        model=_LSTMModel(num_features=1, num_units=144),
        optimizer=tf.train.AdamOptimizer(0.001))

    estimator.train(input_fn=train_input_fn, steps=2000)
    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
    # Predict starting after the evaluation
    (predictions, ) = tuple(
        estimator.predict(
            input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
                evaluation, steps=180)))

    observed_times = evaluation["times"][0]
    observed = evaluation["observed"][0, :, :]
    evaluated_times = evaluation["times"][0]
    evaluated = evaluation["mean"][0]
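Example #3 ends after collecting the in-sample results, but the 180-step forecast is already in `predictions`. A sketch of the usual continuation, modeled on Examples #1 and #2 (assumes `import matplotlib.pyplot as plt`; with `num_features=1` the reshapes are safe):

predicted_times = predictions['times']
predicted = predictions['mean']

plt.figure(figsize=(15, 5))
plt.plot(observed_times, observed.reshape(-1), label='observed')
plt.plot(evaluated_times, evaluated.reshape(-1), label='evaluated')
plt.plot(predicted_times, predicted.reshape(-1), label='predicted')
plt.legend(loc='upper left')
plt.show()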
Example #4
def _train_on_generated_data(
    generate_fn, generative_model, train_iterations, seed,
    learning_rate=0.1, ignore_params_fn=lambda _: (),
    derived_param_test_fn=lambda _: (),
    train_input_fn_type=input_pipeline.WholeDatasetInputFn,
    train_state_manager=state_management.PassthroughStateManager()):
  """The training portion of parameter recovery tests."""
  random_seed.set_random_seed(seed)
  generate_graph = ops.Graph()
  with generate_graph.as_default():
    with session.Session(graph=generate_graph):
      generative_model.initialize_graph()
      time_series_reader, true_parameters = generate_fn(generative_model)
      true_parameters = {
          tensor.name: value for tensor, value in true_parameters.items()}
  eval_input_fn = input_pipeline.WholeDatasetInputFn(time_series_reader)
  eval_state_manager = state_management.PassthroughStateManager()
  true_parameter_eval_graph = ops.Graph()
  with true_parameter_eval_graph.as_default():
    generative_model.initialize_graph()
    ignore_params = ignore_params_fn(generative_model)
    feature_dict, _ = eval_input_fn()
    eval_state_manager.initialize_graph(generative_model)
    feature_dict[TrainEvalFeatures.VALUES] = math_ops.cast(
        feature_dict[TrainEvalFeatures.VALUES], generative_model.dtype)
    model_outputs = eval_state_manager.define_loss(
        model=generative_model,
        features=feature_dict,
        mode=estimator_lib.ModeKeys.EVAL)
    with session.Session(graph=true_parameter_eval_graph) as sess:
      variables.global_variables_initializer().run()
      coordinator = coordinator_lib.Coordinator()
      queue_runner_impl.start_queue_runners(sess, coord=coordinator)
      true_param_loss = model_outputs.loss.eval(feed_dict=true_parameters)
      true_transformed_params = {
          param: param.eval(feed_dict=true_parameters)
          for param in derived_param_test_fn(generative_model)}
      coordinator.request_stop()
      coordinator.join()

  saving_hook = _SavingTensorHook(
      tensors=true_parameters.keys(),
      every_n_iter=train_iterations - 1)

  class _RunConfig(estimator_lib.RunConfig):

    @property
    def tf_random_seed(self):
      return seed

  estimator = estimators._TimeSeriesRegressor(  # pylint: disable=protected-access
      model=generative_model,
      config=_RunConfig(),
      state_manager=train_state_manager,
      optimizer=adam.AdamOptimizer(learning_rate))
  train_input_fn = train_input_fn_type(time_series_reader=time_series_reader)
  trained_loss = (estimator.train(
      input_fn=train_input_fn,
      max_steps=train_iterations,
      hooks=[saving_hook]).evaluate(
          input_fn=eval_input_fn, steps=1))["loss"]
  logging.info("Final trained loss: %f", trained_loss)
  logging.info("True parameter loss: %f", true_param_loss)
  return (ignore_params, true_parameters, true_transformed_params,
          trained_loss, true_param_loss, saving_hook,
          true_parameter_eval_graph)
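The `generate_fn` contract is implicit in the body above: it receives the already-initialized generative model and returns a time series reader together with a mapping from parameter tensors to their true values. A hypothetical stub, for illustration only (the random data and the empty parameter map are placeholders, not part of any real test):

def _toy_generate_fn(generative_model):
  # 500 scalar observations; a real test would sample from generative_model
  # so that the true parameters are actually recoverable from the data.
  times = numpy.arange(500)
  values = numpy.random.standard_normal([500, 1])
  reader = input_pipeline.NumpyReader({
      TrainEvalFeatures.TIMES: times,
      TrainEvalFeatures.VALUES: values})
  # Map each parameter tensor to the value used to generate the data
  # (left empty here; a real generate_fn fills this in).
  true_parameters = {}
  return reader, true_parameters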
Example #5
def _train_on_generated_data(
    generate_fn,
    generative_model,
    train_iterations,
    seed,
    learning_rate=0.1,
    ignore_params_fn=lambda _: (),
    derived_param_test_fn=lambda _: (),
    train_input_fn_type=input_pipeline.WholeDatasetInputFn,
    train_state_manager=state_management.PassthroughStateManager()):
    """The training portion of parameter recovery tests."""
    random_seed.set_random_seed(seed)
    generate_graph = ops.Graph()
    with generate_graph.as_default():
        with session.Session(graph=generate_graph):
            generative_model.initialize_graph()
            time_series_reader, true_parameters = generate_fn(generative_model)
            true_parameters = {
                tensor.name: value
                for tensor, value in true_parameters.items()
            }
    eval_input_fn = input_pipeline.WholeDatasetInputFn(time_series_reader)
    eval_state_manager = state_management.PassthroughStateManager()
    true_parameter_eval_graph = ops.Graph()
    with true_parameter_eval_graph.as_default():
        generative_model.initialize_graph()
        ignore_params = ignore_params_fn(generative_model)
        feature_dict, _ = eval_input_fn()
        eval_state_manager.initialize_graph(generative_model)
        feature_dict[TrainEvalFeatures.VALUES] = math_ops.cast(
            feature_dict[TrainEvalFeatures.VALUES], generative_model.dtype)
        model_outputs = eval_state_manager.define_loss(
            model=generative_model,
            features=feature_dict,
            mode=estimator_lib.ModeKeys.EVAL)
        with session.Session(graph=true_parameter_eval_graph) as sess:
            variables.global_variables_initializer().run()
            coordinator = coordinator_lib.Coordinator()
            queue_runner_impl.start_queue_runners(sess, coord=coordinator)
            true_param_loss = model_outputs.loss.eval(
                feed_dict=true_parameters)
            true_transformed_params = {
                param: param.eval(feed_dict=true_parameters)
                for param in derived_param_test_fn(generative_model)
            }
            coordinator.request_stop()
            coordinator.join()

    saving_hook = _SavingTensorHook(tensors=true_parameters.keys(),
                                    every_n_iter=train_iterations - 1)

    class _RunConfig(estimator_lib.RunConfig):
        @property
        def tf_random_seed(self):
            return seed

    estimator = estimators._TimeSeriesRegressor(  # pylint: disable=protected-access
        model=generative_model,
        config=_RunConfig(),
        state_manager=train_state_manager,
        optimizer=adam.AdamOptimizer(learning_rate))
    train_input_fn = train_input_fn_type(time_series_reader=time_series_reader)
    trained_loss = (estimator.train(
        input_fn=train_input_fn,
        max_steps=train_iterations,
        hooks=[saving_hook]).evaluate(
            input_fn=eval_input_fn, steps=1))["loss"]
    logging.info("Final trained loss: %f", trained_loss)
    logging.info("True parameter loss: %f", true_param_loss)
    return (ignore_params, true_parameters, true_transformed_params,
            trained_loss, true_param_loss, saving_hook,
            true_parameter_eval_graph)


# Module-level driver script: read the CSV, build the LSTM estimator, train,
# evaluate in-sample, forecast ahead, and plot the result.
csv_file_name = './data/multivariate_periods.csv'
# csv_file_name = './data/red_bule_balls_2003.csv'
reader = tf.contrib.timeseries.CSVReader(
    csv_file_name,
    column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                  + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
data = reader.read_full()
print(data)
# @see: https://mp.weixin.qq.com/s?__biz=MzI0ODcxODk5OA==&mid=2247489403&idx=1&sn=9f004963889104fd83307e48e9ec8709
estimator = ts_estimators._TimeSeriesRegressor(
    model=_LSTMModel(num_features=5, num_units=128),
    optimizer=tf.train.AdamOptimizer(0.001))
print(estimator)

# Train, evaluate, and forecast, as in Examples #1 and #2; the plotting
# below needs `evaluation` and `predictions` to exist.
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
    reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=200)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
(predictions,) = tuple(estimator.predict(
    input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
        evaluation, steps=100)))

# Plot the first of the five features.
plt.figure(figsize=(15, 5))
plt.plot(evaluation['times'].reshape(-1), evaluation['mean'][0, :, 0],
         label='evaluation')
plt.plot(predictions['times'].reshape(-1), predictions['mean'][:, 0],
         label='prediction')
plt.xlabel('time_step')
plt.legend(loc='upper left')
plt.show()