Example #1
  def _fit_restore_fit_test_template(self, estimator_fn, dtype):
    """Tests restoring previously fit models."""
    model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    exogenous_feature_columns = (
        feature_column.numeric_column("exogenous"),
    )
    first_estimator = estimator_fn(model_dir, exogenous_feature_columns)
    times = numpy.arange(20, dtype=numpy.int64)
    values = numpy.arange(20, dtype=dtype.as_numpy_dtype)
    exogenous = numpy.arange(20, dtype=dtype.as_numpy_dtype)
    features = {
        feature_keys.TrainEvalFeatures.TIMES: times,
        feature_keys.TrainEvalFeatures.VALUES: values,
        "exogenous": exogenous
    }
    train_input_fn = input_pipeline.RandomWindowInputFn(
        input_pipeline.NumpyReader(features), shuffle_seed=2, num_threads=1,
        batch_size=16, window_size=16)
    eval_input_fn = input_pipeline.RandomWindowInputFn(
        input_pipeline.NumpyReader(features), shuffle_seed=3, num_threads=1,
        batch_size=16, window_size=16)
    first_estimator.train(input_fn=train_input_fn, steps=1)
    first_evaluation = first_estimator.evaluate(
        input_fn=eval_input_fn, steps=1)
    first_loss_before_fit = first_evaluation["loss"]
    self.assertAllEqual(first_loss_before_fit, first_evaluation["average_loss"])
    self.assertAllEqual([], first_loss_before_fit.shape)
    first_estimator.train(input_fn=train_input_fn, steps=1)
    first_loss_after_fit = first_estimator.evaluate(
        input_fn=eval_input_fn, steps=1)["loss"]
    self.assertAllEqual([], first_loss_after_fit.shape)
    second_estimator = estimator_fn(model_dir, exogenous_feature_columns)
    second_estimator.train(input_fn=train_input_fn, steps=1)
    whole_dataset_input_fn = input_pipeline.WholeDatasetInputFn(
        input_pipeline.NumpyReader(features))
    whole_dataset_evaluation = second_estimator.evaluate(
        input_fn=whole_dataset_input_fn, steps=1)
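    # whole_dataset_evaluation includes the model's final state, which the
    # continuation input_fn and saved-model utilities below resume from.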
    exogenous_values_ten_steps = {
        "exogenous": numpy.arange(
            10, dtype=dtype.as_numpy_dtype)[None, :, None]
    }
    predict_input_fn = input_pipeline.predict_continuation_input_fn(
        evaluation=whole_dataset_evaluation,
        exogenous_features=exogenous_values_ten_steps,
        steps=10)
    # Also tests that limit_epochs in predict_continuation_input_fn prevents
    # infinite iteration
    (estimator_predictions,) = list(
        second_estimator.predict(input_fn=predict_input_fn))
    self.assertAllEqual([10, 1], estimator_predictions["mean"].shape)
    input_receiver_fn = first_estimator.build_raw_serving_input_receiver_fn()
    export_location = first_estimator.export_savedmodel(self.get_temp_dir(),
                                                        input_receiver_fn)
    with ops.Graph().as_default():
      with session.Session() as sess:
        signatures = loader.load(sess, [tag_constants.SERVING], export_location)
        # Test that prediction and filtering can continue from evaluation output
        saved_prediction = saved_model_utils.predict_continuation(
            continue_from=whole_dataset_evaluation,
            steps=10,
            exogenous_features=exogenous_values_ten_steps,
            signatures=signatures,
            session=sess)
        # Saved model predictions should be the same as Estimator predictions
        # starting from the same evaluation.
        for prediction_key, prediction_value in estimator_predictions.items():
          self.assertAllClose(prediction_value,
                              numpy.squeeze(
                                  saved_prediction[prediction_key], axis=0))
        first_filtering = saved_model_utils.filter_continuation(
            continue_from=whole_dataset_evaluation,
            features={
                feature_keys.FilteringFeatures.TIMES: times[None, -1] + 2,
                feature_keys.FilteringFeatures.VALUES: values[None, -1] + 2.,
                "exogenous": values[None, -1, None] + 12.
            },
            signatures=signatures,
            session=sess)
        # Test that prediction and filtering can continue from filtering output
        second_saved_prediction = saved_model_utils.predict_continuation(
            continue_from=first_filtering,
            steps=1,
            exogenous_features={
                "exogenous": numpy.arange(
                    1, dtype=dtype.as_numpy_dtype)[None, :, None]
            },
            signatures=signatures,
            session=sess)
        self.assertEqual(
            times[-1] + 3,
            numpy.squeeze(
                second_saved_prediction[feature_keys.PredictionResults.TIMES]))
        saved_model_utils.filter_continuation(
            continue_from=first_filtering,
            features={
                feature_keys.FilteringFeatures.TIMES: times[-1] + 3,
                feature_keys.FilteringFeatures.VALUES: values[-1] + 3.,
                "exogenous": values[-1, None] + 13.
            },
            signatures=signatures,
            session=sess)

        # Test cold starting
        six.assertCountEqual(
            self,
            [feature_keys.FilteringFeatures.TIMES,
             feature_keys.FilteringFeatures.VALUES,
             "exogenous"],
            signatures.signature_def[
                feature_keys.SavedModelLabels.COLD_START_FILTER].inputs.keys())
        batch_numpy_times = numpy.tile(
            numpy.arange(30, dtype=numpy.int64)[None, :], (10, 1))
        batch_numpy_values = numpy.ones([10, 30, 1])
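        # Unlike filter_continuation above, cold_start_filter takes no
        # continue_from argument: it establishes model state from the raw
        # features alone.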
        state = saved_model_utils.cold_start_filter(
            signatures=signatures,
            session=sess,
            features={
                feature_keys.FilteringFeatures.TIMES: batch_numpy_times,
                feature_keys.FilteringFeatures.VALUES: batch_numpy_values,
                "exogenous": 10. + batch_numpy_values
            }
        )
        predict_times = numpy.tile(
            numpy.arange(30, 45, dtype=numpy.int64)[None, :], (10, 1))
        predictions = saved_model_utils.predict_continuation(
            continue_from=state,
            times=predict_times,
            exogenous_features={
                "exogenous": numpy.tile(numpy.arange(
                    15, dtype=dtype.as_numpy_dtype), (10,))[None, :, None]
            },
            signatures=signatures,
            session=sess)
        self.assertAllEqual([10, 15, 1], predictions["mean"].shape)
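
The template above is parameterized by an estimator factory, so a concrete
test only has to supply a callable accepting (model_dir,
exogenous_feature_columns). A minimal sketch of such a test, assuming a
StructuralEnsembleRegressor with illustrative arguments:

  # Hypothetical concrete test built on the template above; the estimator
  # choice and its arguments are illustrative assumptions.
  def test_structural_ensemble_fit_restore_fit(self):
    def _estimator_fn(model_dir, exogenous_feature_columns):
      return estimators.StructuralEnsembleRegressor(
          periodicities=10, num_features=1, model_dir=model_dir,
          dtype=dtypes.float64,
          exogenous_feature_columns=exogenous_feature_columns)
    self._fit_restore_fit_test_template(_estimator_fn, dtype=dtypes.float64)
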
Example #2
 def test_savedmodel_state_override(self):
   random_model = RandomStateSpaceModel(
       state_dimension=5,
       state_noise_dimension=4,
       configuration=state_space_model.StateSpaceModelConfiguration(
           exogenous_feature_columns=[layers.real_valued_column("exogenous")],
           dtype=dtypes.float64, num_features=1))
   estimator = estimators.StateSpaceRegressor(
       model=random_model,
       optimizer=gradient_descent.GradientDescentOptimizer(0.1))
   combined_input_fn = input_pipeline.WholeDatasetInputFn(
       input_pipeline.NumpyReader({
           feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
           feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
           "exogenous": [-1., -2., -3., -4.]
       }))
   estimator.train(combined_input_fn, steps=1)
   export_location = estimator.export_savedmodel(
       self.get_temp_dir(),
       estimator.build_raw_serving_input_receiver_fn())
   with ops.Graph().as_default() as graph:
     random_model.initialize_graph()
     with self.session(graph=graph) as session:
       variables.global_variables_initializer().run()
       evaled_start_state = session.run(random_model.get_start_state())
   evaled_start_state = [
       state_element[None, ...] for state_element in evaled_start_state]
   with ops.Graph().as_default() as graph:
     with self.session(graph=graph) as session:
       signatures = loader.load(
           session, [tag_constants.SERVING], export_location)
       first_split_filtering = saved_model_utils.filter_continuation(
           continue_from={
               feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state},
           signatures=signatures,
           session=session,
           features={
               feature_keys.FilteringFeatures.TIMES: [1, 2],
               feature_keys.FilteringFeatures.VALUES: [1., 2.],
               "exogenous": [[-1.], [-2.]]})
       second_split_filtering = saved_model_utils.filter_continuation(
           continue_from=first_split_filtering,
           signatures=signatures,
           session=session,
           features={
               feature_keys.FilteringFeatures.TIMES: [3, 4],
               feature_keys.FilteringFeatures.VALUES: [3., 4.],
               "exogenous": [[-3.], [-4.]]
           })
       combined_filtering = saved_model_utils.filter_continuation(
           continue_from={
               feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state},
           signatures=signatures,
           session=session,
           features={
               feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
               feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
               "exogenous": [[-1.], [-2.], [-3.], [-4.]]
           })
       split_predict = saved_model_utils.predict_continuation(
           continue_from=second_split_filtering,
           signatures=signatures,
           session=session,
           steps=1,
           exogenous_features={
               "exogenous": [[[-5.]]]})
       combined_predict = saved_model_utils.predict_continuation(
           continue_from=combined_filtering,
           signatures=signatures,
           session=session,
           steps=1,
           exogenous_features={
               "exogenous": [[[-5.]]]})
   for state_key, combined_state_value in combined_filtering.items():
     if state_key == feature_keys.FilteringResults.TIMES:
       continue
     self.assertAllClose(
         combined_state_value, second_split_filtering[state_key])
   for prediction_key, combined_value in combined_predict.items():
     self.assertAllClose(combined_value, split_predict[prediction_key])
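
This test (and its variant in Example #3 below) depends on a
RandomStateSpaceModel helper defined elsewhere in the source file. A minimal
sketch of what such a helper might look like, assuming StateSpaceModel
subclasses supply the three hooks shown; the real helper may differ in detail:

  # Sketch of a state space model with fixed, randomly drawn parameters.
  class RandomStateSpaceModel(state_space_model.StateSpaceModel):

    def __init__(self, state_dimension, state_noise_dimension, configuration):
      numpy_dtype = configuration.dtype.as_numpy_dtype
      # Fixed random transition, noise, and observation matrices.
      self.transition = numpy.random.normal(
          size=[state_dimension, state_dimension]).astype(numpy_dtype)
      self.noise_transform = numpy.random.normal(
          size=[state_dimension, state_noise_dimension]).astype(numpy_dtype)
      self.observation = numpy.random.normal(
          size=[configuration.num_features,
                state_dimension]).astype(numpy_dtype)
      super(RandomStateSpaceModel, self).__init__(configuration=configuration)

    def get_state_transition(self):
      return self.transition  # [state dimension, state dimension]

    def get_noise_transform(self):
      return self.noise_transform  # [state dimension, noise dimension]

    def get_observation_model(self, times):
      return self.observation  # [num features, state dimension]
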
Example #3
 def test_savedmodel_state_override(self):
     random_model = RandomStateSpaceModel(
         state_dimension=5,
         state_noise_dimension=4,
         configuration=state_space_model.StateSpaceModelConfiguration(
             exogenous_feature_columns=[
                 layers.real_valued_column("exogenous")
             ],
             dtype=dtypes.float64,
             num_features=1))
     estimator = estimators.StateSpaceRegressor(
         model=random_model,
         optimizer=gradient_descent.GradientDescentOptimizer(0.1))
     combined_input_fn = input_pipeline.WholeDatasetInputFn(
         input_pipeline.NumpyReader({
             feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
             feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
             "exogenous": [-1., -2., -3., -4.]
         }))
     estimator.train(combined_input_fn, steps=1)
     export_location = estimator.export_savedmodel(
         self.get_temp_dir(),
         estimator.build_raw_serving_input_receiver_fn(
             exogenous_features={
                 "exogenous": numpy.zeros((0, 0), dtype=numpy.float32)
             }))
     with ops.Graph().as_default() as graph:
         random_model.initialize_graph()
         with self.test_session(graph=graph) as session:
             variables.global_variables_initializer().run()
             evaled_start_state = session.run(
                 random_model.get_start_state())
     evaled_start_state = [
         state_element[None, ...] for state_element in evaled_start_state
     ]
     with ops.Graph().as_default() as graph:
         with self.test_session(graph=graph) as session:
             signatures = loader.load(session, [tag_constants.SERVING],
                                      export_location)
             first_split_filtering = saved_model_utils.filter_continuation(
                 continue_from={
                     feature_keys.FilteringResults.STATE_TUPLE:
                     evaled_start_state
                 },
                 signatures=signatures,
                 session=session,
                 features={
                     feature_keys.FilteringFeatures.TIMES: [1, 2],
                     feature_keys.FilteringFeatures.VALUES: [1., 2.],
                     "exogenous": [-1., -2.]
                 })
             second_split_filtering = saved_model_utils.filter_continuation(
                 continue_from=first_split_filtering,
                 signatures=signatures,
                 session=session,
                 features={
                     feature_keys.FilteringFeatures.TIMES: [3, 4],
                     feature_keys.FilteringFeatures.VALUES: [3., 4.],
                     "exogenous": [-3., -4.]
                 })
             combined_filtering = saved_model_utils.filter_continuation(
                 continue_from={
                     feature_keys.FilteringResults.STATE_TUPLE:
                     evaled_start_state
                 },
                 signatures=signatures,
                 session=session,
                 features={
                     feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
                     feature_keys.FilteringFeatures.VALUES:
                     [1., 2., 3., 4.],
                     "exogenous": [-1., -2., -3., -4.]
                 })
             split_predict = saved_model_utils.predict_continuation(
                 continue_from=second_split_filtering,
                 signatures=signatures,
                 session=session,
                 steps=1,
                 exogenous_features={"exogenous": [[-5.]]})
             combined_predict = saved_model_utils.predict_continuation(
                 continue_from=combined_filtering,
                 signatures=signatures,
                 session=session,
                 steps=1,
                 exogenous_features={"exogenous": [[-5.]]})
     for state_key, combined_state_value in combined_filtering.items():
         if state_key == feature_keys.FilteringResults.TIMES:
             continue
         self.assertAllClose(combined_state_value,
                             second_split_filtering[state_key])
     for prediction_key, combined_value in combined_predict.items():
         self.assertAllClose(combined_value, split_predict[prediction_key])
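
The essential pattern this test exercises, in both versions shown, is that
filter_continuation can be seeded with an explicit state tuple instead of the
output of a prior evaluation or filtering call. Reduced to its core (names as
in the test above):

  # Any mapping carrying FilteringResults.STATE_TUPLE can stand in for
  # evaluation output as the continue_from argument.
  seeded_filtering = saved_model_utils.filter_continuation(
      continue_from={
          feature_keys.FilteringResults.STATE_TUPLE: evaled_start_state
      },
      signatures=signatures,
      session=session,
      features={
          feature_keys.FilteringFeatures.TIMES: [1, 2],
          feature_keys.FilteringFeatures.VALUES: [1., 2.],
          "exogenous": [-1., -2.]
      })
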
Example #4
    def _fit_restore_fit_test_template(self, estimator_fn, dtype):
        """Tests restoring previously fit models."""
        model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
        exogenous_feature_columns = (
            feature_column.numeric_column("exogenous"), )
        first_estimator = estimator_fn(model_dir, exogenous_feature_columns)
        times = numpy.arange(20, dtype=numpy.int64)
        values = numpy.arange(20, dtype=dtype.as_numpy_dtype)
        exogenous = numpy.arange(20, dtype=dtype.as_numpy_dtype)
        features = {
            feature_keys.TrainEvalFeatures.TIMES: times,
            feature_keys.TrainEvalFeatures.VALUES: values,
            "exogenous": exogenous
        }
        train_input_fn = input_pipeline.RandomWindowInputFn(
            input_pipeline.NumpyReader(features),
            shuffle_seed=2,
            num_threads=1,
            batch_size=16,
            window_size=16)
        eval_input_fn = input_pipeline.RandomWindowInputFn(
            input_pipeline.NumpyReader(features),
            shuffle_seed=3,
            num_threads=1,
            batch_size=16,
            window_size=16)
        first_estimator.train(input_fn=train_input_fn, steps=5)
        first_loss_before_fit = first_estimator.evaluate(
            input_fn=eval_input_fn, steps=1)["loss"]
        first_estimator.train(input_fn=train_input_fn, steps=50)
        first_loss_after_fit = first_estimator.evaluate(input_fn=eval_input_fn,
                                                        steps=1)["loss"]
        self.assertLess(first_loss_after_fit, first_loss_before_fit)
        second_estimator = estimator_fn(model_dir, exogenous_feature_columns)
        second_estimator.train(input_fn=train_input_fn, steps=2)
        whole_dataset_input_fn = input_pipeline.WholeDatasetInputFn(
            input_pipeline.NumpyReader(features))
        whole_dataset_evaluation = second_estimator.evaluate(
            input_fn=whole_dataset_input_fn, steps=1)
        exogenous_values_ten_steps = {
            "exogenous":
            numpy.arange(10, dtype=dtype.as_numpy_dtype)[None, :, None]
        }
        predict_input_fn = input_pipeline.predict_continuation_input_fn(
            evaluation=whole_dataset_evaluation,
            exogenous_features=exogenous_values_ten_steps,
            steps=10)
        # Also tests that limit_epochs in predict_continuation_input_fn prevents
        # infinite iteration
        (estimator_predictions, ) = list(
            second_estimator.predict(input_fn=predict_input_fn))
        self.assertAllEqual([10, 1], estimator_predictions["mean"].shape)
        input_receiver_fn = first_estimator.build_raw_serving_input_receiver_fn(
        )
        export_location = first_estimator.export_savedmodel(
            self.get_temp_dir(), input_receiver_fn)
        with ops.Graph().as_default():
            with session.Session() as sess:
                signatures = loader.load(sess, [tag_constants.SERVING],
                                         export_location)
                # Test that prediction and filtering can continue from evaluation output
                saved_prediction = saved_model_utils.predict_continuation(
                    continue_from=whole_dataset_evaluation,
                    steps=10,
                    exogenous_features=exogenous_values_ten_steps,
                    signatures=signatures,
                    session=sess)
                # Saved model predictions should be the same as Estimator predictions
                # starting from the same evaluation.
                for prediction_key, prediction_value in estimator_predictions.items(
                ):
                    self.assertAllClose(
                        prediction_value,
                        numpy.squeeze(saved_prediction[prediction_key],
                                      axis=0))
                first_filtering = saved_model_utils.filter_continuation(
                    continue_from=whole_dataset_evaluation,
                    features={
                        feature_keys.FilteringFeatures.TIMES:
                        times[None, -1] + 2,
                        feature_keys.FilteringFeatures.VALUES:
                        values[None, -1] + 2.,
                        "exogenous": values[None, -1, None] + 12.
                    },
                    signatures=signatures,
                    session=sess)
                # Test that prediction and filtering can continue from filtering output
                second_saved_prediction = saved_model_utils.predict_continuation(
                    continue_from=first_filtering,
                    steps=1,
                    exogenous_features={
                        "exogenous":
                        numpy.arange(1, dtype=dtype.as_numpy_dtype)[None, :,
                                                                    None]
                    },
                    signatures=signatures,
                    session=sess)
                self.assertEqual(
                    times[-1] + 3,
                    numpy.squeeze(second_saved_prediction[
                        feature_keys.PredictionResults.TIMES]))
                saved_model_utils.filter_continuation(
                    continue_from=first_filtering,
                    features={
                        feature_keys.FilteringFeatures.TIMES: times[-1] + 3,
                        feature_keys.FilteringFeatures.VALUES: values[-1] + 3.,
                        "exogenous": values[-1, None] + 13.
                    },
                    signatures=signatures,
                    session=sess)

                # Test cold starting
                six.assertCountEqual(
                    self, [
                        feature_keys.FilteringFeatures.TIMES,
                        feature_keys.FilteringFeatures.VALUES, "exogenous"
                    ],
                    signatures.signature_def[feature_keys.SavedModelLabels.
                                             COLD_START_FILTER].inputs.keys())
                batch_numpy_times = numpy.tile(
                    numpy.arange(30, dtype=numpy.int64)[None, :], (10, 1))
                batch_numpy_values = numpy.ones([10, 30, 1])
                state = saved_model_utils.cold_start_filter(
                    signatures=signatures,
                    session=sess,
                    features={
                        feature_keys.FilteringFeatures.TIMES:
                        batch_numpy_times,
                        feature_keys.FilteringFeatures.VALUES:
                        batch_numpy_values,
                        "exogenous": 10. + batch_numpy_values
                    })
                predict_times = numpy.tile(
                    numpy.arange(30, 45, dtype=numpy.int64)[None, :], (10, 1))
                predictions = saved_model_utils.predict_continuation(
                    continue_from=state,
                    times=predict_times,
                    exogenous_features={
                        "exogenous":
                        numpy.tile(
                            numpy.arange(15, dtype=dtype.as_numpy_dtype),
                            (10, ))[None, :, None]
                    },
                    signatures=signatures,
                    session=sess)
                self.assertAllEqual([10, 15, 1], predictions["mean"].shape)
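
Given the [10, 15, 1] shape asserted above, each of the ten cold-started
series gets its own 15-step univariate forecast. For illustration:

  # `i` is an arbitrary series index, 0 <= i < 10.
  series_forecast = predictions["mean"][i, :, 0]  # 15 predicted means
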
Example #5
 def _fit_restore_fit_test_template(self, estimator_fn, dtype):
   """Tests restoring previously fit models."""
   model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
   first_estimator = estimator_fn(model_dir)
   times = numpy.arange(20, dtype=numpy.int64)
   values = numpy.arange(20, dtype=dtype.as_numpy_dtype)
   features = {
       feature_keys.TrainEvalFeatures.TIMES: times,
       feature_keys.TrainEvalFeatures.VALUES: values
   }
   train_input_fn = input_pipeline.RandomWindowInputFn(
       input_pipeline.NumpyReader(features), shuffle_seed=2, num_threads=1,
       batch_size=16, window_size=16)
   eval_input_fn = input_pipeline.RandomWindowInputFn(
       input_pipeline.NumpyReader(features), shuffle_seed=3, num_threads=1,
       batch_size=16, window_size=16)
   first_estimator.train(input_fn=train_input_fn, steps=5)
   first_loss_before_fit = first_estimator.evaluate(
       input_fn=eval_input_fn, steps=1)["loss"]
   first_estimator.train(input_fn=train_input_fn, steps=50)
   first_loss_after_fit = first_estimator.evaluate(
       input_fn=eval_input_fn, steps=1)["loss"]
   self.assertLess(first_loss_after_fit, first_loss_before_fit)
   second_estimator = estimator_fn(model_dir)
   second_estimator.train(input_fn=train_input_fn, steps=2)
   whole_dataset_input_fn = input_pipeline.WholeDatasetInputFn(
       input_pipeline.NumpyReader(features))
   whole_dataset_evaluation = second_estimator.evaluate(
       input_fn=whole_dataset_input_fn, steps=1)
   predict_input_fn = input_pipeline.predict_continuation_input_fn(
       evaluation=whole_dataset_evaluation,
       steps=10)
   # Also tests that limit_epochs in predict_continuation_input_fn prevents
   # infinite iteration
   (estimator_predictions,) = list(
       second_estimator.predict(input_fn=predict_input_fn))
   self.assertAllEqual([10, 1], estimator_predictions["mean"].shape)
   input_receiver_fn = first_estimator.build_raw_serving_input_receiver_fn()
   export_location = first_estimator.export_savedmodel(self.get_temp_dir(),
                                                       input_receiver_fn)
   with ops.Graph().as_default():
     with session.Session() as sess:
       signatures = loader.load(sess, [tag_constants.SERVING], export_location)
       # Test that prediction and filtering can continue from evaluation output
       saved_prediction = saved_model_utils.predict_continuation(
           continue_from=whole_dataset_evaluation,
           steps=10,
           signatures=signatures,
           session=sess)
       # Saved model predictions should be the same as Estimator predictions
       # starting from the same evaluation.
       for prediction_key, prediction_value in estimator_predictions.items():
         self.assertAllClose(prediction_value,
                             numpy.squeeze(
                                 saved_prediction[prediction_key], axis=0))
       first_filtering = saved_model_utils.filter_continuation(
           continue_from=whole_dataset_evaluation,
           features={
               feature_keys.FilteringFeatures.TIMES: times[None, -1] + 2,
               feature_keys.FilteringFeatures.VALUES: values[None, -1] + 2.
           },
           signatures=signatures,
           session=sess)
       # Test that prediction and filtering can continue from filtering output
       second_saved_prediction = saved_model_utils.predict_continuation(
           continue_from=first_filtering,
           steps=1,
           signatures=signatures,
           session=sess)
       self.assertEqual(
           times[-1] + 3,
           numpy.squeeze(
               second_saved_prediction[feature_keys.PredictionResults.TIMES]))
       saved_model_utils.filter_continuation(
           continue_from=first_filtering,
           features={
               feature_keys.FilteringFeatures.TIMES: times[-1] + 3,
               feature_keys.FilteringFeatures.VALUES: values[-1] + 3.
           },
           signatures=signatures,
           session=sess)
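
This variant of the template drops exogenous features entirely, so the
factory only needs a model_dir argument. A minimal sketch of a concrete test,
assuming an ARRegressor with illustrative arguments (input and output window
sizes chosen to sum to the template's window_size of 16):

  # Hypothetical concrete test; any factory taking model_dir would work.
  def test_ar_fit_restore_fit(self):
    def _estimator_fn(model_dir):
      return estimators.ARRegressor(
          periodicities=10, input_window_size=10, output_window_size=6,
          num_features=1, model_dir=model_dir)
    self._fit_restore_fit_test_template(_estimator_fn, dtype=dtypes.float32)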