Example #1
0
 def test_multivariate(self):
     """Train a multivariate level model for one step and check that input
     statistics propagate to every ensemble member."""
     feature_count = 3
     sample_count = 100
     # True transition-noise covariance: identity plus a single symmetric
     # off-diagonal coupling between the first and last features.
     noise_cov = numpy.eye(feature_count)
     noise_cov[-1, 0] = 1.
     noise_cov[0, -1] = 1.
     noise = numpy.random.multivariate_normal(
         mean=numpy.zeros(feature_count), cov=noise_cov, size=sample_count)
     # Integrate the noise into a random walk so a level model fits it.
     observations = numpy.cumsum(noise, axis=0)
     configuration = state_space_model.StateSpaceModelConfiguration(
         num_features=feature_count,
         dtype=dtypes.float32,
         use_observation_noise=False,
         transition_covariance_initial_log_scale_bias=5.)
     level_model = MultivariateLevelModel(configuration=configuration)
     regressor = estimators.StateSpaceRegressor(
         model=level_model,
         optimizer=gradient_descent.GradientDescentOptimizer(0.1))
     features = {
         feature_keys.TrainEvalFeatures.TIMES: numpy.arange(sample_count),
         feature_keys.TrainEvalFeatures.VALUES: observations
     }
     window_input_fn = input_pipeline.RandomWindowInputFn(
         input_pipeline.NumpyReader(features), batch_size=16, window_size=16)
     regressor.train(input_fn=window_input_fn, steps=1)
     # Check that input statistics propagated to component models.
     for member in level_model._ensemble_members:
         self.assertTrue(member._input_statistics)
Example #2
0
 def _time_dependency_test_template(self, model_type):
     """Test that a time-dependent observation model influences predictions.

     Args:
       model_type: Zero-argument callable returning the state space model
         under test.
     """
     model = model_type()
     estimator = estimators.StateSpaceRegressor(
         model=model,
         optimizer=gradient_descent.GradientDescentOptimizer(0.1))
     # Shape [batch=1, time=4, features=1].
     values = numpy.reshape([1., 2., 3., 4.], newshape=[1, 4, 1])
     input_fn = input_pipeline.WholeDatasetInputFn(
         input_pipeline.NumpyReader({
             feature_keys.TrainEvalFeatures.TIMES: [[0, 1, 2, 3]],
             feature_keys.TrainEvalFeatures.VALUES:
             values
         }))
     estimator.train(input_fn=input_fn, max_steps=1)
     predicted_values = estimator.evaluate(input_fn=input_fn,
                                           steps=1)["mean"]
     # Throw out the first value so we don't test the prior. Slice the time
     # axis (axis 1): the previous `values[1:]` sliced the singleton batch
     # axis, comparing two empty arrays and making the assertion vacuous.
     # NOTE(review): assumes the "mean" output is batch-major like `values`
     # — confirm against the estimator's evaluate output shape.
     self.assertAllEqual(values[:, 1:], predicted_values[:, 1:])
Example #3
0
 def test_loop_unrolling(self):
     """Tests running/restoring from a checkpoint with static unrolling."""
     # Threshold of 2 means the size-2 training windows are statically
     # unrolled, while whole-dataset evaluation is not.
     unrolled_model = TimeDependentStateSpaceModel(
         static_unrolling_window_size_threshold=2)
     regressor = estimators.StateSpaceRegressor(model=unrolled_model)
     features = {
         feature_keys.TrainEvalFeatures.TIMES: numpy.arange(100),
         feature_keys.TrainEvalFeatures.VALUES: numpy.arange(100)
     }
     train_fn = input_pipeline.RandomWindowInputFn(
         input_pipeline.NumpyReader(features), batch_size=16, window_size=2)
     eval_fn = input_pipeline.WholeDatasetInputFn(
         input_pipeline.NumpyReader(features))
     regressor.train(input_fn=train_fn, max_steps=1)
     regressor.evaluate(input_fn=eval_fn, steps=1)
Example #4
0
    def dry_run_train_helper(self,
                             sample_every,
                             period,
                             num_samples,
                             model_type,
                             model_args,
                             num_features=1):
        """Train a model briefly on synthetic data and check that additional
        training steps reduce the evaluation loss."""
        numpy.random.seed(1)
        features = self.simple_data(
            sample_every,
            dtype=dtypes.float32,
            period=period,
            num_samples=num_samples,
            num_features=num_features)
        configuration = state_space_model.StateSpaceModelConfiguration(
            num_features=num_features,
            dtype=dtypes.float32,
            covariance_prior_fn=lambda _: 0.)
        model = model_type(configuration=configuration, **model_args)

        class _RunConfig(estimator_lib.RunConfig):
            # Fix the graph-level random seed so the dry run is deterministic.
            @property
            def tf_random_seed(self):
                return 4

        estimator = estimators.StateSpaceRegressor(model, config=_RunConfig())
        train_fn = input_pipeline.RandomWindowInputFn(
            input_pipeline.NumpyReader(features),
            num_threads=1,
            shuffle_seed=1,
            batch_size=16,
            window_size=16)
        eval_fn = input_pipeline.WholeDatasetInputFn(
            input_pipeline.NumpyReader(features))
        estimator.train(input_fn=train_fn, max_steps=1)
        before = estimator.evaluate(input_fn=eval_fn, steps=1)
        estimator.train(input_fn=train_fn, max_steps=3)
        after = estimator.evaluate(input_fn=eval_fn, steps=1)
        # Two extra training steps should have reduced the loss.
        self.assertLess(after["loss"], before["loss"])
Example #5
0
 def test_savedmodel_state_override(self):
     """Exports a trained model to a SavedModel, then checks that filtering
     the series in two splits — starting from an explicitly overridden start
     state — produces the same filtering state and predictions as filtering
     the whole series at once from the same start state."""
     random_model = RandomStateSpaceModel(
         state_dimension=5,
         state_noise_dimension=4,
         configuration=state_space_model.StateSpaceModelConfiguration(
             exogenous_feature_columns=[
                 layers.real_valued_column("exogenous")
             ],
             dtype=dtypes.float64,
             num_features=1))
     estimator = estimators.StateSpaceRegressor(
         model=random_model,
         optimizer=gradient_descent.GradientDescentOptimizer(0.1))
     combined_input_fn = input_pipeline.WholeDatasetInputFn(
         input_pipeline.NumpyReader({
             feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
             feature_keys.FilteringFeatures.VALUES: [1., 2., 3., 4.],
             "exogenous": [-1., -2., -3., -4.]
         }))
     # One training step so the exported model has initialized variables.
     estimator.train(combined_input_fn, steps=1)
     export_location = estimator.export_saved_model(
         self.get_temp_dir(),
         estimator.build_raw_serving_input_receiver_fn())
     # Evaluate the model's default start state in a throwaway graph; this
     # is the state we will explicitly feed into the SavedModel below.
     with ops.Graph().as_default() as graph:
         random_model.initialize_graph()
         with self.session(graph=graph) as session:
             variables.global_variables_initializer().run()
             evaled_start_state = session.run(
                 random_model.get_start_state())
     # Prepend a batch dimension of 1 to each start-state element.
     evaled_start_state = [
         state_element[None, ...] for state_element in evaled_start_state
     ]
     with ops.Graph().as_default() as graph:
         with self.session(graph=graph) as session:
             signatures = loader.load(session, [tag_constants.SERVING],
                                      export_location)
             # Filter the first half of the series from the overridden
             # start state.
             first_split_filtering = saved_model_utils.filter_continuation(
                 continue_from={
                     feature_keys.FilteringResults.STATE_TUPLE:
                     evaled_start_state
                 },
                 signatures=signatures,
                 session=session,
                 features={
                     feature_keys.FilteringFeatures.TIMES: [1, 2],
                     feature_keys.FilteringFeatures.VALUES: [1., 2.],
                     "exogenous": [[-1.], [-2.]]
                 })
             # Continue filtering the second half from where the first
             # split left off.
             second_split_filtering = saved_model_utils.filter_continuation(
                 continue_from=first_split_filtering,
                 signatures=signatures,
                 session=session,
                 features={
                     feature_keys.FilteringFeatures.TIMES: [3, 4],
                     feature_keys.FilteringFeatures.VALUES: [3., 4.],
                     "exogenous": [[-3.], [-4.]]
                 })
             # Filter the whole series in one shot from the same overridden
             # start state, for comparison against the split runs.
             combined_filtering = saved_model_utils.filter_continuation(
                 continue_from={
                     feature_keys.FilteringResults.STATE_TUPLE:
                     evaled_start_state
                 },
                 signatures=signatures,
                 session=session,
                 features={
                     feature_keys.FilteringFeatures.TIMES: [1, 2, 3, 4],
                     feature_keys.FilteringFeatures.VALUES:
                     [1., 2., 3., 4.],
                     "exogenous": [[-1.], [-2.], [-3.], [-4.]]
                 })
             # One-step-ahead predictions from each filtering result.
             split_predict = saved_model_utils.predict_continuation(
                 continue_from=second_split_filtering,
                 signatures=signatures,
                 session=session,
                 steps=1,
                 exogenous_features={"exogenous": [[[-5.]]]})
             combined_predict = saved_model_utils.predict_continuation(
                 continue_from=combined_filtering,
                 signatures=signatures,
                 session=session,
                 steps=1,
                 exogenous_features={"exogenous": [[[-5.]]]})
     # The combined run and the second split should end in the same state.
     # TIMES differ by construction (four steps vs. only the last two), so
     # skip that key.
     for state_key, combined_state_value in combined_filtering.items():
         if state_key == feature_keys.FilteringResults.TIMES:
             continue
         self.assertAllClose(combined_state_value,
                             second_split_filtering[state_key])
     # Predictions from the identical end states should also agree.
     for prediction_key, combined_value in combined_predict.items():
         self.assertAllClose(combined_value, split_predict[prediction_key])