# Example 1
def _custom_time_series_regressor(
    model_dir, head_type, exogenous_feature_columns):
  """Builds a TimeSeriesRegressor wrapping an LSTM model for tests.

  Args:
    model_dir: Directory for checkpoints and summaries.
    head_type: Head class to use for the estimator.
    exogenous_feature_columns: Feature columns for exogenous inputs.

  Returns:
    A configured `ts_estimators.TimeSeriesRegressor`.
  """
  lstm_model = lstm_example._LSTMModel(
      num_features=5,
      num_units=128,
      exogenous_feature_columns=exogenous_feature_columns)
  # Fixed tf_random_seed keeps runs deterministic across invocations.
  run_config = estimator_lib.RunConfig(tf_random_seed=4)
  return ts_estimators.TimeSeriesRegressor(
      model=lstm_model,
      optimizer=adam.AdamOptimizer(0.001),
      config=run_config,
      state_manager=state_management.ChainingStateManager(),
      head_type=head_type,
      model_dir=model_dir)
# Example 2
    def test_custom_metrics(self):
        """Tests that the custom metrics can be applied to the estimator."""
        model_dir = self.get_temp_dir()
        estimator = ts_estimators.TimeSeriesRegressor(
            model=lstm_example._LSTMModel(num_features=1, num_units=4),
            optimizer=adam.AdamOptimizer(0.001),
            config=estimator_lib.RunConfig(tf_random_seed=4),
            model_dir=model_dir)

        def _eval_input_fn():
            # Two series of three steps each; values are univariate.
            times = [[1, 2, 3], [7, 8, 9]]
            values = numpy.array([[[0.], [1.], [0.]], [[2.], [3.], [2.]]])
            return {
                feature_keys.TrainEvalFeatures.TIMES: times,
                feature_keys.TrainEvalFeatures.VALUES: values,
            }

        def _metrics_fn(predictions, features):
            # Checks that predictions and features are properly passed in.
            mean_prediction = predictions["mean"]
            last_value = features[
                feature_keys.TrainEvalFeatures.VALUES][:, -1, 0]
            abs_error = math_ops.reduce_mean(
                math_ops.abs(mean_prediction - last_value))
            total = math_ops.reduce_sum(mean_prediction + last_value)
            return {
                "plain_boring_metric386": (abs_error,
                                           control_flow_ops.no_op()),
                "fun_metric101": (total, control_flow_ops.no_op()),
            }

        # Evaluation without training is enough for testing custom metrics.
        estimator = extenders.add_metrics(estimator, _metrics_fn)
        evaluation = estimator.evaluate(_eval_input_fn, steps=1)
        self.assertIn("plain_boring_metric386", evaluation)
        self.assertIn("fun_metric101", evaluation)
        self.assertIn("average_loss", evaluation)
        # The values are deterministic because of fixed tf_random_seed.
        # However if they become flaky, remove such exacts comparisons.
        self.assertAllClose(evaluation["plain_boring_metric386"], 1.130380)
        self.assertAllClose(evaluation["fun_metric101"], 10.435442)
# Example 3
# Fill in the remaining run hyperparameters in one shot.
params.update({
    'total_steps_train': params['steps_per_epoch'] * epochs_to_train,
    'throttle_eval': throttle_mins * 60,
    'momentum': 0.9,
    'bias_reduction': 0.1,
    'epochs_to_reduce_at': [40, 120],
    'initial_learning_rate': 0.1,
    'epoch_reduction_factor': 0.1,
    'mixup_val': 0.7,
})
pprint(params)

# get data loader
cifar_data = CIFAR10(
    batch_size=params['batch_size'],
    mixup_val=params['mixup_val'])

# Checkpoint once per epoch; summaries every 500 steps; keep 5 checkpoints.
run_config = estimator.RunConfig(
    save_checkpoints_steps=params['steps_per_epoch'],
    save_summary_steps=500,
    keep_checkpoint_max=5)

fixup_estimator = estimator.Estimator(
    model_dir=model_dir,
    model_fn=model.model_fn,
    params=params,
    config=run_config)

# training/evaluation specs for run
train_spec = estimator.TrainSpec(
    input_fn=cifar_data.build_training_data,
    max_steps=params['total_steps_train'])
eval_spec = estimator.EvalSpec(
    input_fn=cifar_data.build_validation_data,
    steps=None,
    throttle_secs=params['throttle_eval'],
    start_delay_secs=0)
# Example 4
 def test_one_shot_prediction_head_export(self):
     """Trains briefly, exports a SavedModel with a one-shot prediction
     head, then reloads it in a fresh graph and runs its PREDICT signature.
     """
     model_dir = self.get_temp_dir()
     # One hashed categorical and one 2-d numeric exogenous feature.
     categorical_column = feature_column.categorical_column_with_hash_bucket(
         key="categorical_exogenous_feature", hash_bucket_size=16)
     exogenous_feature_columns = [
         feature_column.numeric_column("2d_exogenous_feature", shape=(2, )),
         feature_column.embedding_column(
             categorical_column=categorical_column, dimension=10)
     ]
     # Fixed tf_random_seed keeps the run deterministic.
     estimator = ts_estimators.TimeSeriesRegressor(
         model=lstm_example._LSTMModel(
             num_features=5,
             num_units=128,
             exogenous_feature_columns=exogenous_feature_columns),
         optimizer=adam.AdamOptimizer(0.001),
         config=estimator_lib.RunConfig(tf_random_seed=4),
         state_manager=state_management.ChainingStateManager(),
         head_type=ts_head_lib.OneShotPredictionHead,
         model_dir=model_dir)
     # 20 time steps of 5-feature values plus both exogenous features.
     train_features = {
         feature_keys.TrainEvalFeatures.TIMES:
         numpy.arange(20, dtype=numpy.int64),
         feature_keys.TrainEvalFeatures.VALUES:
         numpy.tile(numpy.arange(20, dtype=numpy.float32)[:, None], [1, 5]),
         "2d_exogenous_feature":
         numpy.ones([20, 2]),
         "categorical_exogenous_feature":
         numpy.array(["strkey"] * 20)[:, None]
     }
     train_input_fn = input_pipeline.RandomWindowInputFn(
         input_pipeline.NumpyReader(train_features),
         shuffle_seed=2,
         num_threads=1,
         batch_size=16,
         window_size=16)
     # A few steps suffice: the test exercises export, not model quality.
     estimator.train(input_fn=train_input_fn, steps=5)
     input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
     export_location = estimator.export_savedmodel(self.get_temp_dir(),
                                                   input_receiver_fn)
     # Reload the SavedModel into a fresh graph/session so the exported
     # PREDICT signature is exercised end to end, independent of training.
     graph = ops.Graph()
     with graph.as_default():
         with session_lib.Session() as session:
             signatures = loader.load(session, [tag_constants.SERVING],
                                      export_location)
             self.assertEqual([feature_keys.SavedModelLabels.PREDICT],
                              list(signatures.signature_def.keys()))
             predict_signature = signatures.signature_def[
                 feature_keys.SavedModelLabels.PREDICT]
             # The signature must expose times, values and both exogenous
             # features as its inputs.
             six.assertCountEqual(self, [
                 feature_keys.FilteringFeatures.TIMES,
                 feature_keys.FilteringFeatures.VALUES,
                 "2d_exogenous_feature", "categorical_exogenous_feature"
             ], predict_signature.inputs.keys())
             # Batch of 2 series: 35 timestamps but only 20 observed
             # values, leaving 15 steps for the model to predict.
             features = {
                 feature_keys.TrainEvalFeatures.TIMES:
                 numpy.tile(
                     numpy.arange(35, dtype=numpy.int64)[None, :], [2, 1]),
                 feature_keys.TrainEvalFeatures.VALUES:
                 numpy.tile(
                     numpy.arange(20, dtype=numpy.float32)[None, :, None],
                     [2, 1, 5]),
                 "2d_exogenous_feature":
                 numpy.ones([2, 35, 2]),
                 "categorical_exogenous_feature":
                 numpy.tile(
                     numpy.array(["strkey"] * 35)[None, :, None], [2, 1, 1])
             }
             # Map signature tensor names back to tensors in this graph.
             feeds = {
                 graph.as_graph_element(input_value.name):
                 features[input_key]
                 for input_key, input_value in
                 predict_signature.inputs.items()
             }
             fetches = {
                 output_key: graph.as_graph_element(output_value.name)
                 for output_key, output_value in
                 predict_signature.outputs.items()
             }
             output = session.run(fetches, feed_dict=feeds)
             # 2 series, 15 predicted steps, 5 features each.
             self.assertAllEqual((2, 15, 5), output["mean"].shape)
# Example 5
    def compile(self, run_config, envir_config, model_fn):
        """Builds the tf.Estimator executor from the supplied configs.

        Configures file + stdout logging, assembles the estimator
        ``RunConfig``, wraps the per-mode ``model_fn`` callables into a
        single Estimator ``model_fn``, and asks the user to confirm the
        configuration before proceeding (exits on anything but yes/y/1).

        Args:
            run_config: Run configuration object (directories, checkpoint
                settings, learning-rate schedule, logging frequency, etc.).
            envir_config: Environment configuration passed to
                ``self._create_session_config``.
            model_fn: Mapping from estimator mode keys to callables that
                build the per-mode graph.
        """
        import logging
        import sys

        # Mirror TensorFlow logs to both a file in the store dir and stdout.
        # Verbosity is set once (the original set it twice, redundantly).
        tf.logging.set_verbosity(tf.logging.INFO)
        handlers = [
            logging.FileHandler(os.path.join(run_config.store_dir,
                                             'main.log')),
            logging.StreamHandler(sys.stdout)
        ]
        logging.getLogger('tensorflow').handlers = handlers
        self.test_dir = run_config.test_dir
        start = time.time()
        session_config = self._create_session_config(envir_config)
        exe_config = estimator.RunConfig(
            model_dir=run_config.model_dir,
            session_config=session_config,
            save_summary_steps=run_config.save_summary_steps,
            keep_checkpoint_max=run_config.keep_checkpoint_max,
            save_checkpoints_steps=run_config.save_checkpoints_steps,
            keep_checkpoint_every_n_hours=run_config.
            keep_checkpoint_every_n_hours)

        def _model_fn(features, labels, mode):
            """Dispatches to the train/eval/predict graph builders."""
            if mode == estimator.ModeKeys.TRAIN:
                loss, accuracy, var_list, hooks = model_fn[mode](features,
                                                                 labels,
                                                                 run_config)
                # Learning rate schedule.
                # todo organize lr and optimizer configuration
                learning_rate = run_config.learning_rate
                if run_config.scheduler == 'exponential':
                    learning_rate = tf.train.exponential_decay(
                        learning_rate=learning_rate,
                        global_step=tf.train.get_or_create_global_step(),
                        decay_steps=run_config.decay_steps,
                        decay_rate=run_config.decay_rate,
                        staircase=run_config.staircase)
                elif run_config.scheduler == 'step':
                    learning_rate = step_lr(boundaries=run_config.boundaries,
                                            values=run_config.lr_values)
                else:
                    # No scheduler configured: constant learning rate.
                    learning_rate = tf.constant(learning_rate,
                                                dtype=tf.float32)
                tf.summary.scalar('lr', learning_rate)
                # Optimizer
                optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
                # Periodic console logging of loss/accuracy.
                hooks += [
                    LoggerHook(learning_rate=learning_rate,
                               log_frequency=run_config.log_frequency,
                               batch_size=run_config.batch_size,
                               loss=loss,
                               accuracy=accuracy,
                               metric_names=run_config.class_names)
                ]
                if hasattr(run_config, 'lr_multiplier'):
                    # Per-variable learning-rate multipliers.
                    train_op = multi_lr(optimizer, loss, var_list,
                                        run_config.lr_multiplier)
                else:
                    train_op = optimizer.minimize(
                        loss,
                        global_step=tf.train.get_global_step(),
                        var_list=var_list)
                return estimator.EstimatorSpec(estimator.ModeKeys.TRAIN,
                                               loss=loss,
                                               training_hooks=hooks,
                                               train_op=train_op)
            elif mode == estimator.ModeKeys.EVAL:
                loss, metrics = model_fn[mode](features, labels, run_config)
                return estimator.EstimatorSpec(estimator.ModeKeys.EVAL,
                                               loss=loss,
                                               eval_metric_ops=metrics)
            elif mode == estimator.ModeKeys.PREDICT:
                predictions = model_fn[mode](features, run_config)
                return estimator.EstimatorSpec(estimator.ModeKeys.PREDICT,
                                               predictions)
            else:
                raise ValueError("Expect mode in [train, eval, infer],"
                                 "but received {}".format(mode))

        self.executor = estimator.Estimator(model_fn=_model_fn,
                                            model_dir=run_config.model_dir,
                                            config=exe_config)
        self.steps = run_config.steps
        # "{:.2f}" shows elapsed seconds with two decimals; the original
        # "{:.2}" printed only 2 significant digits (e.g. "1.2e+02").
        print(">>>>>>>>>>>>Finish Compiling in {:.2f}s>>>>>>>>>>>>".format(
            time.time() - start))
        print(envir_config)
        print(run_config)
        flag = input('Is all config correct? (yes/no)')
        if flag not in ['yes', 'y', '1']:
            # sys.exit (unlike the site-module `exit` helper) is always
            # available, including under `python -S`.
            sys.exit(-1)