Example #1
    def test_raise_error_with_custom_init_fn_in_eval(self):
        def model_fn(features, labels, mode):
            _, _ = features, labels

            def init_fn(scaffold, session):
                _, _ = scaffold, session

            return estimator_lib.EstimatorSpec(
                mode,
                loss=constant_op.constant(3.),
                scaffold=training.Scaffold(init_fn=init_fn),
                train_op=constant_op.constant(5.),
                eval_metric_ops={
                    'mean_of_features':
                    metrics_lib.mean(constant_op.constant(2.))
                })

        estimator = estimator_lib.Estimator(model_fn=model_fn)

        def input_fn():
            return dataset_ops.Dataset.range(10)

        evaluator = hooks_lib.InMemoryEvaluatorHook(estimator, input_fn)
        with self.assertRaisesRegexp(ValueError,
                                     'does not support custom init_fn'):
            evaluator.begin()
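
These snippets use the short module aliases of TensorFlow's internal test code rather than the public API. For orientation only, the sketch below maps those aliases onto public TF-1.x-compatible names; the mapping is an assumption made for readability, not the original file's imports (variables.VariableV1, run_config_lib.TaskType and test.mock have no exact public alias and roughly correspond to tf.Variable, the task-type strings 'chief'/'ps', and unittest.mock).

import tensorflow.compat.v1 as tf
from tensorflow import estimator as estimator_lib  # Estimator, EstimatorSpec, DNNClassifier, ModeKeys

constant_op = tf                        # constant_op.constant -> tf.constant
control_flow_ops = tf                   # control_flow_ops.group -> tf.group
dataset_ops = tf.data                   # dataset_ops.Dataset -> tf.data.Dataset
feature_column_lib = tf.feature_column  # embedding_column, numeric_column, ...
metrics_lib = tf.metrics                # metrics_lib.mean -> tf.metrics.mean
metrics_module = tf.keras.metrics       # metrics_module.Mean -> tf.keras.metrics.Mean
ops = tf                                # ops.GraphKeys -> tf.GraphKeys
training = tf.train                     # Scaffold, Saver, get_global_step
hooks_lib = estimator_lib.experimental  # InMemoryEvaluatorHook
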
Example #2
    def test_raise_error_with_saveables_other_than_global_variables(self):
        def model_fn(features, labels, mode):
            _, _ = features, labels
            w = variables.VariableV1(
                initial_value=[0.],
                trainable=False,
                collections=[ops.GraphKeys.SAVEABLE_OBJECTS])
            init_op = control_flow_ops.group(
                [w.initializer,
                 training.get_global_step().initializer])
            return estimator_lib.EstimatorSpec(
                mode,
                loss=constant_op.constant(3.),
                scaffold=training.Scaffold(init_op=init_op),
                train_op=constant_op.constant(5.),
                eval_metric_ops={
                    'mean_of_features':
                    metrics_lib.mean(constant_op.constant(2.))
                })

        estimator = estimator_lib.Estimator(model_fn=model_fn)

        def input_fn():
            return dataset_ops.Dataset.range(10)

        evaluator = hooks_lib.InMemoryEvaluatorHook(estimator, input_fn)
        with self.assertRaisesRegexp(ValueError, 'does not support saveables'):
            estimator.train(input_fn, hooks=[evaluator])
Example #3
    def test_dnn_classifier(self):
        embedding = feature_column_lib.embedding_column(
            feature_column_lib.categorical_column_with_vocabulary_list(
                'wire_cast', ['kima', 'omar', 'stringer']), 8)
        dnn = estimator_lib.DNNClassifier(feature_columns=[embedding],
                                          hidden_units=[3, 1])

        def train_input_fn():
            return dataset_ops.Dataset.from_tensors(({
                'wire_cast': [['omar'], ['kima']]
            }, [[0], [1]])).repeat(3)

        def eval_input_fn():
            return dataset_ops.Dataset.from_tensors(({
                'wire_cast': [['stringer'], ['kima']]
            }, [[0], [1]])).repeat(2)

        evaluator = hooks_lib.InMemoryEvaluatorHook(dnn,
                                                    eval_input_fn,
                                                    name='in-memory')
        dnn.train(train_input_fn, hooks=[evaluator])
        self.assertTrue(os.path.isdir(dnn.eval_dir('in-memory')))
        step_keyword_to_value = summary_step_keyword_to_value_mapping(
            dnn.eval_dir('in-memory'))

        final_metrics = dnn.evaluate(eval_input_fn)
        step = final_metrics[ops.GraphKeys.GLOBAL_STEP]
        for summary_tag in final_metrics:
            if summary_tag == ops.GraphKeys.GLOBAL_STEP:
                continue
            self.assertEqual(final_metrics[summary_tag],
                             step_keyword_to_value[step][summary_tag])
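
Examples #3 through #5 also call a helper, summary_step_keyword_to_value_mapping, that is not shown in this listing. Below is a minimal sketch of what such a helper could look like, assuming it reads the newest events file written to the eval directory and returns a {global_step: {summary_tag: value}} mapping:

import glob
import os

import tensorflow.compat.v1 as tf


def summary_step_keyword_to_value_mapping(eval_dir):
    # Pick the newest events file the evaluator wrote into eval_dir.
    event_paths = sorted(glob.glob(os.path.join(eval_dir, 'events*')))
    step_to_values = {}
    for event in tf.train.summary_iterator(event_paths[-1]):
        values = step_to_values.setdefault(event.step, {})
        for value in event.summary.value:
            values[value.tag] = value.simple_value
    return step_to_values
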
Example #4
    def test_uses_latest_variable_value(self):
        def model_fn(features, labels, mode):
            _ = labels
            step = tf.compat.v1.train.get_global_step()
            w = tf.compat.v1.get_variable(
                'w',
                shape=[],
                initializer=tf.compat.v1.initializers.zeros(),
                dtype=tf.dtypes.int64)
            if estimator_lib.ModeKeys.TRAIN == mode:
                # control dependency on features ensures the input is consumed
                with tf.control_dependencies([features]):
                    step_inc = tf.compat.v1.assign_add(
                        tf.compat.v1.train.get_global_step(), 1)
                with tf.control_dependencies([step_inc]):
                    assign_w_to_step_plus_2 = w.assign(step + 2)
                return estimator_lib.EstimatorSpec(
                    mode,
                    loss=tf.constant(3.),
                    train_op=assign_w_to_step_plus_2)
            if estimator_lib.ModeKeys.EVAL == mode:
                # control dependency on features ensures the input is consumed
                with tf.control_dependencies([features]):
                    loss = tf.constant(5.)
                mean = metrics_module.Mean()
                mean.update_state(w)
                return estimator_lib.EstimatorSpec(
                    mode,
                    loss=loss,
                    # w is constant within each evaluation, so the mean
                    # equals w itself: w = 0 if step == 0 else step + 2.
                    eval_metric_ops={'mean_of_const': mean})

        estimator = estimator_lib.Estimator(model_fn=model_fn)

        def input_fn():
            return tf.compat.v1.data.Dataset.range(10)

        evaluator = hooks_lib.InMemoryEvaluatorHook(estimator,
                                                    input_fn,
                                                    every_n_iter=4)
        estimator.train(input_fn, hooks=[evaluator])

        self.assertTrue(os.path.isdir(estimator.eval_dir()))
        step_keyword_to_value = summary_step_keyword_to_value_mapping(
            estimator.eval_dir())
        # w = 0 if step==0 else step+2
        self.assertEqual(0, step_keyword_to_value[0]['mean_of_const'])
        self.assertEqual(6, step_keyword_to_value[4]['mean_of_const'])
        self.assertEqual(12, step_keyword_to_value[10]['mean_of_const'])
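
The expected values above follow from the train_op: each training step first increments the global step and then assigns w = global_step + 2, while before any training w keeps its zero initializer. A plain-Python check of the asserted metric values, assuming (as the test asserts) that evaluations run at global steps 0, 4 and 10:

def expected_mean_of_const(step):
    # w is 0 before training; after N training steps the last assignment
    # has set w = N + 2.
    return 0 if step == 0 else step + 2

assert [expected_mean_of_const(s) for s in (0, 4, 10)] == [0, 6, 12]
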
Example #5
    def test_runs_eval_metrics(self):
        def model_fn(features, labels, mode):
            _ = labels
            if estimator_lib.ModeKeys.TRAIN == mode:
                with tf.control_dependencies([features]):
                    train_op = tf.compat.v1.assign_add(
                        tf.compat.v1.train.get_global_step(), 1)
                return estimator_lib.EstimatorSpec(mode,
                                                   loss=tf.constant(3.),
                                                   train_op=train_op)
            if estimator_lib.ModeKeys.EVAL == mode:
                mean = metrics_module.Mean()
                mean.update_state(features)
                return estimator_lib.EstimatorSpec(mode,
                                                   loss=tf.constant(5.),
                                                   eval_metric_ops={
                                                       'mean_of_features':
                                                       mean,
                                                   })

        estimator = estimator_lib.Estimator(model_fn=model_fn)

        def input_fn():
            return tf.compat.v1.data.Dataset.range(10)

        evaluator = hooks_lib.InMemoryEvaluatorHook(estimator,
                                                    input_fn,
                                                    every_n_iter=4)
        estimator.train(input_fn, hooks=[evaluator])

        self.assertTrue(os.path.isdir(estimator.eval_dir()))
        step_keyword_to_value = summary_step_keyword_to_value_mapping(
            estimator.eval_dir())

        # 4.5 = sum(range(10))/10
        # before training
        self.assertEqual(4.5, step_keyword_to_value[0]['mean_of_features'])
        # intervals (every_n_iter=4)
        self.assertEqual(4.5, step_keyword_to_value[4]['mean_of_features'])
        self.assertEqual(4.5, step_keyword_to_value[8]['mean_of_features'])
        # end
        self.assertEqual(4.5, step_keyword_to_value[10]['mean_of_features'])
        self.assertEqual(set([0, 4, 8, 10]), set(step_keyword_to_value.keys()))
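
The schedule asserted above is the hook's evaluation pattern in this test: one evaluation before training (step 0), one after every every_n_iter training steps, and one at the end of training. A plain-Python sketch of that schedule, assuming training runs for 10 global steps as driven by the 10-element dataset:

def expected_eval_steps(total_steps, every_n_iter):
    steps = {0, total_steps}
    steps.update(range(every_n_iter, total_steps, every_n_iter))
    return steps

assert expected_eval_steps(10, 4) == {0, 4, 8, 10}

# The eval input_fn is Dataset.range(10), so mean_of_features is always
# sum(range(10)) / 10.0 == 4.5, regardless of the training step.
assert sum(range(10)) / 10.0 == 4.5
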
Example #6
    def test_raise_error_with_ps(self):
        tf_config = {
            'cluster': {
                run_config_lib.TaskType.CHIEF: ['host0:0'],
                run_config_lib.TaskType.PS: ['host1:1'],
            },
            'task': {
                'type': run_config_lib.TaskType.CHIEF,
                'index': 0
            }
        }
        with test.mock.patch.dict('os.environ',
                                  {'TF_CONFIG': json.dumps(tf_config)}):
            dnn = estimator_lib.DNNClassifier(
                feature_columns=[feature_column_lib.numeric_column('x')],
                hidden_units=[3, 1])

        def eval_input_fn():
            pass

        with self.assertRaisesRegexp(ValueError, 'supports only single machine'):
            hooks_lib.InMemoryEvaluatorHook(dnn, eval_input_fn)
Example #7
    def test_raise_error_with_custom_saver_in_eval(self):
        def model_fn(features, labels, mode):
            _, _ = features, labels
            mean = metrics_module.Mean()
            mean.update_state(constant_op.constant(2.))
            return estimator_lib.EstimatorSpec(
                mode,
                loss=constant_op.constant(3.),
                scaffold=training.Scaffold(saver=training.Saver()),
                train_op=constant_op.constant(5.),
                eval_metric_ops={
                    'mean_of_features': mean,
                })

        estimator = estimator_lib.Estimator(model_fn=model_fn)

        def input_fn():
            return dataset_ops.Dataset.range(10)

        evaluator = hooks_lib.InMemoryEvaluatorHook(estimator, input_fn)
        with self.assertRaisesRegexp(ValueError,
                                     'does not support custom saver'):
            evaluator.begin()
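
Outside of this test harness, the hook used throughout these examples is exposed publicly as tf.estimator.experimental.InMemoryEvaluatorHook (earlier releases shipped it as tf.contrib.estimator.InMemoryEvaluatorHook). Below is a short usage sketch against the public API, mirroring Example #3 and assuming TF 1.15+/2.x with the estimator package installed; the feature name 'x' and the toy data are made up for illustration:

import tensorflow.compat.v1 as tf


def train_input_fn():
    return tf.data.Dataset.from_tensors(
        ({'x': [[1.0], [2.0]]}, [[0], [1]])).repeat(100)


def eval_input_fn():
    return tf.data.Dataset.from_tensors(
        ({'x': [[1.5], [2.5]]}, [[0], [1]]))


classifier = tf.estimator.DNNClassifier(
    feature_columns=[tf.feature_column.numeric_column('x')],
    hidden_units=[4, 2])

# Evaluate in-process every 50 training steps instead of reloading
# checkpoints in a separate evaluation run.
evaluator = tf.estimator.experimental.InMemoryEvaluatorHook(
    classifier, eval_input_fn, every_n_iter=50, name='in-memory')
classifier.train(train_input_fn, hooks=[evaluator])
print(classifier.eval_dir('in-memory'))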