Example #1: InMemoryEvaluatorHook rejects a custom init_fn in the eval scaffold
    def test_raise_error_with_custom_init_fn_in_eval(self):
        def model_fn(features, labels, mode):
            _, _ = features, labels

            def init_fn(scaffold, session):
                # No-op custom init_fn; its mere presence should trip the check.
                _, _ = scaffold, session

            return estimator_lib.EstimatorSpec(
                mode,
                loss=constant_op.constant(3.),
                scaffold=training.Scaffold(init_fn=init_fn),
                train_op=constant_op.constant(5.),
                eval_metric_ops={
                    'mean_of_features':
                    metrics_lib.mean(constant_op.constant(2.))
                })

        estimator = estimator_lib.Estimator(model_fn=model_fn)

        def input_fn():
            return dataset_ops.Dataset.range(10)

        evaluator = hooks_lib.InMemoryEvaluatorHook(estimator, input_fn)
        with self.assertRaisesRegexp(ValueError,
                                     'does not support custom init_fn'):
            evaluator.begin()
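
For contrast, an eval scaffold that relies on the default initialization passes this check. A minimal sketch reusing the symbols above (estimator_lib, training, constant_op), under the assumption that the default Scaffold is sufficient for the model:

    def model_fn(features, labels, mode):
        _, _ = features, labels
        return estimator_lib.EstimatorSpec(
            mode,
            # Default Scaffold: initialization goes through the standard
            # init_op, so InMemoryEvaluatorHook.begin() has nothing to reject.
            scaffold=training.Scaffold(),
            loss=constant_op.constant(3.),
            train_op=constant_op.constant(5.))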
Example #2: InMemoryEvaluatorHook rejects saveables other than global variables
    def test_raise_error_with_saveables_other_than_global_variables(self):
        def model_fn(features, labels, mode):
            _, _ = features, labels
            # Registering w only in SAVEABLE_OBJECTS (not GLOBAL_VARIABLES)
            # is what should trip the hook's check below.
            w = variables.VariableV1(
                initial_value=[0.],
                trainable=False,
                collections=[ops.GraphKeys.SAVEABLE_OBJECTS])
            init_op = control_flow_ops.group(
                [w.initializer,
                 training.get_global_step().initializer])
            return estimator_lib.EstimatorSpec(
                mode,
                loss=constant_op.constant(3.),
                scaffold=training.Scaffold(init_op=init_op),
                train_op=constant_op.constant(5.),
                eval_metric_ops={
                    'mean_of_features':
                    metrics_lib.mean(constant_op.constant(2.))
                })

        estimator = estimator_lib.Estimator(model_fn=model_fn)

        def input_fn():
            return dataset_ops.Dataset.range(10)

        evaluator = hooks_lib.InMemoryEvaluatorHook(estimator, input_fn)
        with self.assertRaisesRegexp(ValueError, 'does not support saveables'):
            estimator.train(input_fn, hooks=[evaluator])
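
The error disappears if w lives in the ordinary GLOBAL_VARIABLES collection, since the hook only carries global variables from the training graph into its in-memory eval graph. A one-line sketch under that assumption:

    # Default collections (GLOBAL_VARIABLES) keep w visible to the hook.
    w = variables.VariableV1(initial_value=[0.], trainable=False)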
Example #3: building an Estimator with RunConfig, optional MirroredStrategy, and warm start
    def _build_estimator(self):
        self.sess_config = tf.ConfigProto(allow_soft_placement=True)
        self.sess_config.gpu_options.allow_growth = True
        distribute = (tf.distribute.MirroredStrategy()
                      if self._flags.distribute else None)
        config = estimator.RunConfig(
            model_dir=self._flags.checkpoint_dir,
            save_summary_steps=self._flags.save_summary_steps,
            save_checkpoints_steps=self._flags.checkpoints_steps,
            keep_checkpoint_max=2,
            session_config=self.sess_config,
            log_step_count_steps=self._flags.log_step_count_steps,
            train_distribute=distribute,
            eval_distribute=distribute)

        warm_start_setting = None
        if os.path.exists(self._flags.warm_start_dir):
            vars_to_warm_start = ('.*/embeddings[^/]'
                                  if self._flags.warm_start_mode == 'emb'
                                  else '.*')
            warm_start_setting = estimator.WarmStartSettings(
                ckpt_to_initialize_from=self._flags.warm_start_dir,
                vars_to_warm_start=vars_to_warm_start)

        self.estimator = estimator.Estimator(
            self._build_model_fn(),
            config=config,
            warm_start_from=warm_start_setting)
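
_build_estimator presupposes a self._flags object exposing the attributes read above. The flag names below come straight from the snippet; the types and defaults are illustrative assumptions, sketched with argparse:

    import argparse

    parser = argparse.ArgumentParser()
    # Attribute names mirror what _build_estimator reads; defaults are guesses.
    parser.add_argument('--checkpoint_dir', default='/tmp/model')
    parser.add_argument('--save_summary_steps', type=int, default=100)
    parser.add_argument('--checkpoints_steps', type=int, default=1000)
    parser.add_argument('--log_step_count_steps', type=int, default=100)
    parser.add_argument('--distribute', action='store_true')
    parser.add_argument('--warm_start_dir', default='')
    parser.add_argument('--warm_start_mode', choices=['emb', 'all'], default='all')
    flags = parser.parse_args()  # would be stored as self._flags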
Example #4: InMemoryEvaluatorHook evaluates with the latest in-memory variable values
    def test_uses_latest_variable_value(self):
        def model_fn(features, labels, mode):
            _ = labels
            step = tf.compat.v1.train.get_global_step()
            w = tf.compat.v1.get_variable(
                'w',
                shape=[],
                initializer=tf.compat.v1.initializers.zeros(),
                dtype=tf.dtypes.int64)
            if estimator_lib.ModeKeys.TRAIN == mode:
                # use a control dependency so the features tensor is actually consumed
                with tf.control_dependencies([features]):
                    step_inc = tf.compat.v1.assign_add(
                        tf.compat.v1.train.get_global_step(), 1)
                with tf.control_dependencies([step_inc]):
                    assign_w_to_step_plus_2 = w.assign(step + 2)
                return estimator_lib.EstimatorSpec(
                    mode,
                    loss=tf.constant(3.),
                    train_op=assign_w_to_step_plus_2)
            if estimator_lib.ModeKeys.EVAL == mode:
                # use a control dependency so the features tensor is actually consumed
                with tf.control_dependencies([features]):
                    loss = tf.constant(5.)
                mean = metrics_module.Mean()
                mean.update_state(w)
                return estimator_lib.EstimatorSpec(
                    mode,
                    loss=loss,
                    # w is constant within each evaluation, so the mean equals w:
                    # w = 0 if step == 0 else step + 2
                    eval_metric_ops={'mean_of_const': mean})

        estimator = estimator_lib.Estimator(model_fn=model_fn)

        def input_fn():
            return tf.compat.v1.data.Dataset.range(10)

        evaluator = hooks_lib.InMemoryEvaluatorHook(estimator,
                                                    input_fn,
                                                    every_n_iter=4)
        estimator.train(input_fn, hooks=[evaluator])

        self.assertTrue(os.path.isdir(estimator.eval_dir()))
        step_keyword_to_value = summary_step_keyword_to_value_mapping(
            estimator.eval_dir())
        # w = 0 if step==0 else step+2
        self.assertEqual(0, step_keyword_to_value[0]['mean_of_const'])
        self.assertEqual(6, step_keyword_to_value[4]['mean_of_const'])
        self.assertEqual(12, step_keyword_to_value[10]['mean_of_const'])
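
summary_step_keyword_to_value_mapping is a test utility not shown in these snippets. From how it is used, it maps each evaluated global step to the scalar summaries written under eval_dir. A minimal sketch of such a helper (an illustration, not the actual TF test utility):

    import os
    import tensorflow as tf

    def summary_step_keyword_to_value_mapping(eval_dir):
        # Build {global_step: {summary_tag: simple_value}} from event files.
        mapping = {}
        for name in os.listdir(eval_dir):
            if not name.startswith('events.out'):
                continue
            path = os.path.join(eval_dir, name)
            for event in tf.compat.v1.train.summary_iterator(path):
                if not event.HasField('summary'):
                    continue
                for value in event.summary.value:
                    if value.HasField('simple_value'):
                        mapping.setdefault(event.step,
                                           {})[value.tag] = value.simple_value
        return mapping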
Example #5: forward_features must not conflict with existing prediction keys
    def test_should_not_conflict_with_existing_predictions(self):
        def input_fn():
            return {'x': [[3.], [5.]], 'id': [[101], [102]]}

        def model_fn(features, mode):
            del features
            global_step = training.get_global_step()
            return estimator_lib.EstimatorSpec(
                mode,
                loss=constant_op.constant([5.]),
                predictions={'x': constant_op.constant([5.])},
                train_op=global_step.assign_add(1))

        estimator = estimator_lib.Estimator(model_fn=model_fn)
        estimator.train(input_fn=input_fn, steps=1)

        estimator = extenders.forward_features(estimator)
        with self.assertRaisesRegexp(ValueError, 'Cannot forward feature key'):
            next(estimator.predict(input_fn=input_fn))
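
Forwarding only keys that do not collide with the model's own prediction keys avoids the error. Assuming forward_features accepts a keys argument, as tf.contrib.estimator.forward_features did, a sketch:

    # Forward only 'id'; the model's 'x' prediction key is left alone.
    estimator = extenders.forward_features(estimator, keys='id')
    prediction = next(estimator.predict(input_fn=input_fn))
    # prediction now holds the forwarded 'id' next to the model's own 'x'.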
Example #6: InMemoryEvaluatorHook runs eval metrics during training
    def test_runs_eval_metrics(self):
        def model_fn(features, labels, mode):
            _ = labels
            if estimator_lib.ModeKeys.TRAIN == mode:
                with tf.control_dependencies([features]):
                    train_op = tf.compat.v1.assign_add(
                        tf.compat.v1.train.get_global_step(), 1)
                return estimator_lib.EstimatorSpec(mode,
                                                   loss=tf.constant(3.),
                                                   train_op=train_op)
            if estimator_lib.ModeKeys.EVAL == mode:
                mean = metrics_module.Mean()
                mean.update_state(features)
                return estimator_lib.EstimatorSpec(mode,
                                                   loss=tf.constant(5.),
                                                   eval_metric_ops={
                                                       'mean_of_features':
                                                       mean,
                                                   })

        estimator = estimator_lib.Estimator(model_fn=model_fn)

        def input_fn():
            return tf.compat.v1.data.Dataset.range(10)

        evaluator = hooks_lib.InMemoryEvaluatorHook(estimator,
                                                    input_fn,
                                                    every_n_iter=4)
        estimator.train(input_fn, hooks=[evaluator])

        self.assertTrue(os.path.isdir(estimator.eval_dir()))
        step_keyword_to_value = summary_step_keyword_to_value_mapping(
            estimator.eval_dir())

        # 4.5 = sum(range(10))/10
        # before training
        self.assertEqual(4.5, step_keyword_to_value[0]['mean_of_features'])
        # intervals (every_n_iter=4)
        self.assertEqual(4.5, step_keyword_to_value[4]['mean_of_features'])
        self.assertEqual(4.5, step_keyword_to_value[8]['mean_of_features'])
        # end
        self.assertEqual(4.5, step_keyword_to_value[10]['mean_of_features'])
        self.assertEqual(set([0, 4, 8, 10]), set(step_keyword_to_value.keys()))
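
The step set {0, 4, 8, 10} follows from the hook's schedule: one evaluation before any training step runs (step 0), one after every every_n_iter=4 training steps (steps 4 and 8), and a final one when training ends, which here is step 10 because the ten-element Dataset.range(10) drives ten training steps.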
Example #7: InMemoryEvaluatorHook rejects a custom saver in the eval scaffold
    def test_raise_error_with_custom_saver_in_eval(self):
        def model_fn(features, labels, mode):
            _, _ = features, labels
            mean = metrics_module.Mean()
            mean.update_state(constant_op.constant(2.))
            return estimator_lib.EstimatorSpec(
                mode,
                loss=constant_op.constant(3.),
                # A custom Saver in the eval scaffold should trip the check.
                scaffold=training.Scaffold(saver=training.Saver()),
                train_op=constant_op.constant(5.),
                eval_metric_ops={
                    'mean_of_features': mean,
                })

        estimator = estimator_lib.Estimator(model_fn=model_fn)

        def input_fn():
            return dataset_ops.Dataset.range(10)

        evaluator = hooks_lib.InMemoryEvaluatorHook(estimator, input_fn)
        with self.assertRaisesRegexp(ValueError,
                                     'does not support custom saver'):
            evaluator.begin()
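
This mirrors Example #1: presumably because InMemoryEvaluatorHook runs evaluation inside the training process and transfers variable values directly rather than restoring from a checkpoint, a custom Saver (like a custom init_fn) would never be exercised, so the hook rejects both up front in begin().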