Example #1
0
    def test_predictor(self):
        """Checkpoint predictor rejects use before restore, then predicts.

        Trains a mock model, then verifies that the CheckpointPredictor raises
        until restore() succeeds, and afterwards reports a valid model version,
        the trained global step, the expected feature spec, and predictions of
        the expected shape.
        """
        input_generator = default_input_generator.DefaultRandomInputGenerator(
            batch_size=_BATCH_SIZE)
        model_dir = self.create_tempdir().full_path
        mock_model = mocks.MockT2RModel()
        train_eval.train_eval_model(
            t2r_model=mock_model,
            input_generator_train=input_generator,
            max_train_steps=_MAX_TRAIN_STEPS,
            model_dir=model_dir)

        predictor = checkpoint_predictor.CheckpointPredictor(
            t2r_model=mock_model, checkpoint_dir=model_dir, use_gpu=False)
        # Before restore() the predictor has no model loaded, so predict()
        # must fail and the version/step sentinels stay at -1.
        with self.assertRaises(ValueError):
            predictor.predict({'does_not_matter': np.zeros(1)})
        self.assertEqual(predictor.model_version, -1)
        self.assertEqual(predictor.global_step, -1)
        self.assertTrue(predictor.restore())
        self.assertGreater(predictor.model_version, 0)
        self.assertEqual(predictor.global_step, 3)
        ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
            tf.estimator.ModeKeys.PREDICT)
        tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                      ref_feature_spec)
        features = tensorspec_utils.make_random_numpy(ref_feature_spec,
                                                      batch_size=_BATCH_SIZE)
        predictions = predictor.predict(features)
        self.assertLen(predictions, 1)
        # assertCountEqual is order-insensitive, so sorting the keys first
        # (as the original did) is redundant.
        self.assertCountEqual(predictions.keys(), ['logit'])
        self.assertEqual(predictions['logit'].shape, (2, 1))
    def random_train_model(self, tf_model, **module_kwargs):
        """Trains a T2R model on random inputs and checks the outputs.

        Spies on tpu.initialize_system to verify it is invoked when running
        with a TPU, then asserts the expected training artifacts exist.
        """
        params = self._get_params(
            model_dir=self._test_case.create_tempdir().full_path,
            **module_kwargs)
        random_inputs = default_input_generator.DefaultRandomInputGenerator(
            batch_size=params['batch_size'])

        # Wrap the real initialize_system so training still works while we
        # can assert on the call.
        real_initialize = tpu.initialize_system
        with mock.patch.object(
            tpu, 'initialize_system', autospec=True) as init_spy:
            init_spy.side_effect = real_initialize
            train_eval.train_eval_model(
                t2r_model=tf_model,
                input_generator_train=random_inputs,
                max_train_steps=params['max_train_steps'],
                model_dir=params['model_dir'],
                use_tpu_wrapper=params['use_tpu_wrapper'])
            if self._use_tpu:
                init_spy.assert_called()
            train_eval_test_utils.assert_output_files(
                test_case=self._test_case,
                model_dir=params['model_dir'],
                expected_output_filename_patterns=(
                    train_eval_test_utils.DEFAULT_TRAIN_FILENAME_PATTERNS))
Example #3
0
 def test_predictor_load_final_model(self):
     """Restores the newest exported SavedModel and checks its metadata."""
     random_inputs = default_input_generator.DefaultRandomInputGenerator(
         batch_size=_BATCH_SIZE)
     model_dir = self.create_tempdir().full_path
     mock_model = mocks.MockT2RModel()
     train_eval.train_eval_model(
         t2r_model=mock_model,
         input_generator_train=random_inputs,
         input_generator_eval=random_inputs,
         max_train_steps=_MAX_TRAIN_STEPS,
         eval_steps=_MAX_EVAL_STEPS,
         model_dir=model_dir,
         create_exporters_fn=train_eval.create_default_exporters)
     export_dir = os.path.join(model_dir, 'export', 'latest_exporter_numpy')
     # Export directories are timestamped, so the lexicographic maximum is
     # the most recent export.
     final_export_dir = max(tf.io.gfile.glob(os.path.join(export_dir, '*')))
     predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
         export_dir=final_export_dir)
     predictor.restore()
     self.assertGreater(predictor.model_version, 0)
     self.assertEqual(predictor.global_step, 3)
     ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
         tf.estimator.ModeKeys.PREDICT)
     tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                   ref_feature_spec)
Example #4
0
    def test_predictor_init_with_default_exporter(self, restore_model_option):
        """Exported-model predictor works for the given restore option."""
        random_inputs = default_input_generator.DefaultRandomInputGenerator(
            batch_size=_BATCH_SIZE)
        model_dir = self.create_tempdir().full_path
        mock_model = mocks.MockT2RModel()
        train_eval.train_eval_model(
            t2r_model=mock_model,
            input_generator_train=random_inputs,
            input_generator_eval=random_inputs,
            max_train_steps=_MAX_TRAIN_STEPS,
            eval_steps=_MAX_EVAL_STEPS,
            model_dir=model_dir,
            create_exporters_fn=train_eval.create_default_exporters)

        predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
            export_dir=os.path.join(model_dir, 'export',
                                    'latest_exporter_numpy'),
            restore_model_option=restore_model_option)
        # Only the synchronous option needs an explicit restore() call here.
        restore_now = (
            restore_model_option ==
            exported_savedmodel_predictor.RestoreOptions.RESTORE_SYNCHRONOUSLY)
        if restore_now:
            predictor.restore()
        self.assertGreater(predictor.model_version, 0)
        self.assertEqual(predictor.global_step, 3)
        ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
            tf.estimator.ModeKeys.PREDICT)
        tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                      ref_feature_spec)
Example #5
0
    def test_predictor(self):
        """Exported-SavedModel predictor raises before restore, then predicts."""
        random_inputs = default_input_generator.DefaultRandomInputGenerator(
            batch_size=_BATCH_SIZE)
        model_dir = self.create_tempdir().full_path
        mock_model = mocks.MockT2RModel()
        train_eval.train_eval_model(
            t2r_model=mock_model,
            input_generator_train=random_inputs,
            input_generator_eval=random_inputs,
            max_train_steps=_MAX_TRAIN_STEPS,
            eval_steps=_MAX_EVAL_STEPS,
            model_dir=model_dir,
            create_exporters_fn=train_eval.create_default_exporters)

        predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
            export_dir=os.path.join(model_dir, 'export',
                                    'latest_exporter_numpy'))
        # All accessors must fail until restore() has loaded a model.
        with self.assertRaises(ValueError):
            predictor.get_feature_specification()
        with self.assertRaises(ValueError):
            predictor.predict({'does_not_matter': np.zeros(1)})
        with self.assertRaises(ValueError):
            _ = predictor.model_version
        self.assertTrue(predictor.restore())
        self.assertGreater(predictor.model_version, 0)
        ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
            tf.estimator.ModeKeys.PREDICT)
        tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                      ref_feature_spec)
        features = tensorspec_utils.make_random_numpy(
            ref_feature_spec, batch_size=_BATCH_SIZE)
        predictions = predictor.predict(features)
        self.assertLen(predictions, 1)
        self.assertEqual(predictions['logit'].shape, (2, 1))
    def random_predict(self, module_name, model_name, **module_kwargs):
        """Runs predictions through a model with random inputs.

        Returns the first prediction yielded by the model, or None when the
        model yields nothing.
        """
        tf_model = getattr(module_name, model_name)(**module_kwargs)

        random_inputs = default_input_generator.DefaultRandomInputGenerator(
            batch_size=1)
        predictions = train_eval.predict_from_model(
            t2r_model=tf_model,
            input_generator_predict=random_inputs,
            model_dir=self._test_case.create_tempdir().full_path)
        # Equivalent to returning from the first loop iteration.
        return next(iter(predictions), None)
    def setUp(self):
        """Configures gin, a clean log dir, and the test input generators."""
        super(PoseEnvModelsTest, self).setUp()
        test_data = os.path.join(FLAGS.test_srcdir, 'tensor2robot',
                                 'test_data/pose_env_test_data.tfrecord')
        self._train_log_dir = FLAGS.test_tmpdir
        # Start from an empty log directory for deterministic output checks.
        if tf.io.gfile.exists(self._train_log_dir):
            tf.io.gfile.rmtree(self._train_log_dir)
        gin.bind_parameter('train_eval_model.max_train_steps', 3)
        gin.bind_parameter('train_eval_model.eval_steps', 2)

        self._record_input_generator = (
            default_input_generator.DefaultRecordInputGenerator(
                batch_size=BATCH_SIZE, file_patterns=test_data))

        make_random_generator = default_input_generator.DefaultRandomInputGenerator
        self._meta_record_input_generator_train = make_random_generator(
            batch_size=BATCH_SIZE)
        self._meta_record_input_generator_eval = make_random_generator(
            batch_size=BATCH_SIZE)
    def test_predictor_with_async_hook(self):
        """Async export hook produces a SavedModel the predictor can restore.

        Trains a mock model with an AsyncExportHookBuilder attached, then
        verifies that the exported-SavedModel predictor fails before restore()
        and reports the trained global step and expected outputs afterwards.
        """
        model_dir = self.create_tempdir().full_path
        default_create_export_fn = functools.partial(
            async_export_hook_builder.default_create_export_fn,
            batch_sizes_for_export=_BATCH_SIZES_FOR_EXPORT)
        export_dir = os.path.join(model_dir, _EXPORT_DIR)
        hook_builder = async_export_hook_builder.AsyncExportHookBuilder(
            export_dir=export_dir, create_export_fn=default_create_export_fn)
        input_generator = default_input_generator.DefaultRandomInputGenerator(
            batch_size=_BATCH_SIZE)
        mock_model = mocks.MockT2RModel()
        train_eval.train_eval_model(
            t2r_model=mock_model,
            input_generator_train=input_generator,
            train_hook_builders=[hook_builder],
            max_train_steps=_MAX_TRAIN_STEPS,
            model_dir=model_dir)

        predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
            export_dir=os.path.join(model_dir, _EXPORT_DIR))
        # All accessors must fail until restore() has loaded a model.
        with self.assertRaises(ValueError):
            predictor.get_feature_specification()
        with self.assertRaises(ValueError):
            predictor.predict({'does_not_matter': np.zeros(1)})
        with self.assertRaises(ValueError):
            _ = predictor.model_version
        self.assertEqual(predictor.global_step, -1)
        self.assertTrue(predictor.restore())
        self.assertGreater(predictor.model_version, 0)
        # NOTE: The async hook builder will export the global step.
        self.assertEqual(predictor.global_step, 3)
        ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
            tf.estimator.ModeKeys.PREDICT)
        tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                      ref_feature_spec)
        features = tensorspec_utils.make_random_numpy(ref_feature_spec,
                                                      batch_size=_BATCH_SIZE)
        predictions = predictor.predict(features)
        self.assertLen(predictions, 1)
        # assertCountEqual is order-insensitive, so sorting the keys first
        # (as the original did) is redundant.
        self.assertCountEqual(predictions.keys(), ['logit'])
        self.assertEqual(predictions['logit'].shape, (2, 1))
 def test_random_dataset(self):
     """Smoke-checks the random input generator via the shared helper."""
     self._test_input_generator(
         default_input_generator.DefaultRandomInputGenerator(batch_size=2))