def test_predictor(self):
    """Trains a tiny model, then exercises CheckpointPredictor end to end.

    Verifies the pre-restore error contract (predict raises, sentinel
    versions are -1), then restores and checks versioning, feature specs,
    and prediction output shapes.
    """
    train_inputs = default_input_generator.DefaultRandomInputGenerator(
        batch_size=_BATCH_SIZE)
    model_dir = self.create_tempdir().full_path
    mock_model = mocks.MockT2RModel()
    train_eval.train_eval_model(
        t2r_model=mock_model,
        input_generator_train=train_inputs,
        max_train_steps=_MAX_TRAIN_STEPS,
        model_dir=model_dir)

    predictor = checkpoint_predictor.CheckpointPredictor(
        t2r_model=mock_model, checkpoint_dir=model_dir, use_gpu=False)

    # Before restore(): prediction must fail and versions report sentinels.
    with self.assertRaises(ValueError):
        predictor.predict({'does_not_matter': np.zeros(1)})
    self.assertEqual(predictor.model_version, -1)
    self.assertEqual(predictor.global_step, -1)

    self.assertTrue(predictor.restore())
    self.assertGreater(predictor.model_version, 0)
    self.assertEqual(predictor.global_step, 3)

    ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
        tf.estimator.ModeKeys.PREDICT)
    tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                  ref_feature_spec)

    features = tensorspec_utils.make_random_numpy(
        ref_feature_spec, batch_size=_BATCH_SIZE)
    predictions = predictor.predict(features)
    self.assertLen(predictions, 1)
    self.assertCountEqual(sorted(predictions.keys()), ['logit'])
    self.assertEqual(predictions['logit'].shape, (2, 1))
def test_predictor_load_final_model(self):
    """Restores the newest exported SavedModel and checks its metadata."""
    train_inputs = default_input_generator.DefaultRandomInputGenerator(
        batch_size=_BATCH_SIZE)
    model_dir = self.create_tempdir().full_path
    mock_model = mocks.MockT2RModel()
    train_eval.train_eval_model(
        t2r_model=mock_model,
        input_generator_train=train_inputs,
        input_generator_eval=train_inputs,
        max_train_steps=_MAX_TRAIN_STEPS,
        eval_steps=_MAX_EVAL_STEPS,
        model_dir=model_dir,
        create_exporters_fn=train_eval.create_default_exporters)

    export_dir = os.path.join(model_dir, 'export', 'latest_exporter_numpy')
    # Export directories are timestamped, so the lexicographically largest
    # path is the most recent export.
    final_export_dir = max(tf.io.gfile.glob(os.path.join(export_dir, '*')))

    predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
        export_dir=final_export_dir)
    predictor.restore()
    self.assertGreater(predictor.model_version, 0)
    self.assertEqual(predictor.global_step, 3)

    ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
        tf.estimator.ModeKeys.PREDICT)
    tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                  ref_feature_spec)
def test_make_placeholders(self, collection_type):
    """Placeholders built from a spec round-trip, modulo the batch dim."""
    spec = self._make_tensorspec_collection(collection_type)
    placeholders = utils.make_placeholders(spec)
    recovered_spec = utils.tensorspec_from_tensors(placeholders)
    # The recovered spec differs from the original only in its batch
    # dimension: equality holds when the batch dim is ignored and must
    # fail when it is not.
    utils.assert_equal(spec, recovered_spec, ignore_batch=True)
    with self.assertRaises(ValueError):
        utils.assert_equal(spec, recovered_spec, ignore_batch=False)
def test_predictor_init_with_default_exporter(self, restore_model_option):
    """Checks predictor construction for each RestoreOptions variant."""
    train_inputs = default_input_generator.DefaultRandomInputGenerator(
        batch_size=_BATCH_SIZE)
    model_dir = self.create_tempdir().full_path
    mock_model = mocks.MockT2RModel()
    train_eval.train_eval_model(
        t2r_model=mock_model,
        input_generator_train=train_inputs,
        input_generator_eval=train_inputs,
        max_train_steps=_MAX_TRAIN_STEPS,
        eval_steps=_MAX_EVAL_STEPS,
        model_dir=model_dir,
        create_exporters_fn=train_eval.create_default_exporters)

    predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
        export_dir=os.path.join(model_dir, 'export', 'latest_exporter_numpy'),
        restore_model_option=restore_model_option)
    # Only the synchronous option needs an explicit restore() call here.
    sync = exported_savedmodel_predictor.RestoreOptions.RESTORE_SYNCHRONOUSLY
    if restore_model_option == sync:
        predictor.restore()

    self.assertGreater(predictor.model_version, 0)
    self.assertEqual(predictor.global_step, 3)
    ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
        tf.estimator.ModeKeys.PREDICT)
    tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                  ref_feature_spec)
def test_parsing(self):
    """Parses serialized pose-env records into the declared feature/label specs.

    Reads a tfrecord fixture, declares TensorSpecs for state/action/reward,
    runs the serialized-to-parsed pipeline, and checks both the structural
    spec match and the concrete batched tensor shapes.
    """
    base_dir = 'tensor2robot'
    file_pattern = os.path.join(FLAGS.test_srcdir, base_dir,
                                'test_data/pose_env_test_data.tfrecord')
    dataset = tfdata.parallel_read(file_patterns=file_pattern)

    state_spec = TSPEC(
        shape=(64, 64, 3), dtype=tf.uint8, name='state/image',
        data_format='jpeg')
    # Fix: the original passed shape=(2), which is just the integer 2 —
    # the parentheses do not make a tuple. (2,) states the intent clearly;
    # TensorShape normalizes both to [2], so behavior is unchanged.
    action_spec = TSPEC(shape=(2,), dtype=tf.bfloat16, name='pose')
    reward_spec = TSPEC(shape=(), dtype=tf.float32, name='reward')
    feature_tspec = PoseEnvFeature(state=state_spec, action=action_spec)
    label_tspec = PoseEnvLabel(reward=reward_spec)

    batched_dataset = dataset.batch(batch_size=1)
    dataset = tfdata.serialized_to_parsed(batched_dataset, feature_tspec,
                                          label_tspec)
    iterator = dataset.make_one_shot_iterator()
    features, labels = iterator.get_next()
    # Parsed tensors must structurally match the requested specs; the
    # leading batch dimension is not part of the spec, so it is ignored.
    tensorspec_utils.assert_equal(feature_tspec, features, ignore_batch=True)
    tensorspec_utils.assert_equal(label_tspec, labels, ignore_batch=True)
    with self.session() as session:
        features_, labels_ = session.run([features, labels])
        self.assertAllEqual([1, 64, 64, 3], features_.state.shape)
        self.assertAllEqual([1, 2], features_.action.shape)
        self.assertAllEqual((1,), labels_.reward.shape)
def test_predictor(self):
    """Exported SavedModel predictor: pre-restore errors, then prediction."""
    train_inputs = default_input_generator.DefaultRandomInputGenerator(
        batch_size=_BATCH_SIZE)
    model_dir = self.create_tempdir().full_path
    mock_model = mocks.MockT2RModel()
    train_eval.train_eval_model(
        t2r_model=mock_model,
        input_generator_train=train_inputs,
        input_generator_eval=train_inputs,
        max_train_steps=_MAX_TRAIN_STEPS,
        eval_steps=_MAX_EVAL_STEPS,
        model_dir=model_dir,
        create_exporters_fn=train_eval.create_default_exporters)

    predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
        export_dir=os.path.join(model_dir, 'export', 'latest_exporter_numpy'))

    # Every accessor must raise until the model has been restored.
    with self.assertRaises(ValueError):
        predictor.get_feature_specification()
    with self.assertRaises(ValueError):
        predictor.predict({'does_not_matter': np.zeros(1)})
    with self.assertRaises(ValueError):
        _ = predictor.model_version

    self.assertTrue(predictor.restore())
    self.assertGreater(predictor.model_version, 0)

    ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
        tf.estimator.ModeKeys.PREDICT)
    tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                  ref_feature_spec)

    features = tensorspec_utils.make_random_numpy(
        ref_feature_spec, batch_size=_BATCH_SIZE)
    predictions = predictor.predict(features)
    self.assertLen(predictions, 1)
    self.assertEqual(predictions['logit'].shape, (2, 1))
def test_copy_none_name(self):
    """Copying with a prefix renames tensors, including unnamed entries."""
    spec = utils.TensorSpecStruct()
    spec.none_name = utils.ExtendedTensorSpec(shape=(1,), dtype=tf.float32)
    spec.with_name = utils.ExtendedTensorSpec(
        shape=(2,), dtype=tf.float32, name='with_name')
    copied = utils.copy_tensorspec(spec, prefix='test')
    # Spec equality does not check the name.
    utils.assert_equal(spec, copied)
    # An entry without a name gets the bare prefix; named entries get
    # 'prefix/name'.
    self.assertEqual(copied.none_name.name, 'test/')
    self.assertEqual(copied.with_name.name, 'test/with_name')
def test_pack_flat_sequence_to_spec_structure(self):
    """Packing a flat tensor sequence restores the nested spec structure."""
    subset_placeholders = utils.make_placeholders(mock_nested_subset_spec)
    flat_subset = utils.flatten_spec_structure(subset_placeholders)
    packed_subset = utils.pack_flat_sequence_to_spec_structure(
        mock_nested_subset_spec, flat_subset)
    utils.assert_equal(subset_placeholders, packed_subset)
    utils.assert_equal(
        mock_nested_subset_spec, packed_subset, ignore_batch=True)

    placeholders = utils.make_placeholders(mock_nested_spec)
    flat_full = utils.flatten_spec_structure(placeholders)
    packed_placeholders = utils.pack_flat_sequence_to_spec_structure(
        mock_nested_subset_spec, flat_full)
    # We only subselect what we need in pack_flat_sequence_to_spec_structure,
    # hence, we should recover what we wanted.
    utils.assert_equal(
        mock_nested_subset_spec, packed_placeholders, ignore_batch=True)
    utils.assert_equal(subset_placeholders, packed_placeholders)

    packed_optional_placeholders = utils.pack_flat_sequence_to_spec_structure(
        mock_nested_optional_spec, flat_full)
    # Although mock_nested_optional_spec would like more tensors
    # flattened_placeholders cannot provide them, fortunately they are
    # optional.
    utils.assert_required(packed_optional_placeholders, placeholders)
    utils.assert_required(
        mock_nested_spec, packed_optional_placeholders, ignore_batch=True)
def test_predictor_with_async_hook(self):
    """Models exported by the async hook are loadable and predictable."""
    model_dir = self.create_tempdir().full_path
    create_export_fn = functools.partial(
        async_export_hook_builder.default_create_export_fn,
        batch_sizes_for_export=_BATCH_SIZES_FOR_EXPORT)
    export_dir = os.path.join(model_dir, _EXPORT_DIR)
    hook_builder = async_export_hook_builder.AsyncExportHookBuilder(
        export_dir=export_dir, create_export_fn=create_export_fn)

    train_inputs = default_input_generator.DefaultRandomInputGenerator(
        batch_size=_BATCH_SIZE)
    mock_model = mocks.MockT2RModel()
    train_eval.train_eval_model(
        t2r_model=mock_model,
        input_generator_train=train_inputs,
        train_hook_builders=[hook_builder],
        max_train_steps=_MAX_TRAIN_STEPS,
        model_dir=model_dir)

    predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
        export_dir=os.path.join(model_dir, _EXPORT_DIR))

    # Before restore(): accessors raise, global_step reports the sentinel.
    with self.assertRaises(ValueError):
        predictor.get_feature_specification()
    with self.assertRaises(ValueError):
        predictor.predict({'does_not_matter': np.zeros(1)})
    with self.assertRaises(ValueError):
        _ = predictor.model_version
    self.assertEqual(predictor.global_step, -1)

    self.assertTrue(predictor.restore())
    self.assertGreater(predictor.model_version, 0)
    # NOTE: The async hook builder will export the global step.
    self.assertEqual(predictor.global_step, 3)

    ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
        tf.estimator.ModeKeys.PREDICT)
    tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                  ref_feature_spec)

    features = tensorspec_utils.make_random_numpy(
        ref_feature_spec, batch_size=_BATCH_SIZE)
    predictions = predictor.predict(features)
    self.assertLen(predictions, 1)
    self.assertCountEqual(sorted(predictions.keys()), ['logit'])
    self.assertEqual(predictions['logit'].shape, (2, 1))
def test_init_with_attributes(self):
    """TensorSpecStruct built from kwargs matches a flattened nested spec."""
    expected_train = utils.TensorSpecStruct(images=T1, actions=T2)
    flat_optional = utils.flatten_spec_structure(mock_nested_optional_spec)
    utils.assert_equal(expected_train, flat_optional.train)

    alternative_dict = {'o6': O6, 'o4': O4}
    hierarchy = utils.TensorSpecStruct(
        nested_optional_spec=mock_nested_optional_spec,
        alternative=alternative_dict)
    utils.assert_equal(hierarchy.nested_optional_spec, flat_optional)
    self.assertDictEqual(hierarchy.alternative.to_dict(), alternative_dict)
    self.assertCountEqual(list(hierarchy.alternative.keys()), ['o4', 'o6'])
    # Keys are flattened with '/' separators across the full hierarchy.
    self.assertCountEqual(
        list(hierarchy.keys()),
        [
            'nested_optional_spec/train/images',
            'nested_optional_spec/train/actions',
            'nested_optional_spec/test/images',
            'nested_optional_spec/test/actions',
            'nested_optional_spec/optional/images',
            'nested_optional_spec/optional/actions',
            'alternative/o6',
            'alternative/o4',
        ])
def test_assert_not_equal(self):
    """assert_equal must reject a spec compared against its strict subset."""
    with self.assertRaises(ValueError):
        utils.assert_equal(mock_nested_spec, mock_nested_subset_spec)
def test_assert_equal(self):
    """A deep copy of a nested spec compares equal to the original."""
    duplicate = copy.deepcopy(mock_nested_spec)
    utils.assert_equal(mock_nested_spec, duplicate)
def test_copy(self, collection_type):
    """copy_tensorspec without a prefix yields an equal collection."""
    original = self._make_tensorspec_collection(collection_type)
    duplicate = utils.copy_tensorspec(original)
    utils.assert_equal(original, duplicate)
def test_assert_not_equal(self):
    """assert_equal must raise when a spec is compared to a strict subset."""
    with self.assertRaises(ValueError):  # pylint: disable=g-error-prone-assert-raises
        utils.assert_equal(mock_nested_spec, mock_nested_subset_spec)