def _generator_fn(self, batch_size):
    while True:
        features = tensorspec_utils.make_random_numpy(
            self._feature_spec, batch_size, self._sequence_length)
        labels = tensorspec_utils.make_random_numpy(
            self._label_spec, batch_size, self._sequence_length)
        yield features, labels
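For orientation, a minimal standalone sketch of what make_random_numpy returns is below. The import path and the TensorSpecStruct/ExtendedTensorSpec names are inferred from the snippets on this page, and attribute-style access on the result is an assumption, so treat this as illustrative rather than canonical.

import tensorflow as tf
from tensor2robot.utils import tensorspec_utils

# Hypothetical one-field spec. make_random_numpy prepends the batch
# dimension (and the sequence dimension, when one is passed) to each
# spec's shape and fills the resulting arrays with random values.
spec = tensorspec_utils.TensorSpecStruct()
spec.state = tensorspec_utils.ExtendedTensorSpec(
    shape=(8,), dtype=tf.float32, name='state')
batch = tensorspec_utils.make_random_numpy(spec, batch_size=4)
assert batch.state.shape == (4, 8)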
Example #2
  def _create_mock_tensors(self,
                           base_preprocessor,
                           batch_size,
                           mode=tf.estimator.ModeKeys.TRAIN):
   np.random.seed(_RANDOM_SEED)
   features = utils.make_random_numpy(
       base_preprocessor.get_in_feature_specification(mode),
       batch_size=batch_size)
   labels = utils.make_random_numpy(
       base_preprocessor.get_in_label_specification(mode),
       batch_size=batch_size)
   return (features, labels)
Example #3
  def test_stack_intratask_episodes(self):
    feature_spec = TSpec()
    feature_spec.image = utils.ExtendedTensorSpec(
        shape=_DEFAULT_IN_IMAGE_SHAPE,
        dtype=tf.uint8,
        is_optional=False,
        data_format='jpeg',
        name='state/image')
    feature_spec.action = utils.ExtendedTensorSpec(
        shape=_DEFAULT_ACTION_SHAPE,
        dtype=tf.float32,
        is_optional=False,
        name='state/action')

    batch_size = 2
    num_samples_in_task = 3
    metaexample_spec = preprocessors.create_metaexample_spec(
        feature_spec, num_samples_in_task, 'condition')
    tensors = utils.make_random_numpy(metaexample_spec, batch_size)
    out_tensors = preprocessors.stack_intra_task_episodes(
        tensors, num_samples_in_task)

    self.assertEqual(
        out_tensors.image.shape,
        (batch_size, num_samples_in_task) + _DEFAULT_IN_IMAGE_SHAPE)
    self.assertEqual(
        out_tensors.action.shape,
        (batch_size, num_samples_in_task) + _DEFAULT_ACTION_SHAPE)
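The shape assertions above follow from stacking num_samples_in_task per-sample batches along a new axis 1. For intuition only, here is the equivalent operation in plain numpy; this is not the library's implementation, and the concrete sample shape is made up:

import numpy as np

batch_size, num_samples = 2, 3
sample_shape = (64, 64, 3)  # stand-in for _DEFAULT_IN_IMAGE_SHAPE
per_sample = [np.zeros((batch_size,) + sample_shape)
              for _ in range(num_samples)]
stacked = np.stack(per_sample, axis=1)
assert stacked.shape == (batch_size, num_samples) + sample_shape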
Example #4
    def test_predictor(self):
        input_generator = default_input_generator.DefaultRandomInputGenerator(
            batch_size=_BATCH_SIZE)
        model_dir = self.create_tempdir().full_path
        mock_model = mocks.MockT2RModel()
        train_eval.train_eval_model(t2r_model=mock_model,
                                    input_generator_train=input_generator,
                                    max_train_steps=_MAX_TRAIN_STEPS,
                                    model_dir=model_dir)

        predictor = checkpoint_predictor.CheckpointPredictor(
            t2r_model=mock_model, checkpoint_dir=model_dir, use_gpu=False)
        with self.assertRaises(ValueError):
            predictor.predict({'does_not_matter': np.zeros(1)})
        self.assertEqual(predictor.model_version, -1)
        self.assertEqual(predictor.global_step, -1)
        self.assertTrue(predictor.restore())
        self.assertGreater(predictor.model_version, 0)
        self.assertEqual(predictor.global_step, 3)
        ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
            tf.estimator.ModeKeys.PREDICT)
        tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                      ref_feature_spec)
        features = tensorspec_utils.make_random_numpy(ref_feature_spec,
                                                      batch_size=_BATCH_SIZE)
        predictions = predictor.predict(features)
        self.assertLen(predictions, 1)
        self.assertCountEqual(sorted(predictions.keys()), ['logit'])
        self.assertEqual(predictions['logit'].shape, (2, 1))
Example #5
  def _test_predictor(self, predictor_cls):
    mock_model = mocks.MockTF2T2RModel()

    # Generate a sample to evaluate
    feature_spec = mock_model.preprocessor.get_in_feature_specification(
        tf.compat.v1.estimator.ModeKeys.PREDICT)
    sample_features = tensorspec_utils.make_random_numpy(
        feature_spec, batch_size=_BATCH_SIZE)

    # Generate a saved model and load it.
    path = self._save_model(mock_model, sample_features)
    saved_model_predictor = predictor_cls(path)

    # Not restored yet.
    with self.assertRaises(ValueError):
      saved_model_predictor.predict(sample_features)

    saved_model_predictor.restore()

    # Validate evaluations are the same afterwards.
    original_model_out = mock_model.inference_network_fn(
        sample_features, None, tf.compat.v1.estimator.ModeKeys.PREDICT)

    predictor_out = saved_model_predictor.predict(sample_features)

    np.testing.assert_almost_equal(original_model_out['logits'],
                                   predictor_out['logits'])
Example #6
    def test_predictor(self):
        input_generator = default_input_generator.DefaultRandomInputGenerator(
            batch_size=_BATCH_SIZE)
        model_dir = self.create_tempdir().full_path
        mock_model = mocks.MockT2RModel()
        train_eval.train_eval_model(
            t2r_model=mock_model,
            input_generator_train=input_generator,
            input_generator_eval=input_generator,
            max_train_steps=_MAX_TRAIN_STEPS,
            eval_steps=_MAX_EVAL_STEPS,
            model_dir=model_dir,
            create_exporters_fn=train_eval.create_default_exporters)

        predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
            export_dir=os.path.join(model_dir, 'export',
                                    'latest_exporter_numpy'))
        with self.assertRaises(ValueError):
            predictor.get_feature_specification()
        with self.assertRaises(ValueError):
            predictor.predict({'does_not_matter': np.zeros(1)})
        with self.assertRaises(ValueError):
            _ = predictor.model_version
        self.assertTrue(predictor.restore())
        self.assertGreater(predictor.model_version, 0)
        ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
            tf.estimator.ModeKeys.PREDICT)
        tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                      ref_feature_spec)
        features = tensorspec_utils.make_random_numpy(ref_feature_spec,
                                                      batch_size=_BATCH_SIZE)
        predictions = predictor.predict(features)
        self.assertLen(predictions, 1)
        self.assertEqual(predictions['logit'].shape, (2, 1))

    def test_predictor_with_async_hook(self):
        model_dir = self.create_tempdir().full_path
        default_create_export_fn = functools.partial(
            async_export_hook_builder.default_create_export_fn,
            batch_sizes_for_export=_BATCH_SIZES_FOR_EXPORT)
        export_dir = os.path.join(model_dir, _EXPORT_DIR)
        hook_builder = async_export_hook_builder.AsyncExportHookBuilder(
            export_dir=export_dir, create_export_fn=default_create_export_fn)
        input_generator = default_input_generator.DefaultRandomInputGenerator(
            batch_size=_BATCH_SIZE)
        mock_model = mocks.MockT2RModel()
        train_eval.train_eval_model(t2r_model=mock_model,
                                    input_generator_train=input_generator,
                                    train_hook_builders=[hook_builder],
                                    max_train_steps=_MAX_TRAIN_STEPS,
                                    model_dir=model_dir)

        predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
            export_dir=os.path.join(model_dir, _EXPORT_DIR))
        with self.assertRaises(ValueError):
            predictor.get_feature_specification()
        with self.assertRaises(ValueError):
            predictor.predict({'does_not_matter': np.zeros(1)})
        with self.assertRaises(ValueError):
            _ = predictor.model_version
        self.assertEqual(predictor.global_step, -1)
        self.assertTrue(predictor.restore())
        self.assertGreater(predictor.model_version, 0)
        # NOTE: The async hook builder will export the global step.
        self.assertEqual(predictor.global_step, 3)
        ref_feature_spec = mock_model.preprocessor.get_in_feature_specification(
            tf.estimator.ModeKeys.PREDICT)
        tensorspec_utils.assert_equal(predictor.get_feature_specification(),
                                      ref_feature_spec)
        features = tensorspec_utils.make_random_numpy(ref_feature_spec,
                                                      batch_size=_BATCH_SIZE)
        predictions = predictor.predict(features)
        self.assertLen(predictions, 1)
        self.assertCountEqual(sorted(predictions.keys()), ['logit'])
        self.assertEqual(predictions['logit'].shape, (2, 1))
Example #8
    def preprocess(preprocessor, feature_spec, label_spec, flatten=False):
      with tf.Session() as sess:
        feature_placeholders = tensorspec_utils.make_placeholders(
            feature_spec, batch_size=1)
        label_placeholders = None
        if label_spec is not None:
          label_placeholders = tensorspec_utils.make_placeholders(
              label_spec, batch_size=1)

        # Normally we want our features and labels to be flattened.
        # However, we also support non-flattened hierarchies.
        if flatten:
          feature_placeholders = tensorspec_utils.flatten_spec_structure(
              feature_placeholders)
          if label_spec is not None:
            label_placeholders = tensorspec_utils.flatten_spec_structure(
                label_placeholders)

        (features_preprocessed, labels_preprocessed) = preprocessor.preprocess(
            features=feature_placeholders,
            labels=label_placeholders,
            mode=tf.estimator.ModeKeys.TRAIN)

        # We create a mapping of {key: np.array} or a namedtuple spec structure.
        np_feature_spec = tensorspec_utils.make_random_numpy(
            feature_spec, batch_size=1)
        if label_placeholders is not None:
          np_label_spec = tensorspec_utils.make_random_numpy(
              label_spec, batch_size=1)

        # We create our feed dict, which maps each placeholder to an
        # np.array.
        feed_dict = tensorspec_utils.map_feed_dict(feature_placeholders,
                                                   np_feature_spec,
                                                   ignore_batch=True)
        if label_placeholders is not None:
          feed_dict = tensorspec_utils.map_feed_dict(label_placeholders,
                                                     np_label_spec,
                                                     feed_dict,
                                                     ignore_batch=True)

        fetch_results = [features_preprocessed]
        if label_placeholders is not None:
          fetch_results.append(labels_preprocessed)

        np_preprocessed = sess.run(
            fetch_results, feed_dict=feed_dict)

        np_features_preprocessed = np_preprocessed[0]
        if label_placeholders is not None:
          np_labels_preprocessed = np_preprocessed[1]

        np_feature_spec = tensorspec_utils.flatten_spec_structure(
            np_feature_spec)
        if label_placeholders is not None:
          np_label_spec = tensorspec_utils.flatten_spec_structure(np_label_spec)

        for key, value in np_feature_spec.items():
          np.testing.assert_allclose(value, np_features_preprocessed[key])

        if label_placeholders is not None:
          for key, value in np_label_spec.items():
            np.testing.assert_allclose(value, np_labels_preprocessed[key])
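For context, the preprocess helper above would be driven from the enclosing test roughly as follows. my_preprocessor stands in for any concrete preprocessor instance and is an assumption, not part of the source; the spec getters are the same ones used in the snippets above.

mode = tf.estimator.ModeKeys.TRAIN
feature_spec = my_preprocessor.get_in_feature_specification(mode)
label_spec = my_preprocessor.get_in_label_specification(mode)
preprocess(my_preprocessor, feature_spec, label_spec, flatten=True)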