def serving_input_receiver_fn():
            """Create the ServingInputReceiver to export a saved model.

      Returns:
        An instance of ServingInputReceiver.
      """
            # We have to filter our specs since only the required tensors are
            # used at inference time.
            flat_feature_spec = tensorspec_utils.flatten_spec_structure(
                self._get_input_features_for_receiver_fn())
            required_feature_spec = (
                tensorspec_utils.filter_required_flat_tensor_spec(
                    flat_feature_spec))
            receiver_tensors = tensorspec_utils.make_placeholders(
                required_feature_spec)

            # We want to ensure that our feature processing pipeline operates on a
            # copy of the features and does not alter the receiver_tensors.
            features = tensorspec_utils.flatten_spec_structure(
                copy.copy(receiver_tensors))

            if (not self._export_raw_receivers
                    and self._preprocess_fn is not None):
                features, _ = self._preprocess_fn(features=features,
                                                  labels=None)

            return tf.estimator.export.ServingInputReceiver(
                features, receiver_tensors)
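
For context, a receiver fn like the one above is what gets handed to an Estimator export call. A minimal sketch, assuming TF 1.x (as in the snippets on this page) and a hypothetical `estimator` and export directory:

import tensorflow as tf

def export_for_serving(estimator, serving_input_receiver_fn, export_dir_base):
    # Writes a SavedModel whose serving signature consumes the placeholders
    # created by the receiver fn; the estimator and path are hypothetical.
    return estimator.export_saved_model(
        export_dir_base, serving_input_receiver_fn=serving_input_receiver_fn)
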
Example #2
        def map_fn(features, labels):
            """Creates a mapping from base_preprocessor specs to meta specs.

      Args:
        features: Features according to the spec structure of the
          base_preprocessor.
        labels: Labels according to the spec structure of the base_preprocessor.

      Returns:
        meta_features: The regrouped features according to the meta feature
          spec.
        meta_labels: The regrouped labels according to the meta label spec.
      """
            # Now that we know all samples have the right batch size, we can
            # separate them into data for conditioning and evaluation.
            features = nest.map_structure(_verify_batch_size, features)
            labels = nest.map_structure(_verify_batch_size, labels)

            condition = TSpecStructure()
            inference = TSpecStructure()
            condition.features = utils.flatten_spec_structure(
                nest.map_structure(_split_batch_into_condition, features))
            inference.features = utils.flatten_spec_structure(
                nest.map_structure(_split_batch_into_inference, features))
            condition.labels = utils.flatten_spec_structure(
                nest.map_structure(_split_batch_into_condition, labels))
            meta_labels = utils.flatten_spec_structure(
                nest.map_structure(_split_batch_into_inference, labels))
            meta_features = TSpecStructure()
            meta_features.condition = condition
            meta_features.inference = inference
            return meta_features, meta_labels
Example #3
def create_maml_feature_spec(feature_spec, label_spec):
    """Create a meta feature from existing base_model specs.

  Note, the conditioning spec contains both features and labels (with a
  condition_features/condition_labels prefix), whereas the inference spec
  contains features only (with an inference_features prefix). The prefixes
  allow different data to be fed to the conditioning and inference inputs.

  Args:
    feature_spec: A hierarchy of TensorSpecs(subclasses) or Tensors.
    label_spec: A hierarchy of TensorSpecs(subclasses) or Tensors.

  Returns:
    An instance of TensorSpecStruct representing a valid
    meta learning tensor_spec with .condition and .inference access.
  """
    condition_spec = TSpecStructure()
    condition_spec.features = utils.flatten_spec_structure(
        utils.copy_tensorspec(feature_spec,
                              batch_size=-1,
                              prefix='condition_features'))
    condition_spec.labels = utils.flatten_spec_structure(
        utils.copy_tensorspec(label_spec,
                              batch_size=-1,
                              prefix='condition_labels'))
    inference_spec = TSpecStructure()
    inference_spec.features = utils.flatten_spec_structure(
        utils.copy_tensorspec(feature_spec,
                              batch_size=-1,
                              prefix='inference_features'))

    meta_feature_spec = TSpecStructure()
    meta_feature_spec.condition = condition_spec
    meta_feature_spec.inference = inference_spec
    return meta_feature_spec
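
A minimal usage sketch for create_maml_feature_spec, assuming the same utils/TSpecStructure aliases as the example above; the shapes and names are illustrative only:

feature_spec = TSpecStructure()
feature_spec.image = utils.ExtendedTensorSpec(
    shape=(64, 64, 3), dtype=tf.uint8, name='state/image')
label_spec = TSpecStructure()
label_spec.action = utils.ExtendedTensorSpec(
    shape=(2,), dtype=tf.float32, name='target/action')

meta_feature_spec = create_maml_feature_spec(feature_spec, label_spec)
# meta_feature_spec.condition carries features and labels for adaptation,
# meta_feature_spec.inference carries features only.
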
Example #4
def _create_meta_spec(tensor_spec, spec_type, num_train_samples_per_task,
                      num_val_samples_per_task):
    """Create a TrainValPair from an existing spec.

  Note, the train spec will maintain the same name and thus mapping to the
  input. This is important to create the parse_tf_example_fn automatically.
  The validation data will have a val/ prefix such that we can feed different
  data to both inputs.

  Args:
    tensor_spec: A dict, (named)tuple, list or a hierarchy thereof filled by
      TensorSpecs(subclasses) or Tensors.
    spec_type: A string ['features', 'labels'] specifying which spec type we
      alter in order to introduce the corresponding val_mode.
    num_train_samples_per_task: Number of training samples to expect per task
      batch element.
    num_val_samples_per_task: Number of val examples to expect per task batch
      element.

  Raises:
    ValueError: If the spec_type is not in ['features', 'labels'].

  Returns:
    An instance of TensorSpecStruct representing a valid
    meta learning tensor_spec with .train and .val access.
  """
    if spec_type not in ['features', 'labels']:
        raise ValueError('We only support spec_type "features" or "labels" '
                         'but received {}.'.format(spec_type))
    train_tensor_spec = utils.flatten_spec_structure(
        utils.copy_tensorspec(tensor_spec,
                              batch_size=num_train_samples_per_task,
                              prefix='train'))
    # Since the train part is also required for inference, the specs cannot be
    # optional.
    for key, value in train_tensor_spec.items():
        train_tensor_spec[key] = utils.ExtendedTensorSpec.from_spec(
            value, is_optional=False)

    val_tensor_spec = utils.flatten_spec_structure(
        utils.copy_tensorspec(tensor_spec,
                              batch_size=num_val_samples_per_task,
                              prefix='val'))

    # The val specs cannot be optional either, because the inputs to a while
    # loop have to be the same for every step of the loop.
    for key, value in val_tensor_spec.items():
        val_tensor_spec[key] = utils.ExtendedTensorSpec.from_spec(
            value, is_optional=False)
    val_mode_shape = (1, )
    if num_train_samples_per_task is None:
        val_mode_shape = ()
    val_mode = TensorSpec(shape=val_mode_shape,
                          dtype=tf.bool,
                          name='val_mode/{}'.format(spec_type))
    return utils.flatten_spec_structure(
        TrainValPair(train=train_tensor_spec,
                     val=val_tensor_spec,
                     val_mode=val_mode))
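
A rough usage sketch (illustrative spec and sample counts; _create_meta_spec is module-private, so this mirrors how the surrounding pipeline would call it):

base_spec = TSpecStructure()
base_spec.image = utils.ExtendedTensorSpec(
    shape=(64, 64, 3), dtype=tf.uint8, name='state/image')

train_val_features = _create_meta_spec(
    base_spec, spec_type='features',
    num_train_samples_per_task=4, num_val_samples_per_task=4)
# The flattened result contains 'train/image', 'val/image' and a boolean
# 'val_mode' spec that selects between the two inputs (see select_mode below).
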
Example #5
    def test_pack_flat_sequence_to_spec_structure(self):
        subset_placeholders = utils.make_placeholders(mock_nested_subset_spec)
        flattened_subset_placeholders = utils.flatten_spec_structure(
            subset_placeholders)
        packed_subset_placeholders = utils.pack_flat_sequence_to_spec_structure(
            mock_nested_subset_spec, flattened_subset_placeholders)
        utils.assert_equal(subset_placeholders, packed_subset_placeholders)
        utils.assert_equal(mock_nested_subset_spec,
                           packed_subset_placeholders,
                           ignore_batch=True)

        placeholders = utils.make_placeholders(mock_nested_spec)
        flattened_placeholders = utils.flatten_spec_structure(placeholders)
        packed_placeholders = utils.pack_flat_sequence_to_spec_structure(
            mock_nested_subset_spec, flattened_placeholders)
        # We only subselect what we need in pack_flat_sequence_to_spec_structure;
        # hence, we should recover exactly what we wanted.
        utils.assert_equal(mock_nested_subset_spec,
                           packed_placeholders,
                           ignore_batch=True)
        utils.assert_equal(subset_placeholders, packed_placeholders)

        packed_optional_placeholders = utils.pack_flat_sequence_to_spec_structure(
            mock_nested_optional_spec, flattened_placeholders)
        # Although mock_nested_optional_spec asks for more tensors,
        # flattened_placeholders cannot provide them; fortunately, they are
        # optional.
        utils.assert_required(packed_optional_placeholders, placeholders)
        utils.assert_required(mock_nested_spec,
                              packed_optional_placeholders,
                              ignore_batch=True)
Example #6
def select_mode(val_mode, train, val):
  # nest.map_structure requires native Python dicts as input since it uses
  # the C interface.
  val_dict = utils.flatten_spec_structure(val).to_dict()
  train_dict = utils.flatten_spec_structure(train).to_dict()
  select_mode_fn = lambda train, val: tf.where(val_mode, x=val, y=train)
  return utils.TensorSpecStruct(
      nest.map_structure(select_mode_fn, train_dict, val_dict).items())
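
A minimal sketch of what select_mode does, assuming TF 1.x graph mode and toy tensors in place of real model features; here val_mode holds one boolean per batch row so that tf.where selects whole rows:

import tensorflow as tf

train = {'logits': tf.zeros([4, 2])}
val = {'logits': tf.ones([4, 2])}
# Rows 2 and 3 are taken from `val`, the rest from `train`.
val_mode = tf.constant([False, False, True, True])

selected = select_mode(val_mode, train=train, val=val)
with tf.Session() as sess:
    print(sess.run(selected['logits']))  # first two rows 0.0, last two rows 1.0
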
Example #7
def get_in_feature_specification(self, mode):
    feature_spec = tensorspec_utils.flatten_spec_structure(
        self._model_feature_specification_fn(mode))
    # This will raise since casting is only supported from tf.float32 to
    # tf.bfloat16.
    feature_spec.data_bfloat16 = tensorspec_utils.ExtendedTensorSpec.from_spec(
        spec=feature_spec.data_bfloat16, dtype=tf.int32)
    return feature_spec
Example #8
def create_metaexample_spec(
    model_spec,
    num_samples_per_task,
    prefix):
  """Converts a model feature/label spec into a MetaExample spec.

  Args:
    model_spec: The base model tensor spec.
    num_samples_per_task: Number of episodes in the task.
    prefix: The tf.Example feature column name prefix.
  Returns:
    A TSpecStructure. For each spec in model_spec, the output contains
    num_samples_per_task corresponding specs stored as: "<name>/i".
  """
  model_spec = utils.flatten_spec_structure(model_spec)
  meta_example_spec = TSpecStructure()

  for key in model_spec.keys():
    for i in range(num_samples_per_task):
      spec = model_spec[key]
      name_prefix = '{:s}_ep{:d}'.format(prefix, i)
      new_name = name_prefix + '/' + six.ensure_str(spec.name)
      meta_example_spec[key + '/{:}'.format(i)] = (
          utils.ExtendedTensorSpec.from_spec(
              spec, name=new_name))
  return meta_example_spec
Example #9
    def serving_input_receiver_fn():
      """Create the ServingInputReceiver to export a saved model.

      Returns:
        An instance of ServingInputReceiver.
      """
      # We assume only one input, a string which contains the serialized proto.
      receiver_tensors = {
          'input_example_tensor':
              tf.placeholder(
                  dtype=tf.string, shape=[None], name='input_example_tensor')
      }
      feature_spec = self._get_input_features_for_receiver_fn()
      # We have to filter our specs since only the required tensors are
      # used at inference time.
      flat_feature_spec = tensorspec_utils.flatten_spec_structure(feature_spec)
      required_feature_spec = (
          tensorspec_utils.filter_required_flat_tensor_spec(flat_feature_spec))

      tensor_dict, tensor_spec_dict = (
          tensorspec_utils.tensorspec_to_feature_dict(required_feature_spec))

      parse_tf_example_fn = tfdata.create_parse_tf_example_fn(
          tensor_dict=tensor_dict,
          tensor_spec_dict=tensor_spec_dict,
          feature_tspec=feature_spec)

      features = parse_tf_example_fn(receiver_tensors['input_example_tensor'])

      if (not self._export_raw_receivers and self._preprocess_fn is not None):
        features, _ = self._preprocess_fn(features=features, labels=None)

      return tf.estimator.export.ServingInputReceiver(features,
                                                      receiver_tensors)
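
The receiver above consumes serialized tf.Example protos. A sketch of building such a request, with an entirely illustrative feature name ('measurement') standing in for whatever the real feature spec requires:

import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'measurement': tf.train.Feature(
        float_list=tf.train.FloatList(value=[0.1, 0.2, 0.3])),
}))
serialized = example.SerializeToString()
# At serving time this byte string is what gets fed into the
# 'input_example_tensor' placeholder and decoded by parse_tf_example_fn.
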
Example #10
  def test_create_metaexample_spec(self):
    feature_spec = TSpec()
    feature_spec.image = utils.ExtendedTensorSpec(
        shape=_DEFAULT_IN_IMAGE_SHAPE,
        dtype=tf.uint8,
        is_optional=False,
        data_format='jpeg',
        name='state/image')
    feature_spec.action = utils.ExtendedTensorSpec(
        shape=_DEFAULT_ACTION_SHAPE,
        dtype=tf.float32,
        is_optional=False,
        name='state/action')

    num_samples_in_task = 3
    metaexample_spec = preprocessors.create_metaexample_spec(
        feature_spec, num_samples_in_task, 'condition')

    flat_feature_spec = utils.flatten_spec_structure(feature_spec)
    self.assertLen(
        list(metaexample_spec.keys()),
        num_samples_in_task * len(list(flat_feature_spec.keys())))

    for key in flat_feature_spec:
      for i in range(num_samples_in_task):
        meta_example_key = six.ensure_str(key) + '/{:d}'.format(i)
        self.assertIn(meta_example_key, list(metaexample_spec.keys()))
        self.assertTrue(
            six.ensure_str(metaexample_spec[meta_example_key].name).startswith(
                'condition_ep'))
Example #11
    def test_filter_required_tensor_spec_struct(self):
        tensor_spec_struct = utils.flatten_spec_structure(
            mock_nested_optional_spec)
        self.assertDictEqual(
            tensor_spec_struct, {
                'train/images': T1,
                'train/actions': T2,
                'test/images': T1,
                'test/actions': T2,
                'optional/images': O4,
                'optional/actions': O6,
            })
        required_tensor_spec_struct = utils.filter_required_flat_tensor_spec(
            tensor_spec_struct)
        self.assertDictEqual(
            required_tensor_spec_struct, {
                'train/images': T1,
                'train/actions': T2,
                'test/images': T1,
                'test/actions': T2,
            })

        # In case we pass a hierarchical spec this function should raise.
        with self.assertRaises(ValueError):
            required_tensor_spec_struct = utils.filter_required_flat_tensor_spec(
                mock_nested_optional_spec)
Example #12
 def test_flatten_spec_structure(self):
     flatten_spec_structure = utils.flatten_spec_structure(
         mock_nested_subset_spec)
     self.assertDictEqual(flatten_spec_structure, {
         'train/images': T1,
         'train/actions': T2
     })
Example #13
    def predict(self, features):
        """Predicts based on feature input using the loaded model.

    Args:
      features: A dict containing the features used for predictions.

    Returns:
      The result of the queried model predictions.
    """

        self.assert_is_loaded()
        # If using an action-tiled model, the action tiling must align with the spec
        # structure. If the supplied inputs align with the batch-tiled action,
        # expand the input to feed the tiled batch elements.
        flattened_feature_spec = tensorspec_utils.flatten_spec_structure(
            self.get_feature_specification())

        def _maybe_expand_dim(path, val):
            model_spec = flattened_feature_spec.get(path)
            if model_spec and model_spec.shape.as_list() == list(val.shape):
                return np.expand_dims(val, 0)
            return val

        features = {
            k: _maybe_expand_dim(k, val)
            for k, val in features.items()
        }
        return self._predict_fn(features)
Example #14
    def create_warmup_requests_numpy(self, batch_sizes, export_dir):
        """Creates warm-up requests for a given feature specification.

    This writes an output file in
    `export_dir/assets.extra/tf_serving_warmup_requests` for use with Servo.

    Args:
      batch_sizes: Batch sizes of warm-up requests to write.
      export_dir: Base directory for the export.

    Returns:
      The filename written.
    """
        feature_spec = self._get_input_features_for_receiver_fn()

        flat_feature_spec = tensorspec_utils.flatten_spec_structure(
            feature_spec)
        tf.io.gfile.makedirs(export_dir)
        request_filename = os.path.join(export_dir,
                                        'tf_serving_warmup_requests')
        with tf.python_io.TFRecordWriter(request_filename) as writer:
            for batch_size in batch_sizes:
                request = predict_pb2.PredictRequest()
                request.model_spec.name = self._model_name
                numpy_feature_specs = tensorspec_utils.make_constant_numpy(
                    flat_feature_spec, constant_value=0, batch_size=batch_size)

                for key, numpy_spec in numpy_feature_specs.items():
                    request.inputs[key].CopyFrom(
                        contrib_util.make_tensor_proto(numpy_spec))

                log = prediction_log_pb2.PredictionLog(
                    predict_log=prediction_log_pb2.PredictLog(request=request))
                writer.write(log.SerializeToString())
        return request_filename
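
For a quick sanity check, the written warm-up file can be read back with the same protos. A sketch assuming TF 1.x and the tensorflow_serving prediction_log_pb2 proto used above (the path is illustrative):

import tensorflow as tf
from tensorflow_serving.apis import prediction_log_pb2

request_filename = '/tmp/export/tf_serving_warmup_requests'  # illustrative
for record in tf.python_io.tf_record_iterator(request_filename):
    log = prediction_log_pb2.PredictionLog()
    log.ParseFromString(record)
    print(log.predict_log.request.model_spec.name,
          sorted(log.predict_log.request.inputs.keys()))
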
Example #15
 def test_tensor_spec_struct_composing_with_namedtuple(self):
     # Test that we can combine namedtuple and TensorSpecStruct.
      # This is important since non-top-level dicts use the dict_view, but
      # nest.flatten uses the CPython interface, which is why we need to
      # maintain a local copy of the data in subviews.
     test_spec = MockNested(train={
         'a': np.ones(1),
         'b': 2 * np.ones(1)
     },
                            test=MockBar(images=T1, actions=T2))
     ref_flat_spec = utils.flatten_spec_structure(test_spec)
     new_test_spec = MockNested(train=ref_flat_spec.train,
                                test=MockBar(images=T1, actions=T2))
     new_flat_spec = utils.flatten_spec_structure(new_test_spec)
     for key in ref_flat_spec:
         self.assertIn(key, new_flat_spec)
         self.assertEqual(new_flat_spec[key], ref_flat_spec[key])
Example #16
        def serving_input_receiver_fn():
            """Create the ServingInputReceiver to export a saved model.

      Returns:
        An instance of ServingInputReceiver.
      """
            # We assume only one input, a string which contains the serialized proto.
            receiver_tensors = {
                'input_example_tensor':
                tf.placeholder(dtype=tf.string,
                               shape=[None],
                               name='input_example_tensor')
            }
            # We have to filter our specs since only the required tensors are
            # used at inference time.
            flat_feature_spec = tensorspec_utils.flatten_spec_structure(
                self._feature_spec)

            # We need to freeze the conditioning and inference shapes.
            for key, value in flat_feature_spec.condition.items():
                ref_shape = value.shape.as_list()
                shape = [self._num_condition_samples_per_task] + ref_shape[1:]
                flat_feature_spec.condition[key] = (
                    tensorspec_utils.ExtendedTensorSpec.from_spec(value,
                                                                  shape=shape))

            for key, value in flat_feature_spec.inference.items():
                ref_shape = value.shape.as_list()
                shape = [self._num_inference_samples_per_task] + ref_shape[1:]
                flat_feature_spec.inference[key] = (
                    tensorspec_utils.ExtendedTensorSpec.from_spec(value,
                                                                  shape=shape))

            required_feature_spec = (
                tensorspec_utils.filter_required_flat_tensor_spec(
                    flat_feature_spec))

            tensor_dict, tensor_spec_dict = (
                tensorspec_utils.tensorspec_to_feature_dict(
                    required_feature_spec))

            parse_tf_example_fn = tfdata.create_parse_tf_example_fn(
                tensor_dict=tensor_dict,
                tensor_spec_dict=tensor_spec_dict,
                feature_tspec=self._feature_spec)

            features = parse_tf_example_fn(
                receiver_tensors['input_example_tensor'])

            if self._preprocess_fn is not None:
                features, _ = self._preprocess_fn(
                    features=features,
                    labels=None,
                    mode=tf.estimator.ModeKeys.PREDICT)

            return tf.estimator.export.ServingInputReceiver(
                features, receiver_tensors)
Example #17
def tile_val_mode(pair):
  """Tile val_mode to num_tasks * num_train_samples_per_task batch elements.

  Args:
    pair: TensorSpecStruct whose tensors have shape [num_tasks,
      num_train_samples_per_task, ...].

  Raises:
    ValueError: If num_train_samples does not match num_val_samples.
  """
  train_tensor = list(utils.flatten_spec_structure(pair.train).values())[0]
  num_train_samples_per_task = train_tensor.shape.as_list()[1]
  val_tensor = list(utils.flatten_spec_structure(pair.val).values())[0]
  num_val_samples_per_task = val_tensor.shape.as_list()[1]
  if num_train_samples_per_task != num_val_samples_per_task:
    raise ValueError('Flattening example and batch dimensions requires '
                     'num_train_samples and num_val_samples to be the same.')
  pair.val_mode = tf.tile(pair.val_mode, [num_train_samples_per_task, 1])
Example #18
        def serving_input_receiver_fn():
            """Create the ServingInputReceiver to export a saved model.

      Returns:
        An instance of ServingInputReceiver.
      """
            # We have to filter our specs since only the required tensors are
            # used at inference time.
            flat_feature_spec = tensorspec_utils.flatten_spec_structure(
                self._feature_spec)
            # We need to freeze the conditioning and inference shapes.
            for key, value in flat_feature_spec.condition.items():
                ref_shape = value.shape.as_list()
                shape = [self._num_condition_samples_per_task] + ref_shape[1:]
                flat_feature_spec.condition[key] = (
                    tensorspec_utils.ExtendedTensorSpec.from_spec(value,
                                                                  shape=shape))

            for key, value in flat_feature_spec.inference.items():
                ref_shape = value.shape.as_list()
                shape = [self._num_inference_samples_per_task] + ref_shape[1:]
                flat_feature_spec.inference[key] = (
                    tensorspec_utils.ExtendedTensorSpec.from_spec(value,
                                                                  shape=shape))

            required_feature_spec = (
                tensorspec_utils.filter_required_flat_tensor_spec(
                    flat_feature_spec))
            receiver_tensors = tensorspec_utils.make_placeholders(
                required_feature_spec)

            # We want to ensure that our feature processing pipeline operates on a
            # copy of the features and does not alter the receiver_tensors.
            features = tensorspec_utils.flatten_spec_structure(
                copy.copy(receiver_tensors))

            if self._preprocess_fn is not None:
                features, _ = self._preprocess_fn(
                    features=features,
                    labels=None,
                    mode=tf.estimator.ModeKeys.PREDICT)

            return tf.estimator.export.ServingInputReceiver(
                features, receiver_tensors)
Example #19
    def get_out_label_specification(self, mode):
        """The specification for the output labels after executing preprocess_fn.

    Arguments:
      mode: mode key for this feature specification
    Returns:
      A TensorSpecStruct describing the required and optional tensors.
    """
        return tensorspec_utils.flatten_spec_structure(
            self._model_label_specification_fn(mode))
Example #20
def print_spec(tensor_spec):
    """Iterate over a spec and print its values in sorted order.

  Args:
    tensor_spec: A dict, (named)tuple, list or a hierarchy thereof filled by
      TensorSpecs(subclasses) or Tensors.
  """
    for key, value in sorted(
            tensorspec_utils.flatten_spec_structure(tensor_spec).items()):
        logging.info('%s: %s', key, value)
Example #21
    def get_in_feature_specification(self, mode):
        """The specification for the input features for the preprocess_fn.

    Arguments:
      mode: mode key for this feature specification
    Returns:
      A TensorSpecStruct describing the required and optional tensors.
    """
        return tensorspec_utils.flatten_spec_structure(
            self._model_feature_specification_fn(mode))
Example #22
 def get_in_feature_specification(self, mode):
     """See base class."""
     feature_spec = tensorspec_utils.copy_tensorspec(
         self._model_feature_specification_fn(mode))
     true_img_shape = feature_spec.image.shape.as_list()
     true_img_shape[
         -3:-1] = self._src_img_res  # Overwrite the H, W dimensions.
     feature_spec.image = TensorSpec.from_spec(feature_spec.image,
                                               shape=true_img_shape,
                                               dtype=tf.uint8)
     return tensorspec_utils.flatten_spec_structure(feature_spec)
Example #23
def create_maml_label_spec(label_spec):
    """Create a meta feature from existing base_model specs.

  Args:
    label_spec: A hierarchy of TensorSpecs(subclasses) or Tensors.

  Returns:
    An instance of TensorSpecStruct representing a valid
    meta learning tensor_spec for computing the outer loss.
  """
    return utils.flatten_spec_structure(
        utils.copy_tensorspec(label_spec, batch_size=-1, prefix='meta_labels'))
Example #24
    def infer_base_model_output_dtypes(self, mode, params):
        """Infer the dtypes of the model in a separate graph.

    Args:
      mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
      params: An optional dict of hyper parameters that will be passed into
        input_fn and model_fn. Keys are names of parameters, values are basic
        python types. There are reserved keys for TPUEstimator, including
        'batch_size'.

    Returns:
      dtypes: A dict containing all output dtypes {str: dtype}.
    """
        dtype_inference_graph = tf.Graph()
        with dtype_inference_graph.as_default():
            with tf.variable_scope('IGNORE_ONLY_TO_INFER_OUTPUT_DTYPES'):
                # In this graph we can now create placeholders in order to infer the
                # right dtype of the outputs.
                feature_spec = self.get_feature_specification(mode)
                features_dtype = utils.make_placeholders(
                    feature_spec.condition.features, batch_size=-1)
                labels_dtype = utils.make_placeholders(
                    feature_spec.condition.labels, batch_size=-1)
                # We need to infer the output dtypes.
                features_condition = utils.flatten_spec_structure(
                    features_dtype)
                labels_condition = utils.flatten_spec_structure(labels_dtype)
                infered_outputs = self._base_model.inference_network_fn(
                    features=features_condition,
                    labels=labels_condition,
                    mode=mode,
                    params=params)
                dtypes = {}
                for key, value in infered_outputs.items():
                    dtypes[key] = value.dtype
                return dtypes
Example #25
 def get_in_feature_specification(self, mode):
     # Tempting to use create_maml_feature_spec, but we don't want the
     # prefixes or the meta-batch dimension.
     condition_spec = TSpecStructure()
     condition_spec.features = (
         self._base_preprocessor.get_in_feature_specification(mode))
     condition_spec.labels = (
         self._base_preprocessor.get_in_label_specification(mode))
     inference_spec = TSpecStructure()
     inference_spec.features = (
         self._base_preprocessor.get_in_feature_specification(mode))
     feature_spec = TSpecStructure()
     feature_spec.condition = create_metaexample_spec(
         condition_spec, self._num_condition_samples_per_task, 'condition')
     feature_spec.inference = create_metaexample_spec(
         inference_spec, self._num_inference_samples_per_task, 'inference')
     return utils.flatten_spec_structure(feature_spec)
Example #26
def unflatten_batch_examples(tensor_collection, num_samples_per_task):
    """Unflatten task and samples dimension for M-L algorithms like RL^2.

  Args:
    tensor_collection: Tensor collection whose tensors have shape [num_tasks *
      num_samples_per_task, ...].
    num_samples_per_task: The number of samples we have per task.

  Returns:
    result_unflattened: A tensor_collection with the same elements but the
      shape has been changed to [num_tasks, num_samples_per_task, ...].
  """
    result_unflattened = utils.flatten_spec_structure(tensor_collection)
    for key, value in result_unflattened.items():
        result_unflattened[key] = tf.reshape(
            value, [-1, num_samples_per_task] + value.shape.as_list()[1:])
    return result_unflattened
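
A shape-only sketch of the reshape performed above, with a toy collection and illustrative dimensions (TF 1.x assumed):

import tensorflow as tf

num_tasks, num_samples_per_task, feature_dim = 5, 3, 8
flat = {'obs': tf.zeros([num_tasks * num_samples_per_task, feature_dim])}
unflat = unflatten_batch_examples(flat, num_samples_per_task)
# The flat [num_tasks * num_samples_per_task, feature_dim] tensor becomes
# [num_tasks, num_samples_per_task, feature_dim], i.e. (5, 3, 8) here.
print(unflat['obs'].shape)
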
Example #27
  def get_in_feature_specification(self, mode):
    """See base class."""
    feature_spec = tensorspec_utils.copy_tensorspec(
        self._model_feature_specification_fn(mode))
    # We don't want to parse the original_image; this feature is added in
    # preprocess_fn to satisfy the model's inputs.
    if mode != PREDICT and 'original_image' in feature_spec:
      del feature_spec['original_image']

    if 'image' in feature_spec:
      true_img_shape = feature_spec.image.shape.as_list()
      # Overwrite the H, W dimensions.
      true_img_shape[-3:-1] = self._src_img_res
      feature_spec.image = TensorSpec.from_spec(
          feature_spec.image, shape=true_img_shape, dtype=tf.uint8)
    return tensorspec_utils.flatten_spec_structure(feature_spec)
Example #28
    def _map_task_learn(self, task_learn_fn, elems, mode, params=None):
        """Maps a task learning/adaptation function across a batch of tasks.

    Args:
      task_learn_fn: A callable which will be mapped across `elems` once `elems`
        is unpacked along the first (task) dimension.
      elems: A (possibly) nested structure of tensors. Its tensors will be
        unpacked along the first dimension before passing into task_learn_fn.
      mode: (ModeKeys) Specifies if this is training, evaluation, or prediction.
      params: An optional dict of hyper parameters.

    Returns:
      The stacked outputs of task_learn_fn after being mapped across `elems`.
    """

        if self._use_parallel_for:
            return pfor_map_fn(task_learn_fn, elems)
        else:
            if params is None:
                params = {}
            params['is_inner_loop'] = True
            dtypes = self.infer_base_model_output_dtypes(mode, params)

            # We flatten the features to infer the batch_size for parallel iterations.
            # The flattened TensorSpecStruct enables us to get the
            # first element of condition without knowing the name.
            parallel_iterations = list(
                utils.flatten_spec_structure(
                    elems).values())[0].get_shape().as_list()[0]
            # Use parallel execution per batch; if we don't know the batch_size
            # we fall back to the default number of parallel iterations.
            if parallel_iterations is None:
                parallel_iterations = 10

            # The output for val, the inner loop training steps, and training loss.
            dtype = ([dtypes] * 2, [dtypes] * (self._num_inner_loop_steps + 1),
                     [tf.float32] * (self._num_inner_loop_steps + 1))

            return tf.map_fn(task_learn_fn,
                             elems=elems,
                             dtype=dtype,
                             parallel_iterations=parallel_iterations)
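
A stripped-down sketch of the tf.map_fn pattern used in the non-parallel_for branch above, with a toy per-task function standing in for task_learn_fn and a tuple dtype mirroring the structured output (TF 1.x assumed):

import tensorflow as tf

def per_task(task_elems):
    # task_elems has the per-task shape [num_samples_per_task, feature_dim].
    return tf.reduce_mean(task_elems), tf.reduce_sum(task_elems)

elems = tf.ones([4, 3, 8])  # [num_tasks, num_samples_per_task, feature_dim]
means, sums = tf.map_fn(
    per_task, elems, dtype=(tf.float32, tf.float32), parallel_iterations=4)
# means and sums each have shape [num_tasks]; the real code maps
# task_learn_fn the same way, just with a nested `elems` structure and dtype.
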
Example #29
    def test_is_flat_spec_or_tensors_structure(self):
        flatten_spec_structure = utils.flatten_spec_structure(
            mock_nested_subset_spec)
        self.assertFalse(
            utils.is_flat_spec_or_tensors_structure(mock_nested_subset_spec))
        self.assertTrue(
            utils.is_flat_spec_or_tensors_structure(flatten_spec_structure))

        # Flat lists are not tensor_spec_structs since they are not proper
        # {path: spec_or_tensor_or_numpy} structures. They would not be invariant
        # under flatten_spec_structure calls.
        self.assertFalse(utils.is_flat_spec_or_tensors_structure([T1, T2]))

        # Flat dictionaries are proper tensor_spec_struct_or_tensors, they would
        # be invariant under flatten_spec_structure.
        self.assertTrue(
            utils.is_flat_spec_or_tensors_structure({
                't1': T1,
                't2': T2
            }))
Example #30
    def get_in_feature_specification(self, mode):
        """The specification for the input features before executing preprocess_fn.

    Arguments:
      mode: mode key for this feature specification

    Returns:
      A TensorSpecStruct describing the required and optional tensors supporting
      both dictionary and hierarchical attribute like access.
    """
        # We transform the tensorspec into its flat structure.
        # This allows us to easily replace specs and tensors during
        # preprocessing.
        # Note, this will not alter the spec itself; it merely allows the
        # user of this function to easily apply the proper preprocessing.
        # Further, flattening the spec structure will make a copy, allowing
        # us to safely operate on the in_feature_specification without
        # altering any implicit state or changing the model_feature_specification.
        tensor_spec_struct = tensorspec_utils.flatten_spec_structure(
            self._model_feature_specification_fn(mode))
        return self._transform_in_feature_specification(tensor_spec_struct)