def get_features_predictions_labels_dicts(
      self) -> Tuple[types.TensorTypeMaybeDict, types.TensorTypeMaybeDict,
                     types.TensorTypeMaybeDict]:
    """Returns features, predictions, labels dictionaries (or values).

    The dictionaries contain references to the nodes, so they can be used
    to construct new metrics similarly to how metrics can be constructed in
    the Trainer.

    Returns:
      Tuple of features, predictions, labels dictionaries (or values).
    """
    features = {}
    for key, value in self._features_map.items():
      features[key] = value

    predictions = {}
    for key, value in self._predictions_map.items():
      predictions[key] = value
    # Unnest if it wasn't a dictionary to begin with.
    default_predictions_key = util.default_dict_key(
        eval_constants.PREDICTIONS_NAME)
    if list(predictions.keys()) == [default_predictions_key]:
      predictions = predictions[default_predictions_key]

    labels = {}
    for key, value in self._labels_map.items():
      labels[key] = value
    # Unnest if it wasn't a dictionary to begin with.
    default_labels_key = util.default_dict_key(eval_constants.LABELS_NAME)
    if list(labels.keys()) == [default_labels_key]:
      labels = labels[default_labels_key]

    return (features, predictions, labels)
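A minimal usage sketch, not part of the original example: `eval_saved_model` is a hypothetical loaded object exposing the method above, and it is assumed here that predictions and labels come back as single tensors (unnested) whose values are directly comparable; the metric shown is only illustrative of wiring a new metric off the returned nodes, much like in the Trainer.

import tensorflow as tf

features, predictions, labels = (
    eval_saved_model.get_features_predictions_labels_dicts())
# If the model emitted dictionaries instead of single tensors, concrete keys
# would have to be selected from `predictions` and `labels` here.
accuracy = tf.compat.v1.metrics.accuracy(labels=labels, predictions=predictions)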
Example #2
def load_predictions(
    signature_def: tf.compat.v1.MetaGraphDef.SignatureDefEntry,
    graph: tf.Graph) -> Dict[str, types.TensorType]:
  """Loads prediction nodes from signature_def.outputs.

  Args:
    signature_def: SignatureDef to lookup nodes in.
    graph: TensorFlow graph to lookup the nodes in.

  Returns:
    Predictions map as an OrderedDict.
  """
  # The canonical ordering we use here is simply the ordering we get
  # from the predictions collection.
  predictions = extract_signature_inputs_or_outputs_with_prefix(
      constants.PREDICTIONS_NAME, signature_def.outputs,
      util.default_dict_key(constants.PREDICTIONS_NAME))
  predictions_map = collections.OrderedDict()
  for k, v in predictions.items():
    # Extract to dictionary with a single key for consistency with
    # how features and labels are extracted.
    predictions_map[k] = (
        tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info(v, graph))
  return predictions_map
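A hedged usage sketch for the function above; `saved_model_path` and the 'eval' signature key are placeholders, not values defined in this example:

import tensorflow as tf

graph = tf.Graph()
with tf.compat.v1.Session(graph=graph) as sess:
  # Load the SavedModel's MetaGraphDef into the session's graph.
  meta_graph_def = tf.compat.v1.saved_model.loader.load(
      sess, [tf.saved_model.SERVING], saved_model_path)
  signature_def = meta_graph_def.signature_def['eval']  # placeholder key
  predictions_map = load_predictions(signature_def, graph)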
Example #3
def fpl_dict(fetched, group):
  native = fetched.values[group]
  wrapped = {}
  if not isinstance(native, dict):
    native = {util.default_dict_key(group): native}
  for key in native:
    wrapped[key] = {encoding.NODE_SUFFIX: native[key]}
  return wrapped
Example #4
def __init__(self, prediction_key: Text):
  if not prediction_key:
    # If prediction key is set to the empty string, the user is telling us
    # that their Estimator returns a predictions Tensor rather than a
    # dictionary. Set the key to the magic key we use in that case.
    self._prediction_key = eval_saved_model_util.default_dict_key(
        eval_saved_model_constants.PREDICTIONS_NAME)
  else:
    self._prediction_key = prediction_key
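For illustration only: the enclosing class is not shown in this example, so `_PredictionKeyedMetric` below is a hypothetical stand-in. An empty prediction_key falls back to the magic default dictionary key; a non-empty key is used as given.

uses_default_key = _PredictionKeyedMetric(prediction_key='')  # Tensor-valued predictions
uses_given_key = _PredictionKeyedMetric(prediction_key='probabilities')  # dict-valued predictions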
Example #5
def fpl_dict(fetched: FetchedTensorValues,
             group: Text) -> types.DictOfFetchedTensorValues:
  native = fetched.values[group]
  wrapped = {}
  if not isinstance(native, dict):
    native = {util.default_dict_key(group): native}
  for key in native:
    wrapped[key] = {encoding.NODE_SUFFIX: native[key]}
  return wrapped
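A plain-Python sketch of the wrapping performed above, with placeholder stand-ins for `util.default_dict_key` and `encoding.NODE_SUFFIX` (the real key and suffix strings may differ):

NODE_SUFFIX = 'node'  # placeholder for encoding.NODE_SUFFIX

def wrap_like_fpl_dict(native, default_key):
  # Non-dict values are first nested under the default dict key, then every
  # entry is wrapped one level deeper under the node suffix.
  if not isinstance(native, dict):
    native = {default_key: native}
  return {key: {NODE_SUFFIX: value} for key, value in native.items()}

# wrap_like_fpl_dict([0, 1, 1], '__labels__')
#   -> {'__labels__': {'node': [0, 1, 1]}}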
Example #6
def _add_tfma_collections(features: types.TensorTypeMaybeDict,
                          labels: Optional[types.TensorTypeMaybeDict],
                          input_refs: types.TensorType):
  """Add extra collections for features, labels, input_refs, version.

  This should be called within the Graph that will be saved. Typical usage
  would be when features and labels have been parsed, i.e. in the
  input_receiver_fn.

  Args:
    features: dict of strings to tensors representing features
    labels: dict of strings to tensors or a single tensor
    input_refs: See EvalInputReceiver().
  """
  # Clear existing collections first, in case the EvalInputReceiver was called
  # multiple times.
  del tf.compat.v1.get_collection_ref(
      encoding.with_suffix(encoding.FEATURES_COLLECTION,
                           encoding.KEY_SUFFIX))[:]
  del tf.compat.v1.get_collection_ref(
      encoding.with_suffix(encoding.FEATURES_COLLECTION,
                           encoding.NODE_SUFFIX))[:]
  del tf.compat.v1.get_collection_ref(
      encoding.with_suffix(encoding.LABELS_COLLECTION,
                           encoding.KEY_SUFFIX))[:]
  del tf.compat.v1.get_collection_ref(
      encoding.with_suffix(encoding.LABELS_COLLECTION,
                           encoding.NODE_SUFFIX))[:]
  del tf.compat.v1.get_collection_ref(encoding.EXAMPLE_REF_COLLECTION)[:]
  del tf.compat.v1.get_collection_ref(encoding.TFMA_VERSION_COLLECTION)[:]

  for feature_name, feature_node in features.items():
    _encode_and_add_to_node_collection(encoding.FEATURES_COLLECTION,
                                       feature_name, feature_node)

  if labels is not None:
    # Labels can either be a Tensor, or a dict of Tensors.
    if not isinstance(labels, dict):
      labels = {util.default_dict_key(constants.LABELS_NAME): labels}

    for label_key, label_node in labels.items():
      _encode_and_add_to_node_collection(encoding.LABELS_COLLECTION,
                                         label_key, label_node)
  # Previously input_refs was called example_ref. This code is being
  # deprecated, so it was not renamed.
  example_ref_collection = tf.compat.v1.get_collection_ref(
      encoding.EXAMPLE_REF_COLLECTION)
  example_ref_collection.append(encoding.encode_tensor_node(input_refs))

  tf.compat.v1.add_to_collection(encoding.TFMA_VERSION_COLLECTION,
                                 version.VERSION)
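A rough sketch of the "typical usage" the docstring describes, calling the helper from inside an input-receiver-style function while the graph that will be saved is being built (TF1-style graph mode is assumed); the feature spec, the 'label' column, and the input_refs construction are all illustrative assumptions:

import tensorflow as tf

def example_input_receiver_fn():
  serialized = tf.compat.v1.placeholder(tf.string, shape=[None], name='examples')
  feature_spec = {  # illustrative spec
      'age': tf.io.FixedLenFeature([], tf.float32),
      'label': tf.io.FixedLenFeature([], tf.int64),
  }
  features = tf.io.parse_example(serialized, feature_spec)
  labels = features.pop('label')
  input_refs = tf.range(tf.size(serialized))  # one ref per input example
  _add_tfma_collections(features, labels, input_refs)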
Example #7
def __init__(self, label_key: str, weight_key: str):
  """Initialize.

  Args:
    label_key: The key in the labels dictionary which holds the label. Set
      this to empty if labels is a Tensor and not a dictionary.
    weight_key: The key in the features dictionary which holds the weights.
      Note that the weight value must be identical across all examples in the
      same query. If set to empty, uses 1.0 instead.
  """
  if not label_key:
    # If label_key is set to the empty string, the user is telling us
    # that their Estimator returns a labels Tensor rather than a
    # dictionary. Set the key to the magic key we use in that case.
    self._label_key = eval_saved_model_util.default_dict_key(
        eval_saved_model_constants.LABELS_NAME)
  else:
    self._label_key = label_key
  self._weight_key = weight_key
Example #8
def load_additional_inputs(
    prefix,
    signature_def,
    graph,
):
  """Loads additional input tensors from signature_def.inputs.

  Args:
    prefix: Prefix used for tensors in signature_def.inputs (e.g. features,
      labels, etc.)
    signature_def: SignatureDef to lookup nodes in.
    graph: TensorFlow graph to lookup the nodes in.

  Returns:
    OrderedDict of tensors.
  """
  tensors = collections.OrderedDict()
  for k, v in extract_signature_inputs_or_outputs_with_prefix(
      prefix, signature_def.inputs, util.default_dict_key(prefix)).items():
    # Resolve each TensorInfo in the signature to the matching graph tensor.
    tensors[k] = tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info(
        v, graph)
  return tensors
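A brief hedged usage sketch: `signature_def` and `graph` are assumed to come from an already-loaded SavedModel (as in the sketch after Example #2), and the string prefixes below are illustrative stand-ins for the constants used elsewhere in these examples:

feature_tensors = load_additional_inputs('features', signature_def, graph)
label_tensors = load_additional_inputs('labels', signature_def, graph)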