def _find_label_domain(
    eval_config: config.EvalConfig, schema: schema_pb2.Schema,
    model_name: Text, output_name: Text
) -> Tuple[Optional[Union[int, float]], Optional[Union[int, float]]]:
    """Find the min and max value for the label_key for this model / output."""
    model_spec = model_util.get_model_spec(eval_config, model_name)
    if not model_spec:
        return None, None
    label_key = model_util.get_label_key(model_spec, output_name)
    if not label_key:
        return None, None
    label_schema = None
    for feature_schema in schema.feature:
        if feature_schema.name == label_key:
            label_schema = feature_schema
            break
    if label_schema is None:
        return None, None

    # Find the domain
    if label_schema.HasField('int_domain'):
        label_domain = label_schema.int_domain
    elif label_schema.HasField('float_domain'):
        label_domain = label_schema.float_domain
    else:
        return None, None

    left, right = None, None
    if label_domain.HasField('min'):
        left = float(label_domain.min)
    if label_domain.HasField('max'):
        right = float(label_domain.max)
    return left, right
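
A minimal usage sketch for the helper above, assuming the module's own imports (the `config` alias for the TFMA config module and `schema_pb2` from TensorFlow Metadata); the EvalConfig and Schema values are hypothetical.

from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_model_analysis import config  # import path assumed; matches the `config` alias above

# Hypothetical single-model EvalConfig whose label_key points at a schema feature.
eval_config = config.EvalConfig(
    model_specs=[config.ModelSpec(label_key='label')])

schema = schema_pb2.Schema()
feature = schema.feature.add()
feature.name = 'label'
feature.int_domain.min = 0
feature.int_domain.max = 1

# With the int_domain declared above, this returns (0.0, 1.0).
left, right = _find_label_domain(
    eval_config, schema, model_name='', output_name='')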
Example #2
def _create_accumulator(
        self) -> tf_metric_accumulators.TFCompilableMetricsAccumulator:
    padding_options = None
    if self._eval_config is not None:
        model_spec = model_util.get_model_spec(
            self._eval_config, self._model_name)
        if model_spec is not None and model_spec.HasField('padding_options'):
            padding_options = model_spec.padding_options
    return tf_metric_accumulators.TFCompilableMetricsAccumulator(
        padding_options,
        self._output_counts,
        desired_batch_size=self._desired_batch_size)
Example #3
def create_accumulator(
        self) -> tf_metric_accumulators.TFCompilableMetricsAccumulator:
    configs = zip(self._metric_configs, self._loss_configs)
    padding_options = None
    if self._eval_config is not None:
        model_spec = model_util.get_model_spec(
            self._eval_config, self._model_name)
        if model_spec is not None and model_spec.HasField('padding_options'):
            padding_options = model_spec.padding_options

    return tf_metric_accumulators.TFCompilableMetricsAccumulator(
        padding_options, [len(m) + len(l) for m, l in configs],
        desired_batch_size=self._desired_batch_size)
Example #4
def _get_example_weights(self, model_name: Text,
                         features: Dict[Text, Any]) -> Any:
  spec = model_util.get_model_spec(self._eval_config, model_name)
  if not spec:
    raise ValueError(
        'Missing model_spec for model_name "{}"'.format(model_name))
  if spec.example_weight_key:
    if spec.example_weight_key not in features:
      raise ValueError(
          'Missing feature for example_weight_key "{}": features={}'.format(
              spec.example_weight_key, features))
    return features[spec.example_weight_key]
  elif spec.example_weight_keys:
    example_weights = {}
    for k, v in spec.example_weight_keys.items():
      if v not in features:
        raise ValueError(
            'Missing feature for example_weight_key "{}": features={}'.format(
                k, features))
      example_weights[k] = features[v]
    return example_weights
  else:
    return np.array([1.0])
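
For context, a hedged sketch of the two ModelSpec shapes this method reads: a single example_weight_key for a single-output model, or an example_weight_keys map keyed by output name for a multi-output model. The field names follow the code above; the feature names are placeholders.

from tensorflow_model_analysis import config  # import path assumed

# Single-output model: one weight feature applied to all metrics.
single_output_spec = config.ModelSpec(example_weight_key='example_weight')

# Multi-output model: one weight feature per named output.
multi_output_spec = config.ModelSpec(
    example_weight_keys={'price': 'price_weight', 'rating': 'rating_weight'})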
Example #5
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    model_name: Text = '',
    eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically.
    include_default_metrics: True to include the default metrics that are part
      of the saved model graph during evaluation. Note that
      eval_config.options.include_default_metrics must also be true.
    example_weight_key: Example weight key (single-output model) or dict of
      example weight keys (multi-output model) keyed by output name.
    additional_fetches: Prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included.
    blacklist_feature_fetches: List of tensor names in the features dictionary
      which should be excluded from the fetches request. This is useful in
      scenarios where features are large (e.g. images) and can lead to excessive
      memory use if stored.
    tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
    model_name: Optional name of the model being created (should match
      ModelSpecs.name). The name should only be provided if multiple models are
      being evaluated.
    eval_config: Eval config. Only used for setting default tags.
  """
  if not eval_config:
    model_type = constants.TF_ESTIMATOR
    if tags is None:
      tags = [eval_constants.EVAL_TAG]
  else:
    model_spec = model_util.get_model_spec(eval_config, model_name)
    if not model_spec:
      raise ValueError('ModelSpec for model name {} not found in EvalConfig: '
                       'config={}'.format(model_name, eval_config))
    model_type = model_util.get_model_type(model_spec, eval_saved_model_path,
                                           tags)
    if tags is None:
      # Default to serving unless estimator is used.
      if model_type == constants.TF_ESTIMATOR:
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]

  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if model_type == constants.TF_ESTIMATOR and eval_constants.EVAL_TAG in tags:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    # Always compute example weight and example count.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr

  return types.EvalSharedModel(
      model_name=model_name,
      model_type=model_type,
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=types.ModelLoader(
          tags=tags,
          construct_fn=model_util.model_construct_fn(
              eval_saved_model_path=eval_saved_model_path,
              add_metrics_callbacks=add_metrics_callbacks,
              include_default_metrics=include_default_metrics,
              additional_fetches=additional_fetches,
              blacklist_feature_fetches=blacklist_feature_fetches,
              model_type=model_type,
              tags=tags)))
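
Finally, a hedged sketch of calling default_eval_shared_model for a model exported for serving. It assumes `import tensorflow as tf` and the module's `config` import, as in the function above; the path, label key, and weight key are placeholders.

# Hypothetical call; the path and feature keys are placeholders.
eval_config = config.EvalConfig(
    model_specs=[config.ModelSpec(label_key='label',
                                  example_weight_key='example_weight')])
eval_shared_model = default_eval_shared_model(
    eval_saved_model_path='/path/to/exported_model',  # placeholder path
    example_weight_key='example_weight',
    tags=[tf.saved_model.SERVING],
    eval_config=eval_config)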