Example #1
EXAMPLE_COUNT_NAME = 'example_count'


class ExampleCount(metric_types.Metric):
  """Example count."""

  def __init__(self, name: Text = EXAMPLE_COUNT_NAME):
    """Initializes example count.

    Args:
      name: Metric name.
    """
    super(ExampleCount, self).__init__(_example_count, name=name)


metric_types.register_metric(ExampleCount)


def _example_count(
    name: Text = EXAMPLE_COUNT_NAME) -> metric_types.MetricComputations:
  """Returns metric computations for computing example counts."""
  key = metric_types.MetricKey(name=name)
  return [
      metric_types.MetricComputation(
          keys=[key],
          preprocessor=_ExampleCountPreprocessor(),
          combiner=_ExampleCountCombiner(key))
  ]


class _ExampleCountPreprocessor(beam.DoFn):
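
Usage sketch for the excerpt above: a minimal, assumed example of requesting the registered metric by class name through the standard tfma.EvalConfig protos. The model spec and the label key 'label' are placeholders for illustration only.

import tensorflow_model_analysis as tfma

# Hedged usage sketch: request the registered ExampleCount metric by class name.
eval_config = tfma.EvalConfig(
    model_specs=[tfma.ModelSpec(label_key='label')],
    metrics_specs=[
        tfma.MetricsSpec(metrics=[
            tfma.MetricConfig(class_name='ExampleCount'),
        ])
    ])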
Example #2
                    continue
            num_examples = 0.0
            if bucket_id in comparison_bucket:
                num_examples = comparison_bucket[bucket_id].weighted_examples
                comparison_pred_values += comparison_bucket[
                    bucket_id].weighted_predictions
                comparison_num_examples += num_examples

            if bucket_id in baseline_bucket:
                # To compute background/baseline re-weighted average prediction values.
                # Background re-weighting is done by dividing the in-slice ground truth
                # density by the background density so that the marginal ground truth
                # distributions of in-slice items and background items appear similar.
                weight = num_examples / baseline_bucket[
                    bucket_id].weighted_examples
                baseline_pred_values += weight * baseline_bucket[
                    bucket_id].weighted_predictions

        lift_value = (comparison_pred_values -
                      baseline_pred_values) / comparison_num_examples
        return {key: lift_value}

    cross_slice_computation = metric_types.CrossSliceMetricComputation(
        keys=[key], cross_slice_comparison=cross_slice_comparison)

    computations.append(cross_slice_computation)
    return computations


metric_types.register_metric(Lift)
Example #3
    Args:
      thresholds: Optional thresholds, defaults to 0.5 if not specified. If the
        top prediction is less than a threshold then the associated example will
        be assumed to have no prediction associated with it (the
        predicted_class_id will be set to NO_PREDICTED_CLASS_ID).
      name: Metric name.
    """
        super(MultiClassConfusionMatrixAtThresholds,
              self).__init__(metric_util.merge_per_key_computations(
                  _multi_class_confusion_matrix_at_thresholds),
                             thresholds=thresholds,
                             name=name)  # pytype: disable=wrong-arg-types


metric_types.register_metric(MultiClassConfusionMatrixAtThresholds)


def _multi_class_confusion_matrix_at_thresholds(
    thresholds: Optional[List[float]] = None,
    name: Text = MULTI_CLASS_CONFUSION_MATRIX_AT_THRESHOLDS_NAME,
    eval_config: Optional[config_pb2.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
) -> metric_types.MetricComputations:
    """Returns computations for multi-class confusion matrix at thresholds."""
    if not thresholds:
        thresholds = [0.5]

    key = metric_types.MetricKey(name=name,
                                 model_name=model_name,
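
The docstring above mentions NO_PREDICTED_CLASS_ID for predictions that fall below the threshold. The standalone sketch below illustrates that thresholding rule; the sentinel value -1 and the helper name are assumptions, not the library's implementation.

import numpy as np

NO_PREDICTED_CLASS_ID = -1  # assumed sentinel value for "no prediction"

def predicted_class_id(probabilities, threshold=0.5):
    # Pick the top class only if its score clears the threshold.
    top = int(np.argmax(probabilities))
    return top if probabilities[top] >= threshold else NO_PREDICTED_CLASS_ID

predicted_class_id([0.2, 0.3, 0.5])  # -> 2
predicted_class_id([0.4, 0.3, 0.3])  # -> NO_PREDICTED_CLASS_ID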
Example #4
      num_thresholds: Number of thresholds to use. The thresholds will be evenly
        spaced between 0.0 and 1.0 and inclusive of the boundaries (i.e. to
        configure the thresholds to [0.0, 0.25, 0.5, 0.75, 1.0], the parameter
        should be set to 5). Only one of either thresholds or num_thresholds
        should be used.
      name: Metric name.
    """
        super(MultiLabelConfusionMatrixPlot,
              self).__init__(metric_util.merge_per_key_computations(
                  _multi_label_confusion_matrix_plot),
                             thresholds=thresholds,
                             num_thresholds=num_thresholds,
                             name=name)


metric_types.register_metric(MultiLabelConfusionMatrixPlot)


def _multi_label_confusion_matrix_plot(
    thresholds: Optional[List[float]] = None,
    num_thresholds: Optional[int] = None,
    name: Text = MULTI_LABEL_CONFUSION_MATRIX_PLOT_NAME,
    eval_config: Optional[config.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
) -> metric_types.MetricComputations:
    """Returns computations for multi-label confusion matrix at thresholds."""
    if num_thresholds is not None and thresholds is not None:
        raise ValueError(
            'only one of thresholds or num_thresholds can be set at a time')
    if num_thresholds is None and thresholds is None:
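
A quick sketch of the threshold expansion described in the docstring above (evenly spaced over [0.0, 1.0], boundaries included). expand_thresholds is a hypothetical helper, not part of the library.

def expand_thresholds(num_thresholds):
    # num_thresholds evenly spaced values, inclusive of 0.0 and 1.0.
    step = 1.0 / (num_thresholds - 1)
    return [i * step for i in range(num_thresholds)]

expand_thresholds(5)  # [0.0, 0.25, 0.5, 0.75, 1.0]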
Example #5
                 name: Text = NDCG_NAME):
        """Initializes NDCG.

    Args:
      gain_key: Key of feature in features dictionary that holds gain values.
      top_k_list: Values for top k. This can also be set using the
        tfma.MetricsSpec.binarize.top_k_list associated with the metric.
      name: Metric name.
    """
        super(NDCG, self).__init__(_ndcg,
                                   gain_key=gain_key,
                                   top_k_list=top_k_list,
                                   name=name)


metric_types.register_metric(NDCG)


def _ndcg(gain_key: Text,
          top_k_list: Optional[List[int]] = None,
          name: Text = NDCG_NAME,
          eval_config: Optional[config.EvalConfig] = None,
          model_names: Optional[List[Text]] = None,
          output_names: Optional[List[Text]] = None,
          sub_keys: Optional[List[metric_types.SubKey]] = None,
          query_key: Text = '') -> metric_types.MetricComputations:
    """Returns metric computations for NDCG."""
    if not query_key:
        raise ValueError('a query_key is required to use NDCG metric')
    sub_keys = [k for k in sub_keys if k is not None]
    if top_k_list:
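
For orientation, a standalone NDCG@k computation for a single query's ranked gain values. This is one common formulation (linear gains with a log2 position discount), not the TFMA combiner itself.

import math

def ndcg_at_k(ranked_gains, k):
    # DCG with a log2 position discount, normalized by the ideal ordering.
    dcg = sum(g / math.log2(i + 2) for i, g in enumerate(ranked_gains[:k]))
    ideal = sorted(ranked_gains, reverse=True)
    idcg = sum(g / math.log2(i + 2) for i, g in enumerate(ideal[:k]))
    return dcg / idcg if idcg else float('nan')

ndcg_at_k([3.0, 1.0, 2.0], k=3)  # ~0.97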
Example #6
class MeanAttributions(AttributionsMetric):
    """Mean attributions metric."""
    def __init__(self, name: Text = MEAN_ATTRIBUTIONS_NAME):
        """Initializes mean attributions metric.

    Args:
      name: Attribution metric name.
    """
        super(MeanAttributions, self).__init__(
            metric_util.merge_per_key_computations(_mean_attributions),
            absolute=False,
            name=name)


metric_types.register_metric(MeanAttributions)


class MeanAbsoluteAttributions(AttributionsMetric):
    """Mean aboslute attributions metric."""
    def __init__(self, name: Text = MEAN_ABSOLUTE_ATTRIBUTIONS_NAME):
        """Initializes mean absolute attributions metric.

    Args:
      name: Attribution metric name.
    """
        super(MeanAbsoluteAttributions, self).__init__(
            metric_util.merge_per_key_computations(_mean_attributions),
            absolute=True,
            name=name)
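
A small illustration of the difference between the two metrics above, for one feature's per-example attributions (made-up values):

attributions = [0.4, -0.1, 0.3, -0.6]

# MeanAttributions keeps the sign, so positive and negative values can cancel.
mean_attribution = sum(attributions) / len(attributions)                      # 0.0
# MeanAbsoluteAttributions measures magnitude regardless of direction.
mean_abs_attribution = sum(abs(a) for a in attributions) / len(attributions)  # 0.35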
Example #7
            fomr = metric.fn[i] / (
                (metric.fn[i] + metric.tn[i]) or float('nan'))

            output[metric_key_by_name_by_threshold[threshold]
                   ['false_positive_rate']] = fpr
            output[metric_key_by_name_by_threshold[threshold]
                   ['false_negative_rate']] = fnr
            output[metric_key_by_name_by_threshold[threshold]
                   ['true_positive_rate']] = tpr
            output[metric_key_by_name_by_threshold[threshold]
                   ['true_negative_rate']] = tnr
            output[metric_key_by_name_by_threshold[threshold]
                   ['positive_rate']] = pr
            output[metric_key_by_name_by_threshold[threshold]
                   ['negative_rate']] = nr
            output[metric_key_by_name_by_threshold[threshold]
                   ['false_discovery_rate']] = fdr
            output[metric_key_by_name_by_threshold[threshold]
                   ['false_omission_rate']] = fomr

        return output

    derived_computation = metric_types.DerivedMetricComputation(keys=keys,
                                                                result=result)

    computations.append(derived_computation)
    return computations


metric_types.register_metric(FairnessIndicators)
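
For reference, a standalone sketch of the eight rates written into output above, computed from made-up confusion-matrix counts and using the same `(denominator or float('nan'))` guard against empty slices. The expressions for positive_rate and negative_rate are not shown in the excerpt; the usual predicted-positive/negative shares are assumed here.

tp, tn, fp, fn = 30.0, 50.0, 10.0, 10.0
total = tp + tn + fp + fn

tpr = tp / ((tp + fn) or float('nan'))    # true_positive_rate
tnr = tn / ((tn + fp) or float('nan'))    # true_negative_rate
fpr = fp / ((fp + tn) or float('nan'))    # false_positive_rate
fnr = fn / ((fn + tp) or float('nan'))    # false_negative_rate
pr = (tp + fp) / (total or float('nan'))  # positive_rate (assumed definition)
nr = (tn + fn) / (total or float('nan'))  # negative_rate (assumed definition)
fdr = fp / ((fp + tp) or float('nan'))    # false_discovery_rate
fomr = fn / ((fn + tn) or float('nan'))   # false_omission_rate, as in the code above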
Example #8
                'negative_to_positive']
            pos_examples = flip_count_metric_key_by_name_by_threshold[
                threshold]['positive_to_negative_examples_ids']
            neg_examples = flip_count_metric_key_by_name_by_threshold[
                threshold]['negative_to_positive_examples_ids']
            pos = flip_count_metric_key_by_name_by_threshold[threshold][
                'positive_examples_count']
            neg = flip_count_metric_key_by_name_by_threshold[threshold][
                'negative_examples_count']
            output[metric_key_by_name_by_threshold[threshold]['overall']] = (
                metrics[ntp] + metrics[ptn]) / (metrics[pos] + metrics[neg])
            output[metric_key_by_name_by_threshold[threshold]
                   ['positive_to_negative']] = metrics[ptn] / metrics[pos]
            output[metric_key_by_name_by_threshold[threshold]
                   ['negative_to_positive']] = metrics[ntp] / metrics[neg]
            output[metric_key_by_name_by_threshold[threshold][
                'positive_to_negative_examples_ids']] = metrics[pos_examples]
            output[metric_key_by_name_by_threshold[threshold][
                'negative_to_positive_examples_ids']] = metrics[neg_examples]

        return output

    derived_computation = metric_types.DerivedMetricComputation(keys=keys,
                                                                result=result)

    computations.append(derived_computation)
    return computations


metric_types.register_metric(FlipRate)
Example #9
  def __init__(self,
               thresholds: List[float],
               name: Text = CONFUSION_MATRIX_AT_THRESHOLDS_NAME):
    """Initializes confusion matrix at thresholds.

    Args:
      thresholds: Thresholds to use for confusion matrix.
      name: Metric name.
    """
    super(ConfusionMatrixAtThresholds, self).__init__(
        metric_util.merge_per_key_computations(_confusion_matrix_at_thresholds),
        thresholds=thresholds,
        name=name)


metric_types.register_metric(ConfusionMatrixAtThresholds)


def _confusion_matrix_at_thresholds(
    thresholds: List[float],
    name: Text = CONFUSION_MATRIX_AT_THRESHOLDS_NAME,
    eval_config: Optional[config.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
    sub_key: Optional[metric_types.SubKey] = None,
    class_weights: Optional[Dict[int, float]] = None
) -> metric_types.MetricComputations:
  """Returns metric computations for confusion matrix at thresholds."""
  key = metric_types.MetricKey(
      name=name,
      model_name=model_name,
Example #10
               thresholds: Optional[List[float]] = None,
               name: Text = MULTI_LABEL_CONFUSION_MATRIX_AT_THRESHOLDS_NAME):
    """Initializes multi-label confusion matrix.

    Args:
      thresholds: Optional thresholds. Defaults to [0.5].
      name: Metric name.
    """
    super(MultiLabelConfusionMatrixAtThresholds, self).__init__(
        metric_util.merge_per_key_computations(
            _multi_label_confusion_matrix_at_thresholds),
        thresholds=thresholds,
        name=name)


metric_types.register_metric(MultiLabelConfusionMatrixAtThresholds)


def _multi_label_confusion_matrix_at_thresholds(
    thresholds: Optional[List[float]] = None,
    name: Text = MULTI_LABEL_CONFUSION_MATRIX_AT_THRESHOLDS_NAME,
    eval_config: Optional[config.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
) -> metric_types.MetricComputations:
  """Returns computations for multi-label confusion matrix at thresholds."""
  key = metric_types.PlotKey(
      name=name, model_name=model_name, output_name=output_name)
  return [
      metric_types.MetricComputation(
          keys=[key],
Example #11
    def __init__(self,
                 thresholds: Optional[List[float]] = None,
                 name: Text = SPECIFICITY_NAME):
        """Initializes specificity metric.

    Args:
      thresholds: Thresholds to use for specificity. Defaults to [0.5].
      name: Metric name.
    """
        super(Specificity, self).__init__(
            metric_util.merge_per_key_computations(_specificity),
            thresholds=thresholds,
            name=name)


metric_types.register_metric(Specificity)


def _specificity(
    thresholds: Optional[List[float]] = None,
    name: Text = SPECIFICITY_NAME,
    eval_config: Optional[config.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
    sub_key: Optional[metric_types.SubKey] = None,
    class_weights: Optional[Dict[int, float]] = None
) -> metric_types.MetricComputations:
    """Returns metric computations for specificity."""
    def rate_fn(tp: float, tn: float, fp: float, fn: float) -> float:
        del tp, fn
        if tn + fp > 0.0:
Example #12

class SymmetricPredictionDifference(metric_types.Metric):
    """PredictionDifference computes the avg pointwise diff between models."""
    def __init__(self, name: str = SYMMETRIC_PREDICITON_DIFFERENCE_NAME):
        """Initializes PredictionDifference metric.

    Args:
      name: Metric name.
    """

        super().__init__(symmetric_prediction_difference_computations,
                         name=name)


metric_types.register_metric(SymmetricPredictionDifference)


def symmetric_prediction_difference_computations(
        name: str = SYMMETRIC_PREDICITON_DIFFERENCE_NAME,
        eval_config: Optional[config_pb2.EvalConfig] = None,
        model_names: Optional[List[str]] = None,
        output_names: Optional[List[str]] = None,
        sub_keys: Optional[List[metric_types.SubKey]] = None,
        example_weighted: bool = False) -> metric_types.MetricComputations:
    """Returns metric computations for SymmetricPredictionDifference.

  This is not meant to be used with merge_per_key_computations because we
  don't want to create computations for the baseline model, and we want to
  provide the baseline model name to each Combiner
Example #13
      sampled_key: The key whose values should be sampled
      size: The number of samples to collect (per slice)
      name: Metric name.
      random_seed: The random_seed to be used for initializing the per worker
        np.random.RandomGenerator in the CombineFn setup. Note that when more
        than one worker is used, setting this is not sufficient to guarantee
        determinism.
    """
        super().__init__(_fixed_size_sample,
                         sampled_key=sampled_key,
                         size=size,
                         name=name,
                         random_seed=random_seed)


metric_types.register_metric(FixedSizeSample)


def _fixed_size_sample(
        sampled_key: Text,
        size: int,
        name: Text,
        random_seed: Optional[int],
        model_names: Optional[List[Text]] = None,
        output_names: Optional[List[Text]] = None,
        sub_keys: Optional[List[metric_types.SubKey]] = None,
        example_weighted: bool = False) -> metric_types.MetricComputations:
    """Returns metrics computations for FixedSizeSample metrcs."""
    keys = []
    for model_name in model_names or ['']:
        for output_name in output_names or ['']:
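
The combiner behind FixedSizeSample is not shown in this excerpt; as a standalone illustration, the classic reservoir-sampling idea for keeping a fixed-size sample per slice, using numpy's Generator as the docstring suggests:

import numpy as np

def reservoir_sample(values, size, seed=None):
    # Each element ends up in the sample with probability size / (elements seen).
    rng = np.random.default_rng(seed)
    sample = []
    for i, value in enumerate(values):
        if i < size:
            sample.append(value)
        else:
            j = rng.integers(0, i + 1)
            if j < size:
                sample[j] = value
    return sample

reservoir_sample(range(100), size=5, seed=42)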
Example #14
    Args:
      name: The name of the metric to use.
      convert_to: The conversion to perform before checking equality.
    """

    super(ExactMatch, self).__init__(
        metric_util.merge_per_key_computations(_exact_match),
        name=name,
        convert_to=convert_to)
    if convert_to and convert_to not in _CONVERT_TO_VALUES:
      raise ValueError('convert_to can only be one of the following: %s' %
                       str(_CONVERT_TO_VALUES))


metric_types.register_metric(ExactMatch)


def _exact_match(
    name: Text,
    eval_config: Optional[config_pb2.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
    sub_key: Optional[metric_types.SubKey] = None,
    aggregation_type: Optional[metric_types.AggregationType] = None,
    class_weights: Optional[Dict[int, float]] = None,
    convert_to: Optional[Text] = None) -> metric_types.MetricComputations:
  """Returns metric computations for computing the exact match score."""
  key = metric_types.MetricKey(
      name=name,
      model_name=model_name,
Example #15
      num_thresholds: Number of thresholds to use. The thresholds will be evenly
        spaced between 0.0 and 1.0 and inclusive of the boundaries (i.e. to
        configure the thresholds to [0.0, 0.25, 0.5, 0.75, 1.0], the parameter
        should be set to 5). Only one of either thresholds or num_thresholds
        should be used.
      name: Metric name.
    """
        super(MultiClassConfusionMatrixPlot,
              self).__init__(metric_util.merge_per_key_computations(
                  _multi_class_confusion_matrix_plot),
                             thresholds=thresholds,
                             num_thresholds=num_thresholds,
                             name=name)  # pytype: disable=wrong-arg-types


metric_types.register_metric(MultiClassConfusionMatrixPlot)


def _multi_class_confusion_matrix_plot(
    thresholds: Optional[List[float]] = None,
    num_thresholds: Optional[int] = None,
    name: Text = MULTI_CLASS_CONFUSION_MATRIX_PLOT_NAME,
    eval_config: Optional[config.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
) -> metric_types.MetricComputations:
    """Returns computations for multi-class confusion matrix at thresholds."""
    if num_thresholds is not None and thresholds is not None:
        raise ValueError(
            'only one of thresholds or num_thresholds can be set at a time')
    if num_thresholds is None and thresholds is None:
Example #16
    Args:
      total_queries_name: Total queries metric name.
      total_documents_name: Total documents metric name.
      min_documents_name: Min documents name.
      max_documents_name: Max documents name.
    """
        super(QueryStatistics,
              self).__init__(_query_statistics,
                             total_queries_name=total_queries_name,
                             total_documents_name=total_documents_name,
                             min_documents_name=min_documents_name,
                             max_documents_name=max_documents_name)


metric_types.register_metric(QueryStatistics)


def _query_statistics(total_queries_name=TOTAL_QUERIES_NAME,
                      total_documents_name=TOTAL_DOCUMENTS_NAME,
                      min_documents_name=MIN_DOCUMENTS_NAME,
                      max_documents_name=MAX_DOCUMENTS_NAME,
                      query_key: Text = '') -> metric_types.MetricComputations:
    """Returns metric computations for query statistics."""
    if not query_key:
        raise ValueError(
            'a query_key is required to use QueryStatistics metrics')

    total_queries_key = metric_types.MetricKey(name=total_queries_name)
    total_documents_key = metric_types.MetricKey(name=total_documents_name)
    min_documents_key = metric_types.MetricKey(name=min_documents_name)
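
The four statistics requested above reduce to simple aggregates of per-query document counts; an illustrative computation with made-up data:

docs_per_query = [3, 7, 1, 4]  # number of documents grouped under each query_key

total_queries = len(docs_per_query)    # total_queries_name
total_documents = sum(docs_per_query)  # total_documents_name
min_documents = min(docs_per_query)    # min_documents_name
max_documents = max(docs_per_query)    # max_documents_name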
Example #17
    '_weighted_labels_predictions_examples')


class MeanLabel(metric_types.Metric):
    """Mean label."""
    def __init__(self, name: Text = MEAN_LABEL_NAME):
        """Initializes mean label.

    Args:
      name: Metric name.
    """
        super(MeanLabel, self).__init__(
            metric_util.merge_per_key_computations(_mean_label), name=name)


metric_types.register_metric(MeanLabel)


def _mean_label(
    name: Text = MEAN_LABEL_NAME,
    eval_config: Optional[config.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
    sub_key: Optional[metric_types.SubKey] = None,
    class_weights: Optional[Dict[int, float]] = None
) -> metric_types.MetricComputations:
    """Returns metric computations for mean label."""

    key = metric_types.MetricKey(name=name,
                                 model_name=model_name,
                                 output_name=output_name,
Example #18
                 num_thresholds: int = DEFAULT_NUM_THRESHOLDS,
                 name: Text = CONFUSION_MATRIX_PLOT_NAME):
        """Initializes confusion matrix plot.

    Args:
      num_thresholds: Number of thresholds to use when discretizing the curve.
        Values must be > 1. Defaults to 1000.
      name: Metric name.
    """
        super(ConfusionMatrixPlot, self).__init__(
            metric_util.merge_per_key_computations(_confusion_matrix_plot),
            num_thresholds=num_thresholds,
            name=name)


metric_types.register_metric(ConfusionMatrixPlot)


def _confusion_matrix_plot(
    num_thresholds: int = DEFAULT_NUM_THRESHOLDS,
    name: Text = CONFUSION_MATRIX_PLOT_NAME,
    eval_config: Optional[config.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
    sub_key: Optional[metric_types.SubKey] = None,
    aggregation_type: Optional[metric_types.AggregationType] = None,
    class_weights: Optional[Dict[int, float]] = None
) -> metric_types.MetricComputations:
    """Returns metric computations for confusion matrix plots."""
    key = metric_types.PlotKey(name=name,
                               model_name=model_name,
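
A possible way to request this plot from user code, assuming the usual tfma.metrics helpers (illustrative, not part of the excerpt):

import tensorflow_model_analysis as tfma

# 100 thresholds instead of the default of 1000 described in the docstring above.
metrics_specs = tfma.metrics.specs_from_metrics(
    [tfma.metrics.ConfusionMatrixPlot(num_thresholds=100)])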
Example #19
    Args:
      thresholds: Thresholds to use for specificity. Defaults to [0.5].
      name: Metric name.
    """
    super(Specificity, self).__init__(name=name, thresholds=thresholds)

  def compute(self, tp: float, tn: float, fp: float, fn: float) -> float:
    del tp, fn
    denominator = tn + fp
    if denominator > 0.0:
      return tn / denominator
    else:
      return float('nan')


metric_types.register_metric(Specificity)


class FallOut(ConfusionMatrixMetric):
  """Fall-out (FPR)."""

  def __init__(self,
               thresholds: Optional[List[float]] = None,
               name: Text = FALL_OUT_NAME):
    """Initializes fall-out metric.

    Args:
      thresholds: Thresholds to use for fall-out. Defaults to [0.5].
      name: Metric name.
    """
    super(FallOut, self).__init__(name=name, thresholds=thresholds)
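
The excerpt cuts off before FallOut.compute; per its docstring ("Fall-out (FPR)") the formula mirrors Specificity.compute above, sketched here standalone:

def fall_out(tp: float, tn: float, fp: float, fn: float) -> float:
    # FP / (FP + TN), NaN when the denominator is zero.
    del tp, fn
    denominator = fp + tn
    return fp / denominator if denominator > 0.0 else float('nan')

fall_out(tp=30.0, tn=50.0, fp=10.0, fn=10.0)  # ~0.167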
Example #20
  """
    def __init__(self,
                 name=MIN_LABEL_POSITION_NAME,
                 label_key: Optional[Text] = None):
        """Initializes min label position metric.

    Args:
      name: Metric name.
      label_key: Optional label key to override default label.
    """
        super(MinLabelPosition, self).__init__(_min_label_position,
                                               name=name,
                                               label_key=label_key)


metric_types.register_metric(MinLabelPosition)


def _min_label_position(
        name=MIN_LABEL_POSITION_NAME,
        label_key: Optional[Text] = None,
        eval_config: Optional[config.EvalConfig] = None,
        model_names: Optional[List[Text]] = None,
        output_names: Optional[List[Text]] = None,
        query_key: Text = '') -> metric_types.MetricComputations:
    """Returns metric computations for min label position."""
    if not query_key:
        raise ValueError(
            'a query_key is required to use MinLabelPosition metric')
    if model_names is None:
        model_names = ['']
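
An illustration of the per-query statistic behind MinLabelPosition: the 1-based position of the first positively labeled document once documents are sorted by prediction. The helper below is a sketch, not the library's Beam computation.

def min_label_position(labels_in_ranked_order):
    # labels_in_ranked_order: labels after sorting documents by prediction.
    for position, label in enumerate(labels_in_ranked_order, start=1):
        if label > 0:
            return position
    return float('nan')  # no positively labeled document in this query

min_label_position([0, 0, 1, 0, 1])  # 3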
Example #21

class SquaredPearsonCorrelation(metric_types.Metric):
    """Squared pearson correlation (r^2) metric."""
    def __init__(self, name: str = SQUARED_PEARSON_CORRELATION_NAME):
        """Initializes squared pearson correlation (r^2) metric.

    Args:
      name: Metric name.
    """
        super().__init__(metric_util.merge_per_key_computations(
            _squared_pearson_correlation),
                         name=name)


metric_types.register_metric(SquaredPearsonCorrelation)


def _squared_pearson_correlation(
        name: str = SQUARED_PEARSON_CORRELATION_NAME,
        eval_config: Optional[config_pb2.EvalConfig] = None,
        model_name: str = '',
        output_name: str = '',
        sub_key: Optional[metric_types.SubKey] = None,
        aggregation_type: Optional[metric_types.AggregationType] = None,
        class_weights: Optional[Dict[int, float]] = None,
        example_weighted: bool = False) -> metric_types.MetricComputations:
    """Returns metric computations for squared pearson correlation (r^2)."""
    key = metric_types.MetricKey(name=name,
                                 model_name=model_name,
                                 output_name=output_name,
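
For a quick sanity check of the quantity being estimated, r^2 for paired labels and predictions can be computed in one shot with numpy (the streaming combiner is not shown in the excerpt):

import numpy as np

labels = np.array([0.0, 1.0, 1.0, 0.0, 1.0])
predictions = np.array([0.1, 0.8, 0.6, 0.3, 0.9])

r = np.corrcoef(labels, predictions)[0, 1]  # Pearson correlation
r_squared = r ** 2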
Example #22
            example_id_key=example_id_key,
            example_ids_count=example_ids_count)

    @property
    def compute_confidence_interval(self) -> bool:
        """Confidence intervals for FlipCount.

    Confidence intervals capture uncertainty in a metric if it were computed on
    more examples. For now, confidence intervals are disabled for this metric.

    Returns:
      Whether to compute confidence intervals.
    """
        return False


metric_types.register_metric(FlipCount)


def _calculate_digits(thresholds):
    digits = [len(str(t)) - 2 for t in thresholds]
    return max(max(digits), 1)


def create_metric_keys(
    thresholds: Sequence[float], metrics: List[str], metric_name: str,
    model_name: str, output_name: str, example_weighted: bool
) -> Tuple[List[metric_types.MetricKey], Dict[float, Dict[
        str, metric_types.MetricKey]]]:
    """Creates metric keys map keyed at threshold and metric name."""
    keys = []
    metric_key_by_name_by_threshold = collections.defaultdict(dict)
Example #23
WEIGHTED_EXAMPLE_COUNT_NAME = 'weighted_example_count'


class WeightedExampleCount(metric_types.Metric):
    """Weighted example count."""
    def __init__(self, name: Text = WEIGHTED_EXAMPLE_COUNT_NAME):
        """Initializes weighted example count.

    Args:
      name: Metric name.
    """
        super(WeightedExampleCount, self).__init__(_weighted_example_count,
                                                   name=name)


metric_types.register_metric(WeightedExampleCount)


def _weighted_example_count(
    name: Text = WEIGHTED_EXAMPLE_COUNT_NAME,
    model_names: Optional[List[Text]] = None,
    output_names: Optional[List[Text]] = None
) -> metric_types.MetricComputations:
    """Returns metric computations for weighted example count."""
    if model_names is None:
        model_names = ['']
    if output_names is None:
        output_names = ['']
    keys = []
    computations = []
    for model_name in model_names:
Example #24
  More details can be found in the following paper:
  https://www.tandfonline.com/doi/abs/10.1198/tast.2009.08210
  """

  def __init__(self, name=COEFFICIENT_OF_DISCRIMINATION_NAME):
    """Initializes coefficient of discrimination metric.

    Args:
      name: Metric name.
    """
    super(CoefficientOfDiscrimination, self).__init__(
        metric_util.merge_per_key_computations(_coefficient_of_discrimination),
        name=name)


metric_types.register_metric(CoefficientOfDiscrimination)


def _coefficient_of_discrimination(
    name: Text = COEFFICIENT_OF_DISCRIMINATION_NAME,
    eval_config: Optional[config.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
    class_weights: Optional[Dict[int, float]] = None
) -> metric_types.MetricComputations:
  """Returns metric computations for coefficient of discrimination."""
  key = metric_types.MetricKey(
      name=name, model_name=model_name, output_name=output_name)

  # Compute shared Tjur discrimination metrics.
  computations = _tjur_discrimination(
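
Tjur's coefficient of discrimination (the paper linked above) is the mean predicted probability over positive examples minus the mean over negative examples; a standalone illustration with made-up values:

import numpy as np

labels = np.array([1, 1, 0, 0, 0])
predictions = np.array([0.9, 0.7, 0.4, 0.2, 0.1])

coefficient = (predictions[labels == 1].mean() -
               predictions[labels == 0].mean())  # 0.8 - 0.233... ~= 0.567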
Example #25
        1000.
      left: Left boundary of plot. Defaults to 0.0 when a schema is not
        provided.
      right: Right boundary of plot. Defaults to 1.0 when a schema is not
        provided.
      name: Plot name.
    """
    super(CalibrationPlot, self).__init__(
        metric_util.merge_per_key_computations(_calibration_plot),
        num_buckets=num_buckets,
        left=left,
        right=right,
        name=name)


metric_types.register_metric(CalibrationPlot)


def _find_label_domain(
    eval_config: config_pb2.EvalConfig, schema: schema_pb2.Schema,
    model_name: Text, output_name: Text
) -> Tuple[Optional[Union[int, float]], Optional[Union[int, float]]]:
  """Find the min and max value for the label_key for this model / output."""
  model_spec = model_util.get_model_spec(eval_config, model_name)
  if not model_spec:
    return None, None
  label_key = model_util.get_label_key(model_spec, output_name)
  if not label_key:
    return None, None
  label_schema = None
  for feature_schema in schema.feature:
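
As a standalone illustration of what a calibration plot aggregates (average prediction vs. average label per bucket over [left, right]); the helper below is a sketch, not the TFMA combiner:

import numpy as np

def calibration_buckets(labels, predictions, num_buckets=10, left=0.0, right=1.0):
    labels = np.asarray(labels, dtype=float)
    predictions = np.asarray(predictions, dtype=float)
    edges = np.linspace(left, right, num_buckets + 1)
    bucket_ids = np.digitize(predictions, edges[1:-1])  # 0 .. num_buckets - 1
    rows = []
    for b in range(num_buckets):
        mask = bucket_ids == b
        if mask.any():
            rows.append(
                (predictions[mask].mean(), labels[mask].mean(), int(mask.sum())))
    return rows  # (average prediction, average label, count) per non-empty bucket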
Example #26
                 num_thresholds: int = DEFAULT_NUM_THRESHOLDS,
                 name: Text = AUC_PLOT_NAME):
        """Initializes AUC plot.

    Args:
      num_thresholds: Number of thresholds to use when discretizing the curve.
        Values must be > 1. Defaults to 1000.
      name: Metric name.
    """
        super(AUCPlot,
              self).__init__(metric_util.merge_per_key_computations(_auc_plot),
                             num_thresholds=num_thresholds,
                             name=name)


metric_types.register_metric(AUCPlot)


def _auc_plot(
    num_thresholds: int = DEFAULT_NUM_THRESHOLDS,
    name: Text = AUC_PLOT_NAME,
    eval_config: Optional[config.EvalConfig] = None,
    model_name: Text = '',
    output_name: Text = '',
    sub_key: Optional[metric_types.SubKey] = None,
    class_weights: Optional[Dict[int, float]] = None
) -> metric_types.MetricComputations:
    """Returns metric computations for AUC plots."""
    key = metric_types.PlotKey(name=name,
                               model_name=model_name,
                               output_name=output_name,
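
Finally, an illustrative sketch of the data behind an AUC plot: confusion-matrix counts at a grid of thresholds, reduced to (FPR, TPR) points. The PlotKey/combiner machinery above is not shown; this helper is an assumption-free standalone version.

import numpy as np

def roc_points(labels, scores, num_thresholds=1000):
    labels = np.asarray(labels, dtype=bool)
    scores = np.asarray(scores, dtype=float)
    points = []
    for t in np.linspace(0.0, 1.0, num_thresholds):
        predicted = scores >= t
        tp = np.sum(predicted & labels)
        fp = np.sum(predicted & ~labels)
        fn = np.sum(~predicted & labels)
        tn = np.sum(~predicted & ~labels)
        tpr = tp / (tp + fn) if (tp + fn) else float('nan')
        fpr = fp / (fp + tn) if (fp + tn) else float('nan')
        points.append((fpr, tpr))
    return points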