Example no. 1
0
    def __init__(self,
                 num_buckets: Optional[int] = None,
                 left: Optional[float] = None,
                 right: Optional[float] = None,
                 name: Optional[str] = None,
                 ignore_out_of_bound_examples: bool = False):
        """Initializes the lift metrics.

        Args:
          num_buckets: Number of buckets to use. The effective number of
            buckets is num_buckets + 2 to account for the edge cases.
          left: Start of labels interval.
          right: End of labels interval.
          name: Metric name.
          ignore_out_of_bound_examples: Whether to ignore examples whose label
            values fall outside of the provided label interval, i.e.
            [left, right).
        """
        # Derive a default metric name from the bucket count when none given.
        if name is None:
            name = f'{LIFT_METRICS_NAME}@{num_buckets or DEFAULT_NUM_BUCKETS}'
        super().__init__(
            metric_util.merge_per_key_computations(_lift_metrics),
            name=name,
            num_buckets=num_buckets,
            left=left,
            right=right,
            ignore_out_of_bound_examples=ignore_out_of_bound_examples)
Example no. 2
0
    def __init__(self,
                 counterfactual_prediction_key: str,
                 name: str = FLIP_RATE_NAME,
                 thresholds: Sequence[float] = flip_count.DEFAULT_THRESHOLDS,
                 example_id_key: Optional[str] = None,
                 example_ids_count: int = flip_count.DEFAULT_NUM_EXAMPLE_IDS):
        """Initializes the flip rate metrics.

        Args:
          counterfactual_prediction_key: Prediction label key for the
            counterfactual example used to measure the flip count.
          name: Metric name.
          thresholds: Thresholds used to measure flips.
          example_id_key: Feature key containing the example id.
          example_ids_count: Max number of example ids to extract for false
            positives and false negatives.
        """
        computations = metric_util.merge_per_key_computations(_flip_rate)
        super().__init__(
            computations,
            name=name,
            thresholds=thresholds,
            counterfactual_prediction_key=counterfactual_prediction_key,
            example_id_key=example_id_key,
            example_ids_count=example_ids_count)
    def __init__(self,
                 thresholds: Optional[List[float]] = None,
                 num_thresholds: Optional[int] = None,
                 name: Text = MULTI_CLASS_CONFUSION_MATRIX_PLOT_NAME):
        """Initializes the multi-class confusion matrix plot.

        Only one of either thresholds or num_thresholds should be used. If
        both are unset, then [0.0] will be assumed.

        Args:
          thresholds: Optional thresholds. If the top prediction is less than
            a threshold then the associated example will be assumed to have no
            prediction associated with it (the predicted_class_id will be set
            to NO_PREDICTED_CLASS_ID).
          num_thresholds: Number of thresholds to use. The thresholds will be
            evenly spaced between 0.0 and 1.0 and inclusive of the boundaries
            (i.e. a value of 5 yields [0.0, 0.25, 0.5, 0.75, 1.0]).
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(
            _multi_class_confusion_matrix_plot)
        super().__init__(
            computations,
            thresholds=thresholds,
            num_thresholds=num_thresholds,
            name=name)  # pytype: disable=wrong-arg-types
Example no. 4
0
    def __init__(self,
                 counterfactual_prediction_key: Optional[str] = None,
                 example_id_key: Optional[str] = None,
                 example_ids_count: int = DEFAULT_NUM_EXAMPLE_IDS,
                 name: str = FLIP_COUNT_NAME,
                 thresholds: Sequence[float] = DEFAULT_THRESHOLDS):
        """Initializes the flip count metric.

        Args:
          counterfactual_prediction_key: Prediction label key for the
            counterfactual example used to measure the flip count when the
            counterfactual example is contained within features. Otherwise
            the baseline model prediction is used as the counterfactual.
          example_id_key: Feature key containing the example id.
          example_ids_count: Max number of example ids to extract for false
            positives and false negatives.
          name: Metric name.
          thresholds: Thresholds used to measure flips.
        """
        computations = metric_util.merge_per_key_computations(flip_count)
        super().__init__(
            computations,
            counterfactual_prediction_key=counterfactual_prediction_key,
            example_id_key=example_id_key,
            example_ids_count=example_ids_count,
            name=name,
            thresholds=thresholds)
Example no. 5
0
  def __init__(self, name: str = MEAN_PREDICTION_NAME):
    """Initializes the mean prediction metric.

    Args:
      name: Metric name.
    """
    computations = metric_util.merge_per_key_computations(_mean_prediction)
    super().__init__(computations, name=name)
Example no. 6
0
    def __init__(self, name: Text = MEAN_LABEL_NAME):
        """Initializes the mean label metric.

        Args:
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(_mean_label)
        super().__init__(computations, name=name)
Example no. 7
0
    def __init__(self, name: Text = CALIBRATION_NAME):
        """Initializes the calibration metric.

        Args:
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(_calibration)
        super().__init__(computations, name=name)
  def __init__(self, name: str = SQUARED_PEARSON_CORRELATION_NAME):
    """Initializes squared pearson correlation (r^2) metric.

    Args:
      name: Metric name.
    """
    super(SquaredPearsonCorrelation, self).__init__(
        metric_util.merge_per_key_computations(_squared_pearson_correlation),
        name=name)  # pytype: disable=wrong-arg-types
Example no. 9
0
    def __init__(self, name: str = TOTAL_ATTRIBUTIONS_NAME):
        """Initializes the total attributions metric.

        Args:
          name: Attribution metric name.
        """
        # Bind the leading flag of _total_attributions to False up front —
        # presumably selecting the non-absolute variant; confirm against
        # _total_attributions.
        total = functools.partial(_total_attributions, False)
        super().__init__(
            metric_util.merge_per_key_computations(total), name=name)
Example no. 10
0
    def __init__(self, name: str = SQUARED_PEARSON_CORRELATION_NAME):
        """Initializes the squared pearson correlation (r^2) metric.

        Args:
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(
            _squared_pearson_correlation)
        super().__init__(computations, name=name)
Example no. 11
0
    def __init__(self, name: str = MEAN_ABSOLUTE_ATTRIBUTIONS_NAME):
        """Initializes the mean absolute attributions metric.

        Args:
          name: Attribution metric name.
        """
        # Bind the leading flag of _mean_attributions to True up front —
        # presumably selecting the absolute variant; confirm against
        # _mean_attributions.
        mean_abs = functools.partial(_mean_attributions, True)
        super().__init__(
            metric_util.merge_per_key_computations(mean_abs), name=name)
  def __init__(self, name: str = COEFFICIENT_OF_DISCRIMINATION_NAME):
    """Initializes coefficient of discrimination metric.

    Args:
      name: Metric name.
    """
    super(CoefficientOfDiscrimination, self).__init__(
        metric_util.merge_per_key_computations(_coefficient_of_discrimination),
        name=name)
Example no. 13
0
    def __init__(self, name: Text = MEAN_PREDICTION_NAME):
        """Initializes the mean prediction metric.

        Args:
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(_mean_prediction)
        super().__init__(
            computations, name=name)  # pytype: disable=wrong-arg-types
  def __init__(self, name: str = RELATIVE_COEFFICIENT_OF_DISCRIMINATION_NAME):
    """Initializes relative coefficient of discrimination metric.

    Args:
      name: Metric name.
    """
    super(RelativeCoefficientOfDiscrimination, self).__init__(
        metric_util.merge_per_key_computations(
            _relative_coefficient_of_discrimination),
        name=name)  # pytype: disable=wrong-arg-types
Example no. 15
0
    def __init__(self, name: Text = MEAN_ATTRIBUTIONS_NAME):
        """Initializes the mean attributions metric.

        Args:
          name: Attribution metric name.
        """
        # Bind the leading flag of _mean_attributions to False up front —
        # presumably selecting the non-absolute variant; confirm against
        # _mean_attributions.
        mean_fn = functools.partial(_mean_attributions, False)
        super().__init__(
            metric_util.merge_per_key_computations(mean_fn), name=name)
Example no. 16
0
    def __init__(self, name: Text = TOTAL_ABSOLUTE_ATTRIBUTIONS_NAME):
        """Initializes the total absolute attributions metric.

        Args:
          name: Attribution metric name.
        """
        computations = metric_util.merge_per_key_computations(
            _total_attributions)
        super().__init__(computations, absolute=True, name=name)
Example no. 17
0
    def __init__(self, name: Text = MEAN_ABSOLUTE_ATTRIBUTIONS_NAME):
        """Initializes the mean absolute attributions metric.

        Args:
          name: Attribution metric name.
        """
        computations = metric_util.merge_per_key_computations(
            _mean_attributions)
        super().__init__(computations, absolute=True, name=name)
Example no. 18
0
  def __init__(self, name: str = RELATIVE_COEFFICIENT_OF_DISCRIMINATION_NAME):
    """Initializes the relative coefficient of discrimination metric.

    Args:
      name: Metric name.
    """
    computations = metric_util.merge_per_key_computations(
        _relative_coefficient_of_discrimination)
    super().__init__(computations, name=name)
Example no. 19
0
  def __init__(self, name: Text, thresholds: Optional[List[float]] = None):
    """Initializes a confusion matrix based metric.

    Args:
      name: Metric name.
      thresholds: Thresholds to use for the metric. Defaults to [0.5].
    """
    computations = metric_util.merge_per_key_computations(
        self._metric_computation)
    super().__init__(
        computations, thresholds=thresholds,
        name=name)  # pytype: disable=wrong-arg-types
    def __init__(self,
                 thresholds: Optional[List[float]] = None,
                 name: Text = MISS_RATE_NAME):
        """Initializes the miss rate metric.

        Args:
          thresholds: Thresholds to use for miss rate. Defaults to [0.5].
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(_miss_rate)
        super().__init__(computations, thresholds=thresholds, name=name)
    def __init__(self,
                 thresholds: Optional[List[float]] = None,
                 name: Text = SPECIFICITY_NAME):
        """Initializes the specificity metric.

        Args:
          thresholds: Thresholds to use for specificity. Defaults to [0.5].
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(_specificity)
        super().__init__(computations, thresholds=thresholds, name=name)
    def __init__(self,
                 thresholds: Optional[List[float]] = None,
                 name: Text = FALL_OUT_NAME):
        """Initializes the fall-out metric.

        Args:
          thresholds: Thresholds to use for fall-out. Defaults to [0.5].
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(_fall_out)
        super().__init__(computations, thresholds=thresholds, name=name)
    def __init__(self,
                 thresholds: Sequence[float] = DEFAULT_THRESHOLDS,
                 name: str = FAIRNESS_INDICATORS_METRICS_NAME):
        """Initializes the fairness indicators metrics.

        Args:
          thresholds: Thresholds to use for the fairness metrics.
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(
            _fairness_indicators_metrics_at_thresholds)
        super().__init__(computations, thresholds=thresholds, name=name)
Example no. 24
0
  def __init__(self,
               thresholds: List[float],
               name: Text = CONFUSION_MATRIX_AT_THRESHOLDS_NAME):
    """Initializes the confusion matrix at thresholds.

    Args:
      thresholds: Thresholds to use for the confusion matrix.
      name: Metric name.
    """
    computations = metric_util.merge_per_key_computations(
        _confusion_matrix_at_thresholds)
    super().__init__(
        computations, thresholds=thresholds,
        name=name)  # pytype: disable=wrong-arg-types
    def __init__(self,
                 num_thresholds: int = DEFAULT_NUM_THRESHOLDS,
                 name: Text = CONFUSION_MATRIX_PLOT_NAME):
        """Initializes the confusion matrix plot.

        Args:
          num_thresholds: Number of thresholds to use when discretizing the
            curve. Values must be > 1. Defaults to 1000.
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(
            _confusion_matrix_plot)
        super().__init__(computations, num_thresholds=num_thresholds,
                         name=name)
Example no. 26
0
    def __init__(self,
                 thresholds: List[float] = DEFAULT_THERSHOLDS,
                 name: Text = FAIRNESS_INDICATORS_METRICS_NAME):
        """Initializes fairness indicators metrics.

    Args:
      thresholds: Thresholds to use for fairness metrics.
      name: Metric name.
    """
        # NOTE(review): DEFAULT_THERSHOLDS appears misspelled ("THERSHOLDS").
        # The constant is defined elsewhere, so renaming must happen at its
        # definition together with this usage — do not change it here alone.
        super(FairnessIndicators,
              self).__init__(metric_util.merge_per_key_computations(
                  _fairness_indicators_metrics_at_thresholds),
                             thresholds=thresholds,
                             name=name)  # pytype: disable=wrong-arg-types
    def __init__(self,
                 num_thresholds: int = DEFAULT_NUM_THRESHOLDS,
                 name: Text = AUC_PLOT_NAME):
        """Initializes the AUC plot.

        Args:
          num_thresholds: Number of thresholds to use when discretizing the
            curve. Values must be > 1. Defaults to 1000.
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(_auc_plot)
        super().__init__(computations, num_thresholds=num_thresholds,
                         name=name)
Example no. 28
0
    def __init__(self,
                 thresholds: Optional[float] = None,
                 name: Text = MULTI_LABEL_CONFUSION_MATRIX_PLOT_NAME):
        """Initializes multi-label confusion matrix.

    Args:
      thresholds: Optional thresholds. Defaults to [0.5].
      name: Metric name.
    """
        # NOTE(review): the annotation says Optional[float] but the documented
        # default is the list [0.5] — likely Optional[List[float]]; confirm
        # against the parent class before changing the annotation.
        super(MultiLabelConfusionMatrixPlot,
              self).__init__(metric_util.merge_per_key_computations(
                  _multi_label_confusion_matrix_plot),
                             thresholds=thresholds,
                             name=name)
Example no. 29
0
    def __init__(self,
                 thresholds: Optional[List[float]] = None,
                 name: str = MULTI_CLASS_CONFUSION_MATRIX_AT_THRESHOLDS_NAME):
        """Initializes the multi-class confusion matrix at thresholds.

        Args:
          thresholds: Optional thresholds, defaults to 0.5 if not specified.
            If the top prediction is less than a threshold then the associated
            example will be assumed to have no prediction associated with it
            (the predicted_class_id will be set to NO_PREDICTED_CLASS_ID).
          name: Metric name.
        """
        computations = metric_util.merge_per_key_computations(
            _multi_class_confusion_matrix_at_thresholds)
        super().__init__(
            computations, thresholds=thresholds,
            name=name)  # pytype: disable=wrong-arg-types
Example no. 30
0
  def __init__(self,
               thresholds: Optional[float] = None,
               name: Text = MULTI_CLASS_CONFUSION_MATRIX_AT_THRESHOLDS_NAME):
    """Initializes multi-class confusion matrix.

    Args:
      thresholds: Optional thresholds. If the top prediction is less than a
        threshold then the associated example will be assumed to have no
        prediction associated with it (the predicted_class_id will be set to
        NO_PREDICTED_CLASS_ID). Defaults to [0.0].
      name: Metric name.
    """
    # NOTE(review): the annotation says Optional[float] but the documented
    # default is the list [0.0] — likely Optional[List[float]]; confirm
    # against the parent class before changing the annotation.
    super(MultiClassConfusionMatrixAtThresholds, self).__init__(
        metric_util.merge_per_key_computations(
            _multi_class_confusion_matrix_at_thresholds),
        thresholds=thresholds,
        name=name)