def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`, and
        must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    deps = []
    if not self._built:
        # Lazily build the TP/TN/FP/FN variables from the first batch.
        # BUG FIX: the previous code called
        #   tf.ensure_shape(y_true, (self.num_thresholds, self.num_classes))
        # which asserts that a batch of labels has the shape of the
        # confusion-matrix *variables* — that is never true. Upstream Keras
        # AUC builds from the prediction shape instead.
        self._build(y_pred.shape)

    if self.multi_label or (self.label_weights is not None):
        # y_true should have shape (number of examples, number of labels).
        shapes = [(y_true, ('N', 'L'))]
        if self.multi_label:
            # TP, TN, FP, and FN should all have shape
            # (number of thresholds, number of labels).
            shapes.extend([(self.true_positives, ('T', 'L')),
                           (self.true_negatives, ('T', 'L')),
                           (self.false_positives, ('T', 'L')),
                           (self.false_negatives, ('T', 'L'))])
        if self.label_weights is not None:
            # label_weights should be of length equal to the number of labels.
            shapes.append((self.label_weights, ('L',)))
        deps = [
            check_ops.assert_shapes(
                shapes, message='Number of labels is not consistent.')
        ]

    # Only forward label_weights to update_confusion_matrix_variables when
    # multi_label is False. Otherwise the averaging of individual label AUCs
    # is handled in AUC.result.
    label_weights = None if self.multi_label else self.label_weights
    with ops.control_dependencies(deps):
        # Return the update op so the docstring contract ("Returns: Update
        # op") holds and TF v1-style callers can depend on it.
        return metrics_utils.update_confusion_matrix_variables(
            {
                metrics_utils.ConfusionMatrix.TRUE_POSITIVES:
                    self.true_positives,
                metrics_utils.ConfusionMatrix.TRUE_NEGATIVES:
                    self.true_negatives,
                metrics_utils.ConfusionMatrix.FALSE_POSITIVES:
                    self.false_positives,
                metrics_utils.ConfusionMatrix.FALSE_NEGATIVES:
                    self.false_negatives,
            },
            y_true,
            y_pred,
            self.thresholds,
            sample_weight=sample_weight,
            multi_label=self.multi_label,
            label_weights=label_weights)
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates true positive and false negative statistics.

    BUG FIX: `sample_weight` was accepted but silently dropped, so weighted
    examples counted as weight 1; it is now forwarded to the update helper.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1.

    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES:
                self.true_positives,
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES:
                self.false_negatives,
        },
        y_true,
        y_pred,
        # NOTE(review): the upstream Keras helper spells this parameter
        # `thresholds` (plural); confirm the local helper's signature.
        threshold=self.threshold,
        sample_weight=sample_weight,
    )
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates TP/FP/FN counts over the configured thresholds.

    For TF v1 the confusion-matrix update op would be returned; for TF v2
    (inherited keras/tf.keras Metric class) no return value is needed.
    """
    variables_to_update = {
        metrics_utils_tf_keras.ConfusionMatrix.TRUE_POSITIVES:
            self.true_positives,
        metrics_utils_tf_keras.ConfusionMatrix.FALSE_POSITIVES:
            self.false_positives,
        metrics_utils_tf_keras.ConfusionMatrix.FALSE_NEGATIVES:
            self.false_negatives,
    }
    metrics_utils_tf_keras.update_confusion_matrix_variables(
        variables_to_update,
        y_true,
        y_pred,
        thresholds=self.thresholds,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight)
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates all four confusion-matrix statistics.

    The update op is deliberately not returned — see
    https://github.com/tensorflow/tensorflow/issues/30711 (tensorflow.keras
    requires update_state to return None).
    """
    variables = {
        ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
        ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
        ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
        ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
    }
    update_confusion_matrix_variables(
        variables,
        y_true,
        y_pred,
        thresholds=self.thresholds,
        sample_weight=sample_weight)
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates true negative and false positive statistics.

    Args:
      y_true: The ground truth values, with the same dimensions as `y_pred`.
        Will be cast to `bool`.
      y_pred: The predicted values. Each element must be in the range
        `[0, 1]`.
      sample_weight: Optional weighting of each example. Defaults to 1. Can
        be a `Tensor` whose rank is either 0, or the same rank as `y_true`,
        and must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    variables_to_update = {
        metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
        metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
    }
    return metrics_utils.update_confusion_matrix_variables(
        variables_to_update,
        y_true,
        y_pred,
        thresholds=self.thresholds,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight)
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates true positive, false positive and false negative statistics.

    BUG FIX: the docstring promised "Returns: Update op" but the op produced
    by `update_confusion_matrix_variables` was discarded; it is now returned,
    matching the sibling metric implementations in this file.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can
        be a `Tensor` whose rank is either 0, or the same rank as `y_true`,
        and must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.tp,
            metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.fp,
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.fn,
        },
        y_true,
        y_pred,
        self.thresholds,
        sample_weight=sample_weight)
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates the metric statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can
        be a `Tensor` whose rank is either 0, or the same rank as `y_true`,
        and must be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    # `return` was deliberately dropped (tf.keras update_state must return
    # None); the call below still mutates the accumulator variable.
    metrics_utils.update_confusion_matrix_variables(
        {self._confusion_matrix_cond: self.accumulator},
        y_true,
        y_pred,
        thresholds=self.thresholds,
        class_id=self.class_id,      # class_id option added here
        multi_label=self.multi_label,  # multi_label option added here
        sample_weight=sample_weight)
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates TP/FP/FN statistics and returns the update op."""
    confusion_counts = {
        metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
        metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
        metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
    }
    return metrics_utils.update_confusion_matrix_variables(
        confusion_counts,
        y_true=y_true,
        y_pred=y_pred,
        thresholds=self.thresholds,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight,
    )
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates confusion-matrix statistics for a one-vs-rest view of
    class `self._label`.

    BUG FIX: the update op was bound to a local `update_op` that was never
    used or returned, leaving dead code and discarding the op TF v1-style
    callers need; the op is now returned directly.

    Args:
      y_true: Integer class labels (cast to int32 for comparison).
      y_pred: Per-class scores; softmaxed first when `self._from_logits`.
      sample_weight: Optional weighting of each example. Defaults to 1.

    Returns:
      Update op.
    """
    if self._from_logits:
        y_pred = tf.keras.activations.softmax(y_pred)
    # Reduce multi-class to binary: score of the tracked label vs. a boolean
    # "is this example of the tracked label" target.
    y_pred = y_pred[:, self._label]
    y_true = tf.equal(self._label, tf.cast(y_true, dtype=tf.int32))
    y_true = tf.reshape(y_true, shape=(-1,))
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES:
                self.true_positives,
            metrics_utils.ConfusionMatrix.TRUE_NEGATIVES:
                self.true_negatives,
            metrics_utils.ConfusionMatrix.FALSE_POSITIVES:
                self.false_positives,
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES:
                self.false_negatives,
        },
        y_true,
        y_pred,
        self._thresholds,
        sample_weight=sample_weight)
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates true positive and false positive statistics.

    When `self.argmax` is set, predictions are first collapsed to class
    indices via argmax before the confusion-matrix update.
    """
    if self.argmax:
        y_pred = K.argmax(y_pred)
    counts = {
        metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
        metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
        metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
    }
    return metrics_utils.update_confusion_matrix_variables(
        counts,
        y_true,
        y_pred,
        thresholds=self.thresholds,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight)
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates the given confusion matrix condition statistics.

    Args:
      y_true: The ground truth values, <tf.Tensor>.
      y_pred: The predicted values, <tf.Tensor>.
      sample_weight: Optional weighting of each example. Defaults to 1. Can
        be a tensor whose rank is either 0, or the same rank as y_true,
        <tf.Tensor>.

    Returns:
      Update op.
    """
    # Single-label mode: multi_label and label_weights are pinned explicitly.
    variables = {self._confusion_matrix_cond: self.accumulator}
    return metrics_utils.update_confusion_matrix_variables(
        variables,
        y_true,
        y_pred,
        thresholds=self.thresholds,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight,
        multi_label=False,
        label_weights=None)