def __init__(self, thresholds=None, top_k=None, class_id=None,
             name='class_all_binary_accuracy_tfkeras', dtype=None):
    super().__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    default_threshold = 0.5 if top_k is None else metrics_utils_tf_keras.NEG_INF
    self.thresholds = metrics_utils_tf_keras.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.true_negatives = self.add_weight(
        'true_negatives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
def __init__(self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None):
    super(F1Score, self).__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=tf.keras.initializers.Zeros)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(len(self.thresholds),),
        initializer=tf.keras.initializers.Zeros)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(len(self.thresholds),),
        initializer=tf.keras.initializers.Zeros)
def __init__(self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None):
    super(CustomF1, self).__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    self.true_positives = self.add_weight(
        "true_positives",
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_negatives = self.add_weight(
        "false_negatives",
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_positives = self.add_weight(
        "false_positives",
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
def __init__(self, beta=1, thresholds=None, top_k=None, class_id=None, name=None, dtype=None):
    name = name or 'f{}score'.format(beta)
    super().__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.beta = beta
    self.top_k = top_k
    self.class_id = class_id

    default_threshold = 0.5 if top_k is None else metrics_utils_tf_keras.NEG_INF
    self.thresholds = metrics_utils_tf_keras.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
def __init__(self, thresholds=None, name='BinaryFbeta', dtype=None, beta=1):
    super().__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.thresholds = parse_init_thresholds(thresholds, default_threshold=0.5)
    num_thresholds = len(self.thresholds)
    self.true_positives = self.add_weight(
        'true_positives', shape=(num_thresholds,), initializer='zeros')
    self.true_negatives = self.add_weight(
        'true_negatives', shape=(num_thresholds,), initializer='zeros')
    self.false_positives = self.add_weight(
        'false_positives', shape=(num_thresholds,), initializer='zeros')
    self.false_negatives = self.add_weight(
        'false_negatives', shape=(num_thresholds,), initializer='zeros')
    self.beta = beta
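# A minimal, self-contained sketch of how constructors like the ones above are
# typically completed with update_state()/result(). This is NOT any of the
# authors' implementations: the class name FBetaSketch, the single-threshold
# simplification, and the omission of sample_weight handling are assumptions
# made here for illustration only.
import tensorflow as tf


class FBetaSketch(tf.keras.metrics.Metric):
    def __init__(self, threshold=0.5, beta=1.0, name="fbeta_sketch", dtype=None):
        super().__init__(name=name, dtype=dtype)
        self.threshold = threshold
        self.beta = beta
        self.true_positives = self.add_weight(name="true_positives", initializer="zeros")
        self.false_positives = self.add_weight(name="false_positives", initializer="zeros")
        self.false_negatives = self.add_weight(name="false_negatives", initializer="zeros")

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Threshold the predictions, then accumulate confusion-matrix counts.
        # sample_weight is ignored here to keep the sketch short.
        y_pred = tf.cast(y_pred >= self.threshold, tf.float32)
        y_true = tf.cast(y_true, tf.float32)
        self.true_positives.assign_add(tf.reduce_sum(y_true * y_pred))
        self.false_positives.assign_add(tf.reduce_sum((1.0 - y_true) * y_pred))
        self.false_negatives.assign_add(tf.reduce_sum(y_true * (1.0 - y_pred)))

    def result(self):
        # F_beta = (1 + beta^2) * TP / ((1 + beta^2) * TP + beta^2 * FN + FP)
        b2 = self.beta ** 2
        numerator = (1.0 + b2) * self.true_positives
        denominator = numerator + b2 * self.false_negatives + self.false_positives
        return tf.math.divide_no_nan(numerator, denominator)

    def reset_state(self):
        # Zero all accumulated counts between epochs/evaluations.
        for v in self.variables:
            v.assign(tf.zeros_like(v))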
def __init__(self, name: str = "f1", dtype=None, thresholds: float = None,
             top_k=None, class_id=None, **kwargs) -> None:
    super(F1score, self).__init__(name=name, dtype=dtype, **kwargs)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds=thresholds, default_threshold=default_threshold)
    self.true_positives = self.add_weight(
        "true_positives",
        shape=(len(self.thresholds),),
        initializer=tf.compat.v1.zeros_initializer,
    )
    self.false_positives = self.add_weight(
        "false_positives",
        shape=(len(self.thresholds),),
        initializer=tf.compat.v1.zeros_initializer,
    )
    self.false_negatives = self.add_weight(
        "false_negatives",
        shape=(len(self.thresholds),),
        initializer=tf.compat.v1.zeros_initializer,
    )
    self.score = self.add_weight(name="f1", initializer="zeros")
def __init__(self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None):
    """Creates an instance of the F1Score class.

    :param thresholds: A float value or a python list/tuple of float
        threshold values in [0, 1].
    :param top_k: An int value specifying the top-k predictions to consider
        when calculating the metric.
    :param class_id: Integer class ID for which we want binary metrics. This
        must be in the half-open interval `[0, num_classes)`, where
        `num_classes` is the last dimension of predictions.
    :param name: String name of the metric instance.
    :param dtype: Data type of the metric result.
    """
    super(F1Score, self).__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
def __init__(self, thresholds=None, name=None, dtype=None):
    """Creates a `F1Score` instance.

    Args:
      thresholds: (Optional) Defaults to 0.5. A float value or a python
        list/tuple of float threshold values in [0, 1]. A threshold is
        compared with prediction values to determine the truth value of
        predictions (i.e., above the threshold is `true`, below is `false`).
        One metric value is generated for each threshold value.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(F1Score, self).__init__(name=name, dtype=dtype)
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=0.5)
    self.tp = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=metrics.init_ops.zeros_initializer)
    self.fp = self.add_weight(
        'false_positives',
        shape=(len(self.thresholds),),
        initializer=metrics.init_ops.zeros_initializer)
    self.fn = self.add_weight(
        'false_negatives',
        shape=(len(self.thresholds),),
        initializer=metrics.init_ops.zeros_initializer)
def __init__(self, thresholds=None, top_k=None, class_id=None, name='mcc',
             dtype=None, argmax=False):
    super(MatthewsCorrCoef, self).__init__(name=name, dtype=dtype)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id
    self.argmax = argmax

    default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(len(self.thresholds),),
        initializer=zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(len(self.thresholds),),
        initializer=zeros_initializer)
    self.true_negatives = self.add_weight(
        'true_negatives',
        shape=(len(self.thresholds),),
        initializer=zeros_initializer)
def __init__(self, num_classes=1, average=None, beta=1.0, thresholds=None,
             top_k=None, class_id=None, sample_weight=None, name=None, dtype=None):
    # Initialize keras base metric instance
    super(FBetaScore, self).__init__(name=name, dtype=dtype)

    # Checks
    if average not in (None, "micro", "macro", "weighted"):
        raise ValueError(
            "Unknown average type. Given value '{}' is not in "
            "[None, micro, macro, weighted]".format(average))
    if not isinstance(beta, float):
        raise TypeError(
            "The value of beta should be a python float, but a '{}' was given"
            .format(type(beta)))
    if beta <= 0.0:
        raise ValueError(
            "Beta value should be greater than zero, but a value of '{}' was given"
            .format(beta))
    if type(top_k) not in (int,) and top_k is not None:
        raise TypeError(
            "The value of top_k should be either a python int or None, "
            "but a '{}' was given".format(type(top_k)))

    # Initialize the F-Beta score instance
    self.num_classes = num_classes
    self.average = average
    self.beta = beta
    self.init_thresholds = thresholds if top_k is None else 0.0
    self.thresholds = metrics_utils.parse_init_thresholds(
        self.init_thresholds, default_threshold=0.5)
    self.top_k = top_k
    self.class_id = class_id
    self.sample_weight = sample_weight
    self.init_shape = () if self.average == 'micro' else (self.num_classes,)
    self.axis = None if self.average == 'micro' else 0

    # Add metric states
    self.true_positives = self.add_weight(
        name='true_positives', shape=self.init_shape, initializer='zeros')
    self.false_positives = self.add_weight(
        name='false_positives', shape=self.init_shape, initializer='zeros')
    self.false_negatives = self.add_weight(
        name='false_negatives', shape=self.init_shape, initializer='zeros')
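# A hedged sketch of how the per-class counts created above could be reduced
# into a single F-beta value for the None/"micro"/"macro"/"weighted" average
# modes. The helper name fbeta_from_counts is hypothetical and not taken from
# the snippet; it only illustrates the averaging semantics.
import tensorflow as tf


def fbeta_from_counts(tp, fp, fn, beta=1.0, average=None):
    """tp, fp, fn: per-class count tensors of shape (num_classes,)."""
    b2 = beta ** 2
    if average == "micro":
        # Pool counts over all classes first, then compute one global score.
        tp, fp, fn = tf.reduce_sum(tp), tf.reduce_sum(fp), tf.reduce_sum(fn)
    numerator = (1.0 + b2) * tp
    score = tf.math.divide_no_nan(numerator, numerator + b2 * fn + fp)
    if average == "macro":
        # Unweighted mean of the per-class scores.
        score = tf.reduce_mean(score)
    elif average == "weighted":
        # Weight each class by its support (number of true instances).
        support = tp + fn
        weights = tf.math.divide_no_nan(support, tf.reduce_sum(support))
        score = tf.reduce_sum(score * weights)
    return score  # per-class vector when average is None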
def __init__(self, thresholds=None, top_k=None, class_id=None, name=None,
             dtype=None, **kwargs):
    """Creates a `F1` instance.

    Args:
      thresholds: (Optional) A float value or a python list/tuple of float
        threshold values in [0, 1]. A threshold is compared with prediction
        values to determine the truth value of predictions (i.e., above the
        threshold is `true`, below is `false`). One metric value is generated
        for each threshold value. If neither thresholds nor top_k are set,
        the default is to calculate the metric with `thresholds=0.5`.
      top_k: (Optional) Unset by default. An int value specifying the top-k
        predictions to consider when calculating the metric.
      class_id: (Optional) Integer class ID for which we want binary metrics.
        This must be in the half-open interval `[0, num_classes)`, where
        `num_classes` is the last dimension of predictions.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super().__init__(name=name, dtype=dtype, **kwargs)
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
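# A hedged sketch of the update_state() method that threshold-based
# constructors like the one above are usually paired with, following the
# pattern of tf.keras's built-in Precision/Recall metrics. It assumes the same
# private `metrics_utils` module the snippets import; the exact import path is
# version-dependent (it is not a public API) and the method is meant to live
# inside such a metric class, not stand alone.
from tensorflow.python.keras.utils import metrics_utils  # private, may move


def update_state(self, y_true, y_pred, sample_weight=None):
    # Accumulate per-threshold confusion-matrix counts into the weights
    # created in __init__ (true positives, false positives, false negatives).
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
            metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
        },
        y_true,
        y_pred,
        thresholds=self.thresholds,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight)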
def __init__(self, confusion_matrix_cond, thresholds=None, class_id=None,
             multi_label=False, name=None, dtype=None):
    super(_ConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype)
    self._confusion_matrix_cond = confusion_matrix_cond
    self.init_thresholds = thresholds
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=0.5)
    self.class_id = class_id
    self.multi_label = multi_label
    self.accumulator = self.add_weight(
        'accumulator',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
def __init__(self, confusion_matrix_cond, thresholds=None, top_k=None,
             class_id=None, sample_weight=None, name=None, dtype=None):
    # Initialize keras base metric instance
    super(_TopKConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype)

    # Initialize the top-k confusion matrix condition count
    self._confusion_matrix_cond = confusion_matrix_cond
    self.init_thresholds = thresholds if top_k is None else 0.0
    self.thresholds = metrics_utils.parse_init_thresholds(
        self.init_thresholds, default_threshold=0.5)
    self.top_k = top_k
    self.class_id = class_id
    self.sample_weight = sample_weight
    self.accumulator = self.add_weight(
        'accumulator',
        shape=(len(self.thresholds),),
        initializer='zeros')