Example #1
def running_precision(y_true, y_pred):
    TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    TP_FP = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = TP / (TP_FP + K.epsilon())
    return precision
Example #2
def dice_coef_clipped(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(K.round(y_true))
    y_pred_f = K.flatten(K.round(y_pred))
    intersection = K.sum(y_true_f * y_pred_f)
    return 100. * (2. * intersection + smooth) / (K.sum(y_true_f) +
                                                  K.sum(y_pred_f) + smooth)
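A quick numeric check of Example #2 (the toy tensors are illustrative; the tensorflow.keras imports are assumed):

import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.constant([[0., 1., 1., 0.]])
y_pred = tf.constant([[0.1, 0.9, 0.4, 0.2]])
# rounding keeps only the 0.9 prediction, so intersection = 1
# dice = 100 * (2*1 + 1) / (2 + 1 + 1) = 75.0
print(float(dice_coef_clipped(y_true, y_pred)))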
Example #3
def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example #4
def tp(y_true, y_pred):
    smooth = 1
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pos = K.round(K.clip(y_true, 0, 1))
    tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)
    return tp
Example #5
 def recall_m(y_true, y_pred):
     TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
     Positives = K.sum(K.round(K.clip(y_true, 0, 1)))

     recall = TP / (Positives + K.epsilon())
     return recall
Example #6
 def reverse_sequence(self, x, mask):
     """这里的mask.shape是[batch_size, seq_len, 1]
     """
     seq_len = K.round(K.sum(mask, 1)[:, 0])
     seq_len = K.cast(seq_len, 'int32')
     return tf.reverse_sequence(x, seq_len, seq_dim=1)
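A usage sketch for Example #6, with made-up tensors and assuming eager TensorFlow 2; the method body is inlined here rather than called on an instance:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.random.normal([2, 4, 3])                 # [batch, seq_len, dim]
mask = tf.constant([[[1.], [1.], [1.], [0.]],
                    [[1.], [1.], [0.], [0.]]])  # 1 marks real timesteps
seq_len = K.cast(K.round(K.sum(mask, 1)[:, 0]), 'int32')  # [3, 2]
reversed_x = tf.reverse_sequence(x, seq_len, seq_axis=1)  # padding stays in place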
Example #7
def specificity(y_true, y_pred): # TN/N (specificity); 1 = good
	true_negatives = K.sum(K.round(K.clip((1-y_true) * (1-y_pred), 0, 1)))
	possible_negatives = K.sum(K.round(K.clip(1-y_true, 0, 1)))
	return true_negatives / (possible_negatives + K.epsilon())
Example #8
def recall(y_true, y_pred):
    true_positives_computed = true_positives(y_true, y_pred)
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives_computed / (possible_positives + K.epsilon())
Example #9
def precision(y_true, y_pred):
    true_positives_computed = true_positives(y_true, y_pred)
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives_computed / (predicted_positives + K.epsilon())
    return precision
Example #10
def true_negatives(y_true, y_pred):
    y_pred_neg = 1 - K.round(y_pred)
    y_neg = 1 - y_true
    return K.sum(y_neg * y_pred_neg)
Example #11
def false_negatives(y_true, y_pred):
    y_pred_neg = 1 - K.round(y_pred)
    return K.sum(y_true * y_pred_neg)
Example #12
def true_positives(y_true, y_pred):
    return K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
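Examples #10 to #12 cover three cells of the confusion matrix; a sketch of the missing fourth in the same style (false_positives is my name, not from the source):

def false_positives(y_true, y_pred):
    y_pred_pos = K.round(y_pred)
    return K.sum((1 - y_true) * y_pred_pos)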
Example #13
 def recall(y_true, y_pred):
     true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))  # note: K.round cuts at 0.5, so a y_pred of 0.6 is already counted as 1
     possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
     recall = true_positives / (possible_positives + K.epsilon())
     return recall
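Since Example #13's comment flags the implicit 0.5 cutoff, here is a sketch with an explicit, tunable threshold (the recall_at name and the 0.5 default are assumptions):

def recall_at(y_true, y_pred, threshold=0.5):
    # K.greater makes the decision boundary explicit instead of relying on K.round
    y_pred_bin = K.cast(K.greater(y_pred, threshold), K.floatx())
    true_positives = K.sum(y_true * y_pred_bin)
    return true_positives / (K.sum(y_true) + K.epsilon())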
Example #14
def f1_macro(y_true, y_pred):
    return tf.py_function(f1_sklean_mapping_macro, (y_true, K.round(y_pred)),
                          tf.double)
Example #15
 def soft_acc(self, y_true, y_pred):
     return K.mean(K.equal(K.round(y_true), K.round(y_pred)))
Example #16
 def accuracy(self, y_true, y_pred):
     return K.mean(K.equal(K.round(y_true), K.round(y_pred)))
Example #17
 def soft_acc(self, y_true, y_pred):
     return K.mean(K.equal(K.round(y_true), K.round(y_pred)))
 
     '''building the model'''
Example #18
def threshold_binary_accuracy(y_true, y_pred):
    return K.mean(K.equal(K.round(y_true), K.round(y_pred)), axis=-1)
Example #19
def sensitivity(y_true, y_pred): # TP/P (sensitivity); 1 = good
	true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
	possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
	return true_positives / (possible_positives + K.epsilon())
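A hedged companion to Examples #7 and #19, assuming both functions are in scope:

def balanced_accuracy(y_true, y_pred):
    # mean of sensitivity (TP/P) and specificity (TN/N)
    return (sensitivity(y_true, y_pred) + specificity(y_true, y_pred)) / 2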
Example #20
def round_through(x):
    rounded = K.round(x)
    return x + K.stop_gradient(rounded - x)
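A minimal sketch of the straight-through behaviour in Example #20, assuming eager TensorFlow 2 and the K import used throughout:

import tensorflow as tf

x = tf.Variable([0.3, 0.7])
with tf.GradientTape() as tape:
    y = tf.reduce_sum(round_through(x))  # forward value: 0. + 1. = 1.
grad = tape.gradient(y, x)  # [1., 1.]: the gradient of the identity, not of round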
Example #21
def precision(y_true, y_pred): # TP/(TP+FP); 1 = good
	true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
	predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
	return true_positives / (predicted_positives + K.epsilon())
Example #22
 def recall(y_true, y_pred):
     tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
     rec = tp / (K.sum(K.round(K.clip(y_true, 0, 1))) + K.epsilon())
     return rec
Example #23
def true_positive_rate(y_true, y_pred):
    # K.epsilon() guards against division by zero when a batch has no positives
    return K.sum(
        K.flatten(y_true) * K.flatten(K.round(y_pred))) / (K.sum(y_true) +
                                                           K.epsilon())
Example #24
 def precision(y_true, y_pred):
     tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
     prec = tp / (K.sum(K.round(K.clip(y_pred, 0, 1))) + K.epsilon())
     return prec
Example #25
 def precision_m(y_true, y_pred):
     TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
     Pred_Positives = K.sum(K.round(K.clip(y_pred, 0, 1)))

     precision = TP / (Pred_Positives + K.epsilon())
     return precision
Example #26
    # switch here
    generator = test_generator

    print("Dataloader done, time (s):", time.time() - start)
    print("Evaluating:")

    prediction = model.predict(generator)

    count = 0
    for i, p in enumerate(prediction):
        labels = generator[i // bsize][1][
            i % bsize]  # select batch, select labels, select sample
        label_idx = np.where(labels)[0]

        p *= confidence_boost
        count += K.sum(K.round(K.clip(p, 0, 1)))
        predicted_positives = K.round(K.clip(p, 0, 1)).numpy().astype(int)  # np.int was removed in NumPy 1.24
        idx, = np.where(predicted_positives)
        predicted_birds = [birdcodes.inverted_bird_code[m] for m in idx]

        print("prediciton", i, predicted_birds, "prob", p[idx], "label",
              [birdcodes.inverted_bird_code[x] for x in label_idx])

    print("Number of predicted positives", count.numpy())

    results = model.evaluate(generator)
    results = {out: results[i] for i, out in enumerate(model.metrics_names)}
    print("EVALUATION:")
    padding = max(map(len, results))
    for k, v in results.items():
        print(f"{k:{padding}s}: {v}")
Example #27
def round_through(x):
    '''Element-wise rounding to the closest integer with full gradient propagation.
    A trick from [Sergey Ioffe](http://stackoverflow.com/a/36480182)
    '''
    rounded = K.round(x)
    return x + K.stop_gradient(rounded - x)
Example #28
def recall_m(y_true, y_pred):
    y_pred *= confidence_boost  # scaling factor defined elsewhere in the source file
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Example #29
    def recall(y_true, y_pred):

        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall
Example #30
def running_recall(y_true, y_pred):
    TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    TP_FN = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = TP / (TP_FN + K.epsilon())
    return recall
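A closing sketch combining running_precision (Example #1) and running_recall (Example #30), assuming both are in scope:

def running_f1(y_true, y_pred):
    p = running_precision(y_true, y_pred)
    r = running_recall(y_true, y_pred)
    return 2 * p * r / (p + r + K.epsilon())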