import numpy as np
import scipy.stats

import utils  # local module assumed to provide segment_labels() and segment_intervals()


def overlap_(p, y, bg_class):
    # Mean segmental overlap: for every ground-truth segment, take the best IoU
    # with a predicted segment of the same label, then average over segments.
    true_intervals = np.array(utils.segment_intervals(y))
    true_labels = utils.segment_labels(y)
    pred_intervals = np.array(utils.segment_intervals(p))
    pred_labels = utils.segment_labels(p)

    # Optionally drop background segments from both sequences
    if bg_class is not None:
        true_intervals = np.array([t for t, l in zip(true_intervals, true_labels) if l != bg_class])
        true_labels = np.array([l for l in true_labels if l != bg_class])
        pred_intervals = np.array([t for t, l in zip(pred_intervals, pred_labels) if l != bg_class])
        pred_labels = np.array([l for l in pred_labels if l != bg_class])

    n_true_segs = true_labels.shape[0]
    n_pred_segs = pred_labels.shape[0]
    seg_scores = np.zeros(n_true_segs, float)  # np.float was removed in NumPy 1.24

    for i in range(n_true_segs):
        for j in range(n_pred_segs):
            if true_labels[i] == pred_labels[j]:
                intersection = (min(pred_intervals[j][1], true_intervals[i][1])
                                - max(pred_intervals[j][0], true_intervals[i][0]))
                union = (max(pred_intervals[j][1], true_intervals[i][1])
                         - min(pred_intervals[j][0], true_intervals[i][0]))
                score_ = float(intersection) / union
                seg_scores[i] = max(seg_scores[i], score_)

    return seg_scores.mean() * 100
def overlap_d(p, y, bg_class):
    # Like overlap_, but the denominator is the *predicted* segment's duration
    # rather than the union of the two intervals, i.e. the fraction of each
    # prediction that covers the true segment.
    true_intervals = np.array(utils.segment_intervals(y))
    true_labels = utils.segment_labels(y)
    pred_intervals = np.array(utils.segment_intervals(p))
    pred_labels = utils.segment_labels(p)

    if bg_class is not None:
        true_intervals = np.array([t for t, l in zip(true_intervals, true_labels) if l != bg_class])
        true_labels = np.array([l for l in true_labels if l != bg_class])
        pred_intervals = np.array([t for t, l in zip(pred_intervals, pred_labels) if l != bg_class])
        pred_labels = np.array([l for l in pred_labels if l != bg_class])

    n_true_segs = true_labels.shape[0]
    n_pred_segs = pred_labels.shape[0]
    seg_scores = np.zeros(n_true_segs, float)

    for i in range(n_true_segs):
        for j in range(n_pred_segs):
            if true_labels[i] == pred_labels[j]:
                intersection = (min(pred_intervals[j][1], true_intervals[i][1])
                                - max(pred_intervals[j][0], true_intervals[i][0]))
                # Normalize by the predicted segment's length, not the union
                duration = pred_intervals[j][1] - pred_intervals[j][0]
                score_ = float(intersection) / duration
                seg_scores[i] = max(seg_scores[i], score_)

    return seg_scores.mean() * 100
def overlap_f1_(p, y, n_classes, bg_class, overlap):
    # Segmental F1 at a given IoU threshold, for a single sequence: a predicted
    # segment is a true positive if its IoU with an as-yet-unused true segment
    # of the same label is at least `overlap`.
    true_intervals = np.array(utils.segment_intervals(y))
    true_labels = utils.segment_labels(y)
    pred_intervals = np.array(utils.segment_intervals(p))
    pred_labels = utils.segment_labels(p)

    # Remove background labels
    if bg_class is not None:
        true_intervals = true_intervals[true_labels != bg_class]
        true_labels = true_labels[true_labels != bg_class]
        pred_intervals = pred_intervals[pred_labels != bg_class]
        pred_labels = pred_labels[pred_labels != bg_class]

    n_true = true_labels.shape[0]
    n_pred = pred_labels.shape[0]

    # We keep track of the per-class TPs and FPs.
    # In the end we just sum over them though.
    TP = np.zeros(n_classes, float)
    FP = np.zeros(n_classes, float)
    true_used = np.zeros(n_true, float)

    for j in range(n_pred):
        # Compute IoU of this prediction against all true segments
        intersection = (np.minimum(pred_intervals[j, 1], true_intervals[:, 1])
                        - np.maximum(pred_intervals[j, 0], true_intervals[:, 0]))
        union = (np.maximum(pred_intervals[j, 1], true_intervals[:, 1])
                 - np.minimum(pred_intervals[j, 0], true_intervals[:, 0]))
        IoU = (intersection / union) * (pred_labels[j] == true_labels)

        # Get the best-scoring true segment
        idx = IoU.argmax()

        # If the IoU is high enough and the true segment isn't already used,
        # it is a true positive. Otherwise it is a false positive.
        if IoU[idx] >= overlap and not true_used[idx]:
            TP[pred_labels[j]] += 1
            true_used[idx] = 1
        else:
            FP[pred_labels[j]] += 1

    TP = TP.sum()
    FP = FP.sum()
    # False negatives are any unused true segments (i.e. "misses")
    FN = n_true - true_used.sum()

    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    F1 = 2 * (precision * recall) / (precision + recall)

    # If precision + recall == 0, F1 is NaN; set it to 0.
    F1 = np.nan_to_num(F1)
    return F1 * 100
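# A convenience wrapper, sketched here for completeness rather than taken from the
# excerpt above: it mirrors the list-or-single dispatch used by edit_score below and
# averages the per-sequence F1@overlap scores. The name overlap_f1 and its default
# arguments are assumptions for illustration.
def overlap_f1(P, Y, n_classes=0, bg_class=None, overlap=0.1, **kwargs):
    if isinstance(P, list):
        return np.mean([overlap_f1_(P[i], Y[i], n_classes, bg_class, overlap)
                        for i in range(len(P))])
    return overlap_f1_(P, Y, n_classes, bg_class, overlap)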
def edit_score(P, Y, norm=True, bg_class=None, **kwargs):
    # Segmental edit score: Levenshtein distance between the two sequences of
    # segment labels. If P and Y are lists of sequences, average over them.
    if isinstance(P, list):
        tmp = [edit_score(P[i], Y[i], norm, bg_class) for i in range(len(P))]
        return np.mean(tmp)
    else:
        P_ = utils.segment_labels(P)
        Y_ = utils.segment_labels(Y)
        if bg_class is not None:
            P_ = [c for c in P_ if c != bg_class]
            Y_ = [c for c in Y_ if c != bg_class]
        return levenstein_(P_, Y_, norm)
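# edit_score relies on a levenstein_ helper that is not shown in this excerpt. Below
# is a minimal sketch of a standard segment-level Levenshtein (edit) distance; with
# norm=True it is returned as a score in [0, 100]. Treat the normalization choice
# (dividing by the longer of the two segment sequences) as an assumption.
def levenstein_(p, y, norm=False):
    m_row = len(p)
    n_col = len(y)
    # D[i, j] = edit distance between the first i predicted and first j true segments
    D = np.zeros([m_row + 1, n_col + 1], float)
    D[:, 0] = np.arange(m_row + 1)
    D[0, :] = np.arange(n_col + 1)

    for j in range(1, n_col + 1):
        for i in range(1, m_row + 1):
            if y[j - 1] == p[i - 1]:
                D[i, j] = D[i - 1, j - 1]
            else:
                D[i, j] = min(D[i - 1, j] + 1,      # deletion
                              D[i, j - 1] + 1,      # insertion
                              D[i - 1, j - 1] + 1)  # substitution

    if norm:
        return (1 - D[-1, -1] / max(m_row, n_col)) * 100
    return D[-1, -1]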
def clf_(p, y, bg_class):
    # Per-segment classification accuracy: for each non-background true segment,
    # predict a single label from the frames it spans and compare it to the true
    # segment label.
    sums = 0.
    n_segs = 0.
    S_true = utils.segment_labels(y)
    I_true = np.array(utils.segment_intervals(y))

    for i in range(len(S_true)):
        if S_true[i] == bg_class:
            continue

        # If p is 1d, take the most frequent frame label; otherwise average the
        # per-frame class scores (assumed shape (T, n_classes)) and take the argmax.
        if p.ndim == 1:
            # np.ravel keeps this working whether scipy.stats.mode returns an
            # array or a scalar (the return type changed across SciPy versions).
            pred_label = np.ravel(scipy.stats.mode(p[I_true[i][0]:I_true[i][1]])[0])[0]
        else:
            pred_label = p[I_true[i][0]:I_true[i][1]].mean(0).argmax()

        sums += pred_label == S_true[i]
        n_segs += 1

    return sums / n_segs * 100
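# A small usage sketch on made-up frame labels (hypothetical data, for illustration
# only). It assumes the utils module above provides segment_labels() and
# segment_intervals() over 1-D frame-label arrays.
if __name__ == "__main__":
    y_true = np.array([0, 0, 1, 1, 1, 2, 2, 0, 3, 3])  # ground-truth frame labels
    y_pred = np.array([0, 1, 1, 1, 2, 2, 2, 0, 3, 0])  # predicted frame labels

    print("mean segment IoU:", overlap_(y_pred, y_true, bg_class=0))
    print("F1@0.50:", overlap_f1_(y_pred, y_true, n_classes=4, bg_class=0, overlap=0.5))
    print("edit score:", edit_score(y_pred, y_true, norm=True, bg_class=0))
    print("segment accuracy:", clf_(y_pred, y_true, bg_class=0))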