def _calc_metrics(self, boxes, anno):
    ious = rect_iou(boxes, anno)

    # special case: frames where the target is absent are encoded as
    # all-NaN rows. Score 0 when only one of prediction/annotation is
    # absent; the second write then overrides the first so that frames
    # where both agree the target is absent score 1.
    not_present_mask_anno = np.any(np.isnan(anno), axis=1)
    not_present_mask_boxes = np.any(np.isnan(boxes), axis=1)
    ious[np.logical_or(not_present_mask_anno, not_present_mask_boxes)] = 0.0
    ious[np.logical_and(not_present_mask_anno, not_present_mask_boxes)] = 1.0
    return ious
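# A minimal, self-contained sketch of the masking rule above (illustrative
# data, not from the source; the rect_iou import is assumed to come from the
# got10k toolkit): a frame where both sides are absent scores 1.0, a frame
# where only one side is absent scores 0.0, and regular frames keep their
# rect_iou value.
import numpy as np
from got10k.utils.metrics import rect_iou  # assumed toolkit import

boxes = np.array([[10., 10., 20., 20.], [np.nan] * 4, [np.nan] * 4])
anno = np.array([[10., 10., 20., 20.], [np.nan] * 4, [30., 30., 5., 5.]])

ious = rect_iou(boxes, anno)  # NaN rows propagate NaN here
absent_anno = np.any(np.isnan(anno), axis=1)
absent_boxes = np.any(np.isnan(boxes), axis=1)
ious[absent_anno | absent_boxes] = 0.0
ious[absent_anno & absent_boxes] = 1.0
print(ious)  # prints [1. 1. 0.]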
def test_iou(self):
    rects1 = np.random.rand(1000, 4) * 100
    rects2 = np.random.rand(1000, 4) * 100
    bound = (50, 100)
    ious1 = rect_iou(rects1, rects2, bound=bound)
    ious2 = poly_iou(rects1, rects2, bound=bound)
    # compare absolute differences so the check also fails when ious2
    # overshoots ious1 (a signed difference would pass in that case)
    self.assertTrue(np.abs(ious1 - ious2).max() < 1e-14)

    polys1 = self._rect2corner(rects1)
    polys2 = self._rect2corner(rects2)
    ious3 = poly_iou(polys1, polys2, bound=bound)
    self.assertTrue(np.abs(ious1 - ious3).max() < 1e-14)
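# The test above relies on a _rect2corner helper defined elsewhere on the
# test case. A plausible sketch of it (hypothetical implementation, hedged):
# expand [x, y, w, h] rectangles into the 8-value corner polygons
# [x1, y1, ..., x4, y4] that poly_iou also accepts.
import numpy as np

def _rect2corner(self, rects):
    x, y, w, h = rects[:, 0], rects[:, 1], rects[:, 2], rects[:, 3]
    # corners in clockwise order starting at the top-left
    return np.stack([x, y, x + w, y, x + w, y + h, x, y + h], axis=1)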
def run(self, trackers):
    os.makedirs(self.report_dir, exist_ok=True)
    curr = self.report_dir
    cooldown = 0
    for d, dataset in enumerate(self.datasets):
        for s, (img_files, anno) in enumerate(dataset):
            seq_name = dataset.seq_names[s]
            frame_num = len(img_files)
            boxes = np.zeros((len(trackers), frame_num, 4))
            for f, img_file in enumerate(img_files):
                cooldown -= 1
                image = Image.open(img_file)
                if not image.mode == 'RGB':
                    image = image.convert('RGB')
                if f == 0:
                    for tracker in trackers:
                        tracker.init(image, anno[0, :])
                else:
                    for i, tracker in enumerate(trackers):
                        boxes[i, f, :] = tracker.update(image)
                # IoU between the first two trackers' predictions measures
                # their disagreement on this frame
                div = rect_iou(np.array([boxes[0, f, :]]),
                               np.array([boxes[1, f, :]]))
                if div[0] < self.theta and f > 0 and cooldown <= 0:
                    _, ax = plt.subplots()
                    ax.imshow(image)
                    for i, _ in enumerate(trackers):
                        box = boxes[i, f, :]
                        rect = patches.Rectangle(
                            box[:2], *box[2:], linewidth=5,
                            edgecolor=self.colors[i], facecolor='none')
                        ax.add_patch(rect)
                    # ground truth in green for reference
                    rect = patches.Rectangle(
                        anno[f, :2], *anno[f, 2:], linewidth=5,
                        edgecolor='springgreen', facecolor='none')
                    ax.add_patch(rect)
                    name = f"s{s}_f{f}.png"
                    plt.axis('off')
                    plt.savefig(os.path.join(curr, name),
                                bbox_inches='tight', pad_inches=0)
                    plt.close()  # release the figure to avoid leaking memory
                    cooldown = self.cd
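# The drawing code above depends on matplotlib's Rectangle((x, y), w, h)
# signature lining up with the tracker's [x, y, w, h] box layout; a
# standalone sketch with illustrative values:
import matplotlib.pyplot as plt
import matplotlib.patches as patches

fig, ax = plt.subplots()
box = [20, 30, 40, 25]  # x, y, w, h
ax.add_patch(patches.Rectangle(box[:2], *box[2:], linewidth=2,
                               edgecolor='red', facecolor='none'))
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
plt.show()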
def run(self, tracker):
    for d, dataset in enumerate(self.datasets):
        key = self.names[d]
        d_fr = 0  # dataset-wide re-init (failure) count
        for s, (img_files, anno) in enumerate(dataset):
            seq_name = dataset.seq_names[s]
            print(seq_name)
            s_fr = 0  # per-sequence re-init count
            frame_num = len(img_files)
            print(frame_num)
            boxes = np.zeros((frame_num, 4))
            for f, img_file in enumerate(img_files):
                print(f)
                image = Image.open(img_file)
                if not image.mode == 'RGB':
                    image = image.convert('RGB')
                # init on first frame of s
                if f == 0:
                    tracker.init(image, anno[0, :])
                else:
                    boxes[f, :] = tracker.update(image)
                    iou = rect_iou(np.array([boxes[f, :]]),
                                   np.array([anno[f, :]]))
                    # re-init from ground truth when overlap drops below theta
                    if iou[0] <= self.theta and f > 0:
                        if f < (frame_num - 1):
                            tracker.init(image, anno[f, :])
                        d_fr += 1
                        s_fr += 1
            self.dict[key]['seq_wise'][s] = {
                'fr': s_fr,
                'length': frame_num
            }
        self.dict[key]['total'] = {'fr': d_fr}

    # NOTE: relies on a module-level `args` (argparse namespace)
    name = args.weights.split('/')[-1].split('.')[0]
    curr_dir = os.path.join(self.report_dir, name)
    os.makedirs(curr_dir, exist_ok=True)
    report_file = os.path.join(curr_dir, 'fr.json')
    with open(report_file, 'w') as f:
        json.dump(self.dict, f, indent=4)
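# A short consumer sketch for the fr.json layout written above (the file
# path is illustrative): turn the stored counts into a per-dataset failure
# rate, i.e. re-inits per frame.
import json

with open('fr.json') as f:
    report = json.load(f)

for dataset_name, entry in report.items():
    failures = entry['total']['fr']
    frames = sum(seq['length'] for seq in entry['seq_wise'].values())
    print('%s: %.4f failures per frame' % (dataset_name, failures / frames))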
def _calc_metrics(boxes, anno):
    # default metrics; may be overridden by child classes
    ious = rect_iou(boxes, anno)
    center_errors = center_error(boxes, anno)
    return ious, center_errors
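# The comment above marks _calc_metrics as an override point; a sketch of a
# subclass swapping in the absent-target handling from the first variant in
# this section (ExperimentOPE is a placeholder base-class name, not from the
# source):
class RobustExperiment(ExperimentOPE):  # hypothetical parent
    def _calc_metrics(self, boxes, anno):
        ious = rect_iou(boxes, anno)
        center_errors = center_error(boxes, anno)
        # score 0 when either side is absent, 1 when both agree it is
        absent_anno = np.any(np.isnan(anno), axis=1)
        absent_boxes = np.any(np.isnan(boxes), axis=1)
        ious[absent_anno | absent_boxes] = 0.0
        ious[absent_anno & absent_boxes] = 1.0
        return ious, center_errors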
def report(self, tracker_names, return_report=False):
    assert isinstance(tracker_names, (list, tuple))

    if self.subset == 'val':
        # meta information is useful when evaluating
        self.dataset.return_meta = True

    # assume tracker_names[0] is your tracker
    report_dir = os.path.join(self.report_dir, self.experiment_name,
                              tracker_names[0])
    if not os.path.exists(report_dir):
        os.makedirs(report_dir)
    report_file = os.path.join(report_dir, 'performance.json')

    # visible ratios of all sequences
    seq_names = self.dataset.seq_names
    # covers = {s: self.dataset[s][2]['cover'][1:] for s in seq_names}
    # NOTE: placeholder that treats every frame as visible; the commented
    # line above is the real per-frame cover lookup
    covers = {s: 8 for s in seq_names}

    performance = {}
    for name in tracker_names:
        print('Evaluating', name)
        ious = {}
        times = {}
        performance.update({name: {'overall': {}, 'seq_wise': {}}})

        for s, (_, _, anno) in enumerate(self.dataset):
            seq_name = self.dataset.seq_names[s]
            record_files = glob.glob(os.path.join(
                self.result_dir, self.experiment_name, name,
                seq_name, '%s_[0-9]*.txt' % seq_name))
            print('Evaluating {}'.format(seq_name))
            if len(record_files) == 0:
                print('\tResults for {} not found. Skipping.'.format(
                    seq_name))
                continue
            else:
                print('\tEvaluating results for {}.'.format(seq_name))

            # read results of all repetitions
            boxes = [np.loadtxt(f, delimiter=',') for f in record_files]
            assert all([b.shape == anno.shape for b in boxes])

            # calculate and stack all ious
            # NOTE: hard-coded frame resolution used as the clipping bound
            bound = np.array([630, 460])
            seq_ious = [rect_iou(b[1:], anno[1:], bound=bound)
                        for b in boxes]
            # only consider valid frames where targets are visible
            seq_ious = [t[covers[seq_name] > 0] for t in seq_ious]
            seq_ious = np.concatenate(seq_ious)
            ious[seq_name] = seq_ious

            # stack all tracking times
            times[seq_name] = []
            seq_times = np.array([])  # default when no time file exists
            time_file = os.path.join(
                self.result_dir, self.experiment_name, name,
                seq_name, '%s_time.txt' % seq_name)
            if os.path.exists(time_file):
                seq_times = np.loadtxt(time_file, delimiter=',')
                seq_times = seq_times[~np.isnan(seq_times)]
                seq_times = seq_times[seq_times > 0]
                if len(seq_times) > 0:
                    times[seq_name] = seq_times
            else:
                print('\tCould not find times file {}'.format(time_file))

            # store sequence-wise performance
            ao, sr, speed, _ = self._evaluate(seq_ious, seq_times)
            performance[name]['seq_wise'].update({seq_name: {
                'ao': ao,
                'sr': sr,
                'speed_fps': speed,
                'length': len(anno) - 1}})

        # ious = np.concatenate(list(ious.values()))
        # indexing with the scalar cover mask above adds a leading axis,
        # hence the [0] to recover the first repetition
        ious_list = [iou[0] for iou in ious.values()]
        ious = np.concatenate(ious_list)
        ious = np.expand_dims(ious, axis=0)
        times = np.concatenate(list(times.values()))

        # store overall performance
        ao, sr, speed, succ_curve = self._evaluate(ious, times)
        performance[name].update({'overall': {
            'ao': ao,
            'sr': sr,
            'speed_fps': speed,
            'succ_curve': succ_curve.tolist()}})

    # save performance
    with open(report_file, 'w') as f:
        json.dump(performance, f, indent=4)

    if return_report:
        return performance
    else:
        # plot success curves
        self.plot_curves([report_file], tracker_names)
        return
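# report() above leans on a self._evaluate helper; a hedged sketch of the
# four-value variant it expects, following the got10k toolkit defaults
# (101 IoU thresholds, 0.5 success threshold). Treat the exact definition
# as an assumption, not the source.
def _evaluate(self, ious, times):
    ious = np.asarray(ious).ravel()  # tolerate the (1, N) shape used above
    ao = np.mean(ious)        # average overlap
    sr = np.mean(ious > 0.5)  # success rate at IoU 0.5
    speed_fps = np.mean(1. / times) if len(times) > 0 else -1
    # success curve over 101 evenly spaced IoU thresholds
    thr_iou = np.linspace(0, 1, 101)
    succ_curve = np.mean(ious[:, None] > thr_iou[None, :], axis=0)
    return ao, sr, speed_fps, succ_curve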
def report(self, tracker_names, plot_curves=True):
    assert isinstance(tracker_names, (list, tuple))

    if self.subset == 'test':
        pwd = os.getcwd()

        # generate compressed submission file for each tracker
        for tracker_name in tracker_names:
            # compress all tracking results
            result_dir = os.path.join(self.result_dir, tracker_name)
            os.chdir(result_dir)
            save_file = '../%s' % tracker_name
            compress('.', save_file)
            print('Records saved at', save_file + '.zip')

        # print submission guides
        print('\033[93mLogin and follow instructions on')
        print('http://got-10k.aitestunion.com/submit_instructions')
        print('to upload and evaluate your tracking results\033[0m')

        # switch back to previous working directory
        os.chdir(pwd)

        return None
    elif self.subset == 'val':
        # meta information is useful when evaluating
        self.dataset.return_meta = True

        # assume tracker_names[0] is your tracker
        report_dir = os.path.join(self.report_dir, tracker_names[0])
        if not os.path.exists(report_dir):
            os.makedirs(report_dir)
        report_file = os.path.join(report_dir, 'performance.json')

        # visible ratios of all sequences
        seq_names = self.dataset.seq_names
        covers = {s: self.dataset[s][2]['cover'][1:] for s in seq_names}

        performance = {}
        for name in tracker_names:
            print('Evaluating', name)
            ious = {}
            ces = {}
            times = {}
            performance.update({name: {'overall': {}, 'seq_wise': {}}})

            for s, (_, anno, meta) in enumerate(self.dataset):
                seq_name = self.dataset.seq_names[s]
                record_files = glob.glob(os.path.join(
                    self.result_dir, name, seq_name,
                    '%s_[0-9]*.txt' % seq_name))
                if len(record_files) == 0:
                    raise Exception(
                        'Results for sequence %s not found.' % seq_name)

                # read results of all repetitions
                boxes = [np.loadtxt(f, delimiter=',')
                         for f in record_files]
                assert all([b.shape == anno.shape for b in boxes])

                # calculate and stack all ious and center errors
                bound = ast.literal_eval(meta['resolution'])
                seq_ious = [rect_iou(b[1:], anno[1:], bound=bound)
                            for b in boxes]
                seq_ces = [center_error(b[1:], anno[1:]) for b in boxes]
                # only consider valid frames where targets are visible
                seq_ious = [t[covers[seq_name] > 0] for t in seq_ious]
                seq_ces = [t[covers[seq_name] > 0] for t in seq_ces]
                seq_ious = np.concatenate(seq_ious)
                seq_ces = np.concatenate(seq_ces)
                ious[seq_name] = seq_ious
                ces[seq_name] = seq_ces

                # stack all tracking times
                times[seq_name] = []
                seq_times = np.array([])  # default when no time file exists
                time_file = os.path.join(
                    self.result_dir, name, seq_name,
                    '%s_time.txt' % seq_name)
                if os.path.exists(time_file):
                    seq_times = np.loadtxt(time_file, delimiter=',')
                    seq_times = seq_times[~np.isnan(seq_times)]
                    seq_times = seq_times[seq_times > 0]
                    if len(seq_times) > 0:
                        times[seq_name] = seq_times

                # store sequence-wise performance
                ao, sr, ce, r, speed, _ = self._evaluate(
                    seq_ious, seq_ces, seq_times)
                performance[name]['seq_wise'].update({seq_name: {
                    'ao': ao,
                    'sr': sr,
                    'ce': ce,
                    'r': r,
                    'speed_fps': speed,
                    'length': len(anno) - 1}})

            ious = np.concatenate(list(ious.values()))
            ces = np.concatenate(list(ces.values()))
            times = np.concatenate(list(times.values()))

            # store overall performance
            ao, sr, ce, r, speed, succ_curve = self._evaluate(
                ious, ces, times)
            performance[name].update({'overall': {
                'ao': ao,
                'sr': sr,
                'ce': ce,
                'r': r,
                'speed_fps': speed,
                'succ_curve': succ_curve.tolist()}})

        # save performance
        with open(report_file, 'w') as f:
            json.dump(performance, f, indent=4)

        # plot success curves
        if plot_curves:
            self.plot_curves([report_file], tracker_names)

        return performance
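# Minimal sketch for consuming the performance.json produced above (the
# keys match the dictionaries written by report(); the path is illustrative):
import json

with open('performance.json') as f:
    perf = json.load(f)

for name, entry in perf.items():
    overall = entry['overall']
    print('%s: AO %.3f, SR %.3f, %.1f fps'
          % (name, overall['ao'], overall['sr'], overall['speed_fps']))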
def anchor_ratio_performances(dataset, trackers, vot_un=False, disc=True):
    """Per-sequence anchor-frame ratio and mean anchor IoU.

    Note: `disc` is accepted for signature compatibility but unused here.
    """
    ratios = []
    mean_ratio = 0
    performances = []
    mean_performance = 0
    video_perf = []
    total_length = 0
    video_ranks = []

    if isinstance(dataset, ExperimentVOT):
        for s, (img_files, anno, _) in enumerate(dataset.dataset):
            seq_name = dataset.dataset.seq_names[s]
            bound = Image.open(img_files[0]).size
            if vot_un:
                anchor_file = os.path.join(
                    dataset.result_dir, "Ours", "unsupervised", seq_name,
                    "%s_anchor.pkl" % seq_name)
                perf = get_performance(dataset, trackers[0],
                                       "performance_un.json")
            else:
                anchor_file = os.path.join(
                    dataset.result_dir, "Ours", "baseline", seq_name,
                    "%s_anchor.pkl" % seq_name)
                perf = get_performance(dataset, trackers[0])
            video_per = perf[
                trackers[0]]["seq_wise"][seq_name]["success_score"]
            # rank trackers by per-sequence success score (1 = best)
            succ_rank = [
                perf[name]["seq_wise"][seq_name]["success_score"]
                for name in trackers
            ]
            succ_rank = len(succ_rank) + 1 - rankdata(succ_rank,
                                                      method="ordinal")
            with open(anchor_file, "rb") as f:
                anchors = pickle.load(f)

            performance = 0
            ratio = 0
            for f, _ in enumerate(img_files):
                if anchors[f] is not None and ~np.any(np.isnan(anno[f])):
                    iou = (poly_iou(np.array(anchors[f][-1]), anno[f],
                                    bound)[0]
                           if len(anchors[f][-1]) > 1 else np.nan)
                    performance += iou
                    ratio += 1
            if ratio != 0:
                mean_performance += performance
                performance /= ratio
                video_perf.append(video_per)
                video_ranks.append(succ_rank[0])
                performances.append(performance)
                mean_ratio += ratio
                total_length += len(img_files)
                ratio /= len(img_files)
                ratios.append(ratio)
    else:
        for s, (img_files, anno) in enumerate(dataset.dataset):
            seq_name = dataset.dataset.seq_names[s]
            bound = Image.open(img_files[0]).size
            anchor_file = os.path.join(
                dataset.result_dir, trackers[0],
                "anchor/%s_anchor.pkl" % seq_name)
            with open(anchor_file, "rb") as f:
                anchors = pickle.load(f)
            perf = get_performance(dataset, trackers[0])
            video_per = perf[
                trackers[0]]["seq_wise"][seq_name]["success_score"]
            succ_rank = [
                perf[name]["seq_wise"][seq_name]["success_score"]
                for name in trackers
            ]
            succ_rank = len(succ_rank) + 1 - rankdata(succ_rank,
                                                      method="ordinal")

            ratio = 0
            performance = 0
            for f, _ in enumerate(img_files):
                if anchors[f] is not None and ~np.any(np.isnan(anno[f])):
                    iou = rect_iou(np.array(anchors[f][-1])[None, :],
                                   anno[f][None, :], bound=bound)[0]
                    performance += iou
                    ratio += 1
            if ratio != 0:
                mean_performance += performance
                performance /= ratio
                video_perf.append(video_per)
                video_ranks.append(succ_rank[0])
                performances.append(performance)
                mean_ratio += ratio
                total_length += len(img_files)
                ratio /= len(img_files)
                ratios.append(ratio)

    # normalize: mean IoU over all anchor frames, anchor frames per frame
    mean_performance /= mean_ratio
    mean_ratio /= total_length
    return (
        ratios,
        mean_ratio,
        performances,
        mean_performance,
        video_perf,
        dataset.dataset.seq_names,
        video_ranks,
    )
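# Illustrative use of anchor_ratio_performances (the `experiment` object and
# `tracker_names` list are placeholders for whatever the surrounding code
# already constructs): scatter per-sequence anchor ratio against mean
# anchor IoU.
import matplotlib.pyplot as plt

(ratios, mean_ratio, perfs, mean_perf,
 video_perf, seq_names, ranks) = anchor_ratio_performances(
    experiment, tracker_names)
plt.scatter(ratios, perfs)
plt.axhline(mean_perf, linestyle='--')
plt.xlabel('anchor-frame ratio')
plt.ylabel('mean anchor IoU')
plt.show()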
def anchor_ratio_diff_performances(dataset, trackers, vot_un=False,
                                   disc=True):
    # function for loading results
    def read_record(filename):
        with open(filename) as f:
            record = f.read().strip().split("\n")
            record = [[float(t) for t in line.split(",")]
                      for line in record]
        return record

    ratios = []
    mean_ratio = 0
    performances = []
    mean_performance = 0
    diffs = []
    mean_diff = 0
    total_length = 0

    if isinstance(dataset, ExperimentVOT):
        for s, (img_files, anno, _) in enumerate(dataset.dataset):
            seq_name = dataset.dataset.seq_names[s]
            bound = Image.open(img_files[0]).size
            subdir = "unsupervised" if vot_un else "baseline"
            record_files = [
                os.path.join(dataset.result_dir, name, subdir, seq_name,
                             "%s_%03d.txt" % (seq_name, 1))
                for name in trackers
            ]
            anchor_file = os.path.join(
                dataset.result_dir, "Ours", subdir, seq_name,
                "%s_anchor.pkl" % seq_name)
            boxes = [read_record(f) for f in record_files]
            with open(anchor_file, "rb") as f:
                anchors = pickle.load(f)

            diff = 0
            performance = 0
            ratio = 0
            for f, _ in enumerate(img_files):
                if anchors[f] is not None and ~np.any(np.isnan(anno[f])):
                    iou = (poly_iou(np.array(anchors[f][-1]), anno[f],
                                    bound)[0]
                           if len(anchors[f][-1]) > 1 else np.nan)
                    best_iou = max([
                        poly_iou(np.array(box[f]), anno[f], bound)[0]
                        if len(box[f]) > 1 else np.nan for box in boxes
                    ])
                    if best_iou > iou + 0.01:
                        # disc counts frames where some expert beats the
                        # anchor; otherwise accumulate the IoU gap itself
                        if disc:
                            diff += 1
                        else:
                            diff += best_iou - iou
                    performance += iou
                    ratio += 1
            if ratio != 0:
                mean_diff += diff
                mean_performance += performance
                diff /= ratio
                performance /= ratio
                diffs.append(diff)
                performances.append(performance)
                mean_ratio += ratio
                total_length += len(img_files)
                ratio /= len(img_files)
                ratios.append(ratio)
    else:
        for s, (img_files, anno) in enumerate(dataset.dataset):
            seq_name = dataset.dataset.seq_names[s]
            bound = Image.open(img_files[0]).size
            record_files = [
                os.path.join(dataset.result_dir, name,
                             "%s.txt" % seq_name)
                for name in trackers
            ]
            boxes = [
                np.loadtxt(record_file, delimiter=",")
                for record_file in record_files
            ]
            anchor_file = os.path.join(
                dataset.result_dir, trackers[0],
                "anchor/%s_anchor.pkl" % seq_name)
            with open(anchor_file, "rb") as f:
                anchors = pickle.load(f)

            ratio = 0
            performance = 0
            diff = 0
            for f, _ in enumerate(img_files):
                if anchors[f] is not None and ~np.any(np.isnan(anno[f])):
                    iou = rect_iou(np.array(anchors[f][-1])[None, :],
                                   anno[f][None, :], bound=bound)[0]
                    best_iou = max([
                        rect_iou(np.array(box[f])[None, :],
                                 anno[f][None, :], bound=bound)[0]
                        for box in boxes
                    ])
                    if best_iou > iou + 0.01:
                        if disc:
                            diff += 1
                        else:
                            diff += best_iou - iou
                    performance += iou
                    ratio += 1
            if ratio != 0:
                mean_diff += diff
                mean_performance += performance
                diff /= ratio
                performance /= ratio
                diffs.append(diff)
                performances.append(performance)
                mean_ratio += ratio
                total_length += len(img_files)
                ratio /= len(img_files)
                ratios.append(ratio)

    mean_performance /= mean_ratio
    mean_diff /= mean_ratio
    mean_ratio /= total_length
    return (
        ratios,
        mean_ratio,
        diffs,
        mean_diff,
        performances,
        mean_performance,
        dataset.dataset.seq_names,
    )
def calc_error(dataset, seq_name, trackerss):
    # function for loading results
    def read_record(filename):
        with open(filename) as f:
            record = f.read().strip().split("\n")
            record = [[float(t) for t in line.split(",")]
                      for line in record]
        return record

    trackers = trackerss.copy()
    # result directories use "SiamFC_Plus" for the tracker published as
    # "SiamFC_Res22"
    trackers[trackerss.index("SiamFC_Res22")] = "SiamFC_Plus"

    if isinstance(dataset, ExperimentVOT):
        for s, (img_files, anno, _) in enumerate(dataset.dataset):
            if seq_name == dataset.dataset.seq_names[s]:
                break
        anno_rects = anno.copy()
        if anno_rects.shape[1] == 8:
            anno_rects = dataset.dataset._corner2rect(anno_rects)
        record_files = [
            os.path.join(dataset.result_dir, name, "unsupervised",
                         seq_name, "%s_%03d.txt" % (seq_name, 1))
            for name in trackers
        ]
        boxes = [read_record(f) for f in record_files]
        for box in boxes:
            box[0] = anno_rects[0]
    else:
        for s, (img_files, anno) in enumerate(dataset.dataset):
            if seq_name == dataset.dataset.seq_names[s]:
                break
        anno_rects = anno.copy()
        record_files = [
            os.path.join(dataset.result_dir, name, "%s.txt" % seq_name)
            for name in trackers
        ]
        boxes = [
            np.loadtxt(record_file, delimiter=",")
            for record_file in record_files
        ]
    # a single conversion covers both branches (the original converted twice)
    boxes = np.array(boxes)

    # per-frame loss of tracker j is 1 - IoU with the ground truth
    losses = np.zeros((len(anno_rects) - 1, len(trackers)))
    for frame in range(1, len(anno_rects)):
        losses[frame - 1] = [
            (1.0 - rect_iou(boxes[j, frame], anno_rects[frame]))
            for j in range(len(trackers))
        ]

    # cumulative regret of each tracker against the best expert
    # (tracker 0 is excluded when picking the best expert)
    sum_loss = np.sum(losses[:, 1:], axis=0)
    best_idx = np.argmin(sum_loss)
    regrets = [[
        np.sum(losses[:i + 1, j], axis=0)
        - np.sum(losses[:i + 1, best_idx + 1], axis=0)
        for i in range(len(losses))
    ] for j in range(len(trackers))]
    return regrets
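# Sketch: plot the cumulative-regret curves computed above, one line per
# tracker. 'ball1' is an illustrative sequence name, and tracker_names is
# assumed to contain the same names used in the result directories
# (including "SiamFC_Res22").
import matplotlib.pyplot as plt

regrets = calc_error(experiment, 'ball1', tracker_names)
for name, curve in zip(tracker_names, regrets):
    plt.plot(curve, label=name)
plt.xlabel('frame')
plt.ylabel('cumulative regret vs. best expert')
plt.legend()
plt.show()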
def best_expert(dataset, seq_name, trackerss):
    # function for loading results
    def read_record(filename):
        with open(filename) as f:
            record = f.read().strip().split("\n")
            record = [[float(t) for t in line.split(",")]
                      for line in record]
        return record

    trackers = trackerss.copy()
    trackers[trackerss.index("SiamFC_Res22")] = "SiamFC_Plus"

    if isinstance(dataset, ExperimentVOT):
        for s, (img_files, anno, _) in enumerate(dataset.dataset):
            if seq_name == dataset.dataset.seq_names[s]:
                break
        anno_rects = anno.copy()
        if anno_rects.shape[1] == 8:
            anno_rects = dataset.dataset._corner2rect(anno_rects)
        record_files = [
            os.path.join(dataset.result_dir, name, "unsupervised",
                         seq_name, "%s_%03d.txt" % (seq_name, 1))
            for name in trackers
        ]
        anchor_file = os.path.join(
            dataset.result_dir, "Ours", "unsupervised", seq_name,
            "%s_anchor.pkl" % seq_name)
        boxes = [read_record(f) for f in record_files]
        for box in boxes:
            box[0] = anno_rects[0]
    else:
        for s, (img_files, anno) in enumerate(dataset.dataset):
            if seq_name == dataset.dataset.seq_names[s]:
                break
        anno_rects = anno.copy()
        record_files = [
            os.path.join(dataset.result_dir, name, "%s.txt" % seq_name)
            for name in trackers
        ]
        anchor_file = os.path.join(
            dataset.result_dir, trackers[0],
            "anchor/%s_anchor.pkl" % seq_name)
        boxes = [
            np.loadtxt(record_file, delimiter=",")
            for record_file in record_files
        ]
    boxes = np.array(boxes)
    with open(anchor_file, "rb") as f:
        anchors = pickle.load(f)

    # score experts against the anchors: when an anchor arrives at `frame`,
    # back-fill one loss row per frame of the segment it covers
    prev_anchor_frame = 0
    losses = np.zeros((len(anno_rects) - 1, len(trackers)))
    for frame in range(1, len(anno_rects)):
        if anchors[frame] is not None:
            for i in range(prev_anchor_frame + 1, frame + 1):
                anchor = anchors[frame][i - (prev_anchor_frame + 1)]
                losses[i - 1] = [(1.0 - rect_iou(boxes[j, i], anchor))
                                 for j in range(len(trackers))]
            prev_anchor_frame = frame
        elif frame == len(anno_rects) - 1:
            # no anchor covers the tail: fall back to ground-truth losses
            for i in range(prev_anchor_frame + 1, frame + 1):
                losses[i - 1] = [
                    (1.0 - rect_iou(boxes[j, i], anno_rects[i]))
                    for j in range(len(trackers))
                ]

    # the best expert excludes tracker 0 ("Ours"); the returned index is
    # into the full tracker list
    sum_loss = np.sum(losses[:, 1:], axis=0)
    best_idx = np.argmin(sum_loss)
    return best_idx + 1
def calc_metrics(boxes, anno):
    ious = rect_iou(boxes, anno)
    center_errors = center_error(boxes, anno)
    return ious, center_errors
def report(self, tracker_names, plot_curves=True):
    assert isinstance(tracker_names, (list, tuple))

    # if self.subset == 'test':
    #     pwd = os.getcwd()
    #
    #     # generate compressed submission file for each tracker
    #     for tracker_name in tracker_names:
    #         # compress all tracking results
    #         result_dir = os.path.join(self.result_dir, tracker_name)
    #         os.chdir(result_dir)
    #         save_file = '../%s' % tracker_name
    #         compress('.', save_file)
    #         print('Records saved at', save_file + '.zip')
    #
    #     # switch back to previous working directory
    #     os.chdir(pwd)
    #
    #     return None
    # elif self.subset == 'val':

    # assume tracker_names[0] is your tracker
    report_dir = os.path.join(self.report_dir, tracker_names[0])
    if not os.path.exists(report_dir):
        os.makedirs(report_dir)
    report_file = os.path.join(report_dir, 'performance.json')

    performance = {}
    for name in tracker_names:
        print('=> {} evaluating'.format(name))
        left_ious = {}
        times = {}
        performance.update({name: {'sequence_wise': {}}})

        for s, sequence in enumerate(self.dataset):
            # NOTE: assumes Windows-style path separators
            sequence_name = self.dataset.sequence_directories[s].split(
                '\\')[-1]
            print('--Sequence %s (%dth in total %d)' %
                  (sequence_name, s + 1, len(self.dataset)))
            record_files = glob.glob(os.path.join(
                self.result_dir, name, sequence_name, '*_bbox.txt'))
            if len(record_files) == 0:
                # skip sequences without results
                continue

            left_annotations = np.array(
                sequence.left_annotation).reshape(-1, 4)
            right_annotations = np.array(
                sequence.right_annotation).reshape(-1, 4)

            # read results of all repetitions; each record should have one
            # row per frame after the init frame
            left_boxes = []
            for record_file in record_files:
                data = np.loadtxt(record_file, dtype=float, delimiter=',')
                if (data.shape[0] == left_annotations.shape[0] - 1) and (
                        data.shape[0] == right_annotations.shape[0] - 1):
                    left_boxes.append(data[:, :4])
                else:
                    print('the dimension of the tracking results does not '
                          'match the ground truth')
                    continue

            # calculate and stack all ious
            bound = ast.literal_eval(self.dataset.meta['resolution'])
            left_sequence_ious = [
                rect_iou(b[0:], left_annotations[1:], bound=bound)
                for b in left_boxes
            ]
            left_sequence_ious = np.concatenate(left_sequence_ious)
            left_ious[os.path.join(sequence_name,
                                   'left')] = left_sequence_ious

            # stack all tracking times
            times[sequence_name] = []
            time_files = glob.glob(os.path.join(
                self.result_dir, name, sequence_name, '*_time.txt'))
            time_data = [np.loadtxt(t, delimiter=',') for t in time_files]
            sequence_times = np.concatenate(time_data)
            if len(sequence_times) > 0:
                times[sequence_name] = sequence_times

            # store sequence-wise performance
            left_ao, left_sr, left_speed, _ = self._evaluate(
                left_sequence_ious, sequence_times)
            performance[name]['sequence_wise'].update({
                os.path.join(sequence_name, 'left'): {
                    'ao': left_ao,
                    'sr': left_sr,
                    'speed_fps': left_speed,
                    'length': len(sequence) - 1
                }
            })

        left_ious = np.concatenate(list(left_ious.values()))
        times = np.concatenate(list(times.values()))

        # store overall performance
        left_ao, left_sr, left_speed, left_succ_curve = self._evaluate(
            left_ious, times)
        performance[name].update({
            'overall_left': {
                'ao': left_ao,
                'sr': left_sr,
                'speed_fps': left_speed,
                'succ_curve': left_succ_curve.tolist()
            }
        })

    # save performance
    with open(report_file, 'w') as f:
        json.dump(performance, f, indent=4)

    # plot success curves
    if plot_curves:
        keys = ['overall_left']
        self._plot_curves([report_file], tracker_names[0], keys)

    return performance