def assess_dataset(tasks, predictions, iou_threshold, resolution_seconds=30):
    '''Assesses predictions against ground-truth annotations for a whole dataset.

    Args:
        tasks: VideoObjectDict of tasks. Each task must include annotations.
        predictions: VideoObjectDict of predictions.
        iou_threshold: Minimum IOU for a prediction to count as correct.
        resolution_seconds: Temporal resolution (in seconds) at which the
            per-sequence assessments are quantized.

    Returns:
        Enough information to produce the plots.
    '''
    frame_assessments = dataset.VideoObjectDict({
        key: assess_sequence(tasks[key].labels, predictions[key], iou_threshold)
        for key in tasks.keys()})
    return make_dataset_assessment(
        totals=dataset.VideoObjectDict({
            key: assessment_sum(frame_assessments[key].values())
            for key in frame_assessments.keys()}),
        quantized_totals=dataset.VideoObjectDict({
            key: quantize_sequence_assessment(
                frame_assessments[key],
                init_time=tasks[key].init_time,
                resolution=(FRAME_RATE * resolution_seconds))
            for key in frame_assessments.keys()}),
        frame_assessments=frame_assessments)
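# Usage sketch (illustrative only; the helper name and parameter values are
# assumptions, not part of the original module): given `tasks` and
# `predictions` as VideoObjectDicts with matching (video, object) keys, a
# dataset assessment at IOU 0.5 with one-minute quantization bins could be
# obtained as follows.
def _example_assess_dataset(tasks, predictions):
    return assess_dataset(tasks, predictions, iou_threshold=0.5,
                          resolution_seconds=60)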
def load_dataset_assessment_json(f):
    data = json.load(f)
    return make_dataset_assessment(
        totals=dataset.VideoObjectDict(
            {tuple(vid_obj): total for vid_obj, total in data['totals']}),
        quantized_totals=dataset.VideoObjectDict({
            tuple(vid_obj): QuantizedAssessment({
                tuple(interval): total for interval, total in quantized_totals})
            for vid_obj, quantized_totals in data['quantized_totals']}))
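# Usage sketch (the helper name and file name are hypothetical): restoring an
# assessment that was previously cached as JSON.
def _example_load_cached_assessment(path='assessment.json'):
    with open(path, 'r') as f:
        return load_dataset_assessment_json(f)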
def dataset_quality_interval(quantized_assessments, min_time=None, max_time=None,
                             enable_bootstrap=True, num_trials=None, base_seed=0):
    '''Computes dataset quality restricted to a time interval.

    Args:
        quantized_assessments: VideoObjectDict of per-sequence QuantizedAssessments.
        min_time, max_time: Optional bounds of the interval; None leaves that
            end unbounded.
        enable_bootstrap: Whether to add error estimates obtained by bootstrap
            resampling over tracks.
        num_trials: Number of bootstrap trials; required if enable_bootstrap is True.
        base_seed: Seed for the bootstrap sampler.

    Returns:
        Dict of summary statistics, with values converted to plain Python types.
    '''
    interval_totals = dataset.VideoObjectDict({
        track: quantized_assessments[track].get(min_time, max_time)
        for track in quantized_assessments.keys()})
    quality = summarize(interval_totals.values())
    if enable_bootstrap:
        if num_trials is None:
            raise ValueError('must specify number of trials for bootstrap sampling')
        quality.update(
            bootstrap(summarize, interval_totals, num_trials, base_seed=base_seed))
    quality = {k: np.asarray(v).tolist() for k, v in quality.items()}
    return quality
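# Usage sketch (the helper name, trial count and two-minute horizon are
# assumptions; times are taken to be in frames, mirroring the
# FRAME_RATE * resolution_seconds conversion in assess_dataset above):
# quality of the dataset up to two minutes after initialization, with
# bootstrap resampling over tracks.
def _example_quality_up_to_two_minutes(quantized_totals):
    return dataset_quality_interval(
        quantized_totals,
        min_time=None,
        max_time=int(round(FRAME_RATE * 120)),  # two minutes, expressed in frames
        enable_bootstrap=True,
        num_trials=100)  # illustrative trial count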
def union_dataset_assessment(x, y):
    '''Combines the tracks of two dataset assessments.'''
    if y is None:
        return x
    if x is None:
        return y
    return {
        'totals': dataset.VideoObjectDict(dict(itertools.chain(
            x['totals'].items(), y['totals'].items()))),
        'quantized_totals': dataset.VideoObjectDict(dict(itertools.chain(
            x['quantized_totals'].items(), y['quantized_totals'].items()))),
    }
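# Usage sketch (the helper name is hypothetical): folding together the
# assessments of several subsets. Because union_dataset_assessment treats None
# as an empty assessment, it can serve as the combining step of a simple
# reduction.
def _example_union_all(assessments):
    combined = None
    for assessment in assessments:
        combined = union_dataset_assessment(combined, assessment)
    return combined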
def load_predictions_and_select_frames(tasks, tracker_pred_dir,
                                        permissive=False, log_prefix=''):
    '''Loads all predictions of a tracker and takes the subset of frames with ground truth.

    Args:
        tasks: VideoObjectDict of Tasks.
        tracker_pred_dir: Directory that contains files video_object.csv
        permissive: If True, tracks whose prediction file cannot be read are
            excluded with a warning instead of raising an exception.
        log_prefix: String prepended to per-track debug messages.

    Returns:
        VideoObjectDict of SparseTimeSeries of frame assessments.
    '''
    logger.info('load predictions from "%s"', tracker_pred_dir)
    preds = dataset.VideoObjectDict()
    for track_num, vid_obj in enumerate(tasks.keys()):
        vid, obj = vid_obj
        track_name = vid + '_' + obj
        logger.debug(log_prefix + 'object {}/{} {}'.format(
            track_num + 1, len(tasks), track_name))
        pred_file = os.path.join(tracker_pred_dir, '{}.csv'.format(track_name))
        try:
            with open(pred_file, 'r') as fp:
                pred = io_pred.load_predictions_csv(fp)
        except IOError as exc:
            if permissive:
                logger.warning('exclude track %s: %s', track_name, str(exc))
                # Skip this track rather than fall through with a stale or
                # undefined prediction.
                continue
            else:
                raise
        pred = subset_using_previous_if_missing(
            pred, tasks[vid_obj].labels.sorted_keys())
        preds[vid_obj] = pred
    return preds
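# End-to-end sketch (the helper name, directory layout and IOU threshold are
# assumptions): load one tracker's CSV predictions, align them with the
# annotated frames, and assess the dataset. In permissive mode some tracks may
# have been excluded, so the tasks are restricted to the tracks that were
# actually loaded before assessing.
def _example_assess_tracker(tasks, tracker_pred_dir, iou_threshold=0.5):
    predictions = load_predictions_and_select_frames(
        tasks, tracker_pred_dir, permissive=True)
    assessed_tasks = dataset.VideoObjectDict(
        {key: tasks[key] for key in predictions.keys()})
    return assess_dataset(assessed_tasks, predictions, iou_threshold)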