# Example #1
# 0
def _plot_tpr_tnr_overall(assessments,
                          trackers,
                          names=None,
                          colors=None,
                          markers=None):
    """Generate the overall TPR/TNR figures.

    Produces one figure per combination of IOU threshold, bootstrap mode and
    post-hoc mode, delegating the actual drawing to `_plot_tpr_tnr`.

    Args:
        assessments: Maps [tracker][iou] to dataset assessment.
        trackers: List of tracker names to plot.
        names, colors, markers: Optional dicts mapping tracker to display style.
    """
    use_bootstrap = [False, True] if args.bootstrap else [False]
    for iou in args.iou_thresholds:
        for bootstrap in use_bootstrap:
            # The post-hoc variant is only produced without bootstrapping.
            posthoc_modes = [False] if bootstrap else [False, True]
            for posthoc in posthoc_modes:
                suffix = ''
                if posthoc:
                    suffix += '_posthoc'
                if bootstrap:
                    suffix += '_bootstrap'
                base_name = 'tpr_tnr_iou_' + oxuva.float2str(iou) + suffix
                _plot_tpr_tnr(base_name,
                              assessments,
                              trackers,
                              iou,
                              bootstrap,
                              posthoc,
                              names=names,
                              colors=colors,
                              markers=markers,
                              min_time_sec=None,
                              max_time_sec=None,
                              include_score=True)
# Example #2
# 0
def _plot_present_absent(assessments,
                         trackers,
                         iou_threshold,
                         bootstrap,
                         names=None,
                         colors=None,
                         markers=None):
    """Plot TPR on tracks with no absent labels vs TPR on tracks with some.

    Saves two PDFs (with and without legend) named
    ``present_absent_iou_<iou>[_bootstrap]`` under the analysis plot directory.

    Args:
        assessments: Maps [tracker][iou_threshold] to an assessment dict that
            contains a 'totals' entry (consumed by oxuva quality functions).
        trackers: List of tracker names to plot.
        iou_threshold: IOU threshold at which to evaluate quality.
        bootstrap: If true, plot error bars from bootstrap TPR variance.
        names, colors, markers: Optional dicts mapping tracker to display style.
    """
    names = names or {}
    colors = colors or {}
    markers = markers or {}

    # Quality on the whole dataset: only used to order trackers in the legend.
    stats_whole = {
        tracker:
        oxuva.dataset_quality(assessments[tracker][iou_threshold]['totals'],
                              enable_bootstrap=bootstrap,
                              num_trials=args.bootstrap_trials)
        for tracker in trackers
    }
    # Quality restricted to tracks where the object is never absent (x-axis).
    stats_all_present = {
        tracker: oxuva.dataset_quality_filter(
            assessments[tracker][iou_threshold]['totals'],
            require_none_absent=True,
            enable_bootstrap=bootstrap,
            num_trials=args.bootstrap_trials)
        for tracker in trackers
    }
    # Quality restricted to tracks with at least one absent label (y-axis).
    stats_any_absent = {
        tracker: oxuva.dataset_quality_filter(
            assessments[tracker][iou_threshold]['totals'],
            require_some_absent=True,
            enable_bootstrap=bootstrap,
            num_trials=args.bootstrap_trials)
        for tracker in trackers
    }

    order = _sort_quality(stats_whole)
    tpr_key = 'TPR_mean' if bootstrap else 'TPR'
    # Shared axis limit: largest TPR over both subsets and all trackers.
    max_tpr = max(
        max([stats_all_present[tracker][tpr_key] for tracker in trackers]),
        max([stats_any_absent[tracker][tpr_key] for tracker in trackers]))

    plt.figure(figsize=(args.width_inches, args.height_inches))
    plt.xlabel('TPR (tracks without absent labels)')
    plt.ylabel('TPR (tracks with some absent labels)')
    for tracker in order:
        if bootstrap:
            # Error bars are errorbar_size standard deviations of the
            # bootstrap TPR estimate on each axis.
            plot_func = functools.partial(
                _errorbar,
                xerr=args.errorbar_size *
                np.sqrt([stats_all_present[tracker]['TPR_var']]),
                yerr=args.errorbar_size *
                np.sqrt([stats_any_absent[tracker]['TPR_var']]),
                capsize=3)
        else:
            plot_func = plt.plot
        plot_func([stats_all_present[tracker][tpr_key]],
                  [stats_any_absent[tracker][tpr_key]],
                  label=names.get(tracker, tracker),
                  color=colors.get(tracker, None),
                  marker=markers.get(tracker, None),
                  markerfacecolor='none',
                  markeredgewidth=2,
                  clip_on=False)
    # Fix: the xmin/xmax (ymin/ymax) keyword aliases of xlim/ylim were
    # removed in matplotlib 3.3; positional (left, right) works everywhere.
    plt.xlim(0, _ceil_nearest(CLEARANCE * max_tpr, 0.1))
    plt.ylim(0, _ceil_nearest(CLEARANCE * max_tpr, 0.1))
    plt.grid(color=GRID_COLOR, clip_on=False)
    _hide_spines()
    # Draw a diagonal line.
    plt.plot([0, 1], [0, 1], color=GRID_COLOR, linewidth=1, linestyle='dotted')
    plot_dir = os.path.join('analysis', args.data, args.challenge)
    _ensure_dir_exists(plot_dir)
    base_name = (
        'present_absent_iou_{}'.format(oxuva.float2str(iou_threshold)) +
        ('_bootstrap' if bootstrap else ''))
    _save_fig(os.path.join(plot_dir, base_name + '_no_legend.pdf'))
    _legend_outside()
    _save_fig(os.path.join(plot_dir, base_name + '.pdf'))
# Example #3
# 0
def _plot_intervals(assessments,
                    trackers,
                    iou_threshold,
                    bootstrap,
                    names=None,
                    colors=None,
                    markers=None):
    """Plot TPR as a function of time interval for each interval type.

    Produces one figure per mode in INTERVAL_TYPES, saved as
    ``tpr_time_iou_<iou>_interval_<mode>[_bootstrap]`` (with and without
    legend) under the analysis plot directory.

    Args:
        assessments: Maps [tracker][iou_threshold] to an assessment dict with
            'totals' and 'quantized_totals' entries.
        trackers: List of tracker names to plot.
        iou_threshold: IOU threshold at which to evaluate quality.
        bootstrap: If true, plot error bars from bootstrap TPR variance.
        names, colors, markers: Optional dicts mapping tracker to display style.
    """
    # TODO: Add errorbars using bootstrap sampling?
    names = names or {}
    colors = colors or {}
    markers = markers or {}
    times_sec = range(0, args.max_time + 1, args.time_step)

    # Get overall stats for order in legend.
    overall_stats = {
        tracker:
        oxuva.dataset_quality(assessments[tracker][iou_threshold]['totals'],
                              enable_bootstrap=bootstrap,
                              num_trials=args.bootstrap_trials)
        for tracker in trackers
    }
    order = _sort_quality(overall_stats, use_bootstrap_mean=False)

    intervals_sec = {}
    points = {}
    for mode in INTERVAL_TYPES:
        intervals_sec[mode], points[mode] = _make_intervals(times_sec, mode)

    # Evaluate quality on every interval for every tracker up front, so the
    # per-mode axis maxima can be computed before plotting.
    stats = {
        mode: {
            tracker: [
                oxuva.dataset_quality_interval(
                    assessments[tracker][iou_threshold]['quantized_totals'],
                    # Interval bounds are given in seconds; convert to frames.
                    min_time=None if min_time is None else FRAME_RATE *
                    min_time,
                    max_time=None if max_time is None else FRAME_RATE *
                    max_time,
                    enable_bootstrap=bootstrap,
                    num_trials=args.bootstrap_trials)
                for min_time, max_time in intervals_sec[mode]
            ]
            for tracker in trackers
        }
        for mode in INTERVAL_TYPES
    }
    tpr_key = 'TPR_mean' if bootstrap else 'TPR'
    # Find maximum TPR value over all plots (to have same axes).
    max_tpr = {
        mode:
        max(s[tpr_key] for tracker in trackers for s in stats[mode][tracker])
        for mode in INTERVAL_TYPES
    }

    for mode in INTERVAL_TYPES:
        plt.figure(figsize=(args.width_inches, args.height_inches))
        plt.xlabel(INTERVAL_AXIS_LABEL[mode])
        plt.ylabel('True Positive Rate')
        for tracker in order:
            tpr = [s.get(tpr_key, None) for s in stats[mode][tracker]]
            if bootstrap:
                # Error bars: errorbar_size standard deviations of bootstrap TPR.
                tpr_var = [
                    s.get('TPR_var', None) for s in stats[mode][tracker]
                ]
                plot_func = functools.partial(_errorbar,
                                              yerr=args.errorbar_size *
                                              np.sqrt(tpr_var),
                                              capsize=3)
            else:
                plot_func = plt.plot
            # Plot against time in minutes (points are in seconds).
            plot_func(1 / 60.0 * np.asarray(points[mode]),
                      tpr,
                      label=names.get(tracker, tracker),
                      marker=markers.get(tracker, None),
                      color=colors.get(tracker, None),
                      markerfacecolor='none',
                      markeredgewidth=2,
                      clip_on=False)
        # Fix: the xmin/xmax (ymin/ymax) keyword aliases of xlim/ylim were
        # removed in matplotlib 3.3; positional (left, right) works everywhere.
        plt.xlim(0, args.max_time / 60.0)
        ymax = max(max_tpr.values()) if args.same_axes else max_tpr[mode]
        plt.ylim(0, _ceil_nearest(CLEARANCE * ymax, 0.1))
        plt.grid(color=GRID_COLOR, clip_on=False)
        _hide_spines()
        plot_dir = os.path.join('analysis', args.data, args.challenge)
        _ensure_dir_exists(plot_dir)
        base_name = ('tpr_time_iou_{}_interval_{}'.format(
            oxuva.float2str(iou_threshold), mode) +
                     ('_bootstrap' if bootstrap else ''))
        _save_fig(os.path.join(plot_dir, base_name + '_no_legend.pdf'))
        _legend_outside()
        _save_fig(os.path.join(plot_dir, base_name + '.pdf'))
# Example #4
# 0
def _plot_tpr_tnr_intervals(assessments,
                            trackers,
                            names=None,
                            colors=None,
                            markers=None):
    """Generate TPR/TNR figures restricted to time intervals.

    For each IOU threshold (and bootstrap mode, if enabled) this draws one
    figure per 'before'/'after' interval via `_plot_tpr_tnr`, with a common
    axis range and a common tracker ordering across all intervals.
    """
    modes = ['before', 'after']
    intervals_sec = {
        mode: _make_intervals(args.times, mode)[0] for mode in modes}

    bootstrap_modes = [False, True] if args.bootstrap else [False]
    for bootstrap in bootstrap_modes:
        for iou in args.iou_thresholds:
            # Rank trackers by performance on all frames (for the legend).
            quality = {}
            for tracker in trackers:
                quality[tracker] = oxuva.dataset_quality(
                    assessments[tracker][iou]['totals'],
                    enable_bootstrap=bootstrap,
                    num_trials=args.bootstrap_trials)
            order = _sort_quality(quality, use_bootstrap_mean=False)

            tpr_key = 'TPR_mean' if bootstrap else 'TPR'

            def interval_tpr(tracker, min_sec, max_sec):
                # TPR of one tracker on one interval (bounds in seconds).
                return oxuva.dataset_quality_interval(
                    assessments[tracker][iou]['quantized_totals'],
                    min_time=None if min_sec is None else FRAME_RATE * min_sec,
                    max_time=None if max_sec is None else FRAME_RATE * max_sec,
                    enable_bootstrap=bootstrap,
                    num_trials=args.bootstrap_trials)[tpr_key]

            # Get stats for all plots to establish axis range.
            # Note: This means that dataset_quality_interval() is called twice.
            max_tpr = max(
                interval_tpr(tracker, lo, hi)
                for mode in modes
                for lo, hi in intervals_sec[mode]
                for tracker in trackers)

            for mode in modes:
                for min_time_sec, max_time_sec in intervals_sec[mode]:
                    name_parts = [
                        'tpr_tnr',
                        'iou_' + oxuva.float2str(iou),
                        'interval_{}_{}'.format(
                            oxuva.float2str(min_time_sec),
                            oxuva.float2str(max_time_sec)),
                    ]
                    if bootstrap:
                        name_parts.append('bootstrap')
                    _plot_tpr_tnr('_'.join(name_parts),
                                  assessments,
                                  trackers,
                                  iou,
                                  bootstrap,
                                  posthoc=False,
                                  min_time_sec=min_time_sec,
                                  max_time_sec=max_time_sec,
                                  max_tpr=max_tpr,
                                  order=order,
                                  names=names,
                                  colors=colors,
                                  markers=markers)
# Example #5
# 0
def _get_assessments(dataset, trackers):
    '''Obtains the assessment of each tracker on the given dataset.

    Args:
        dataset: String that identifies dataset ("dev" or "test").
        trackers: List of tracker names.

    Returns:
        Dictionary that maps [tracker][iou] to dataset assessment.
        Only returns assessments for subset of trackers that were successful.
    '''
    # Create functions to load tasks with annotations on demand.
    # (Do not need annotations if using cached assessments.)
    # TODO: Code would be easier to read using a class with lazy-cached elements as members?
    get_annotations = oxuva.LazyCacheCaller(
        functools.partial(
            _load_tasks_with_annotations,
            os.path.join(REPO_DIR, 'dataset', 'annotations',
                         dataset + '.csv')))

    assessments = {}
    for tracker_ind, tracker in enumerate(trackers):
        try:
            # Human-readable prefix for log messages about this tracker.
            log_context = 'tracker {}/{} {}'.format(tracker_ind + 1,
                                                    len(trackers), tracker)
            tracker_assessments = {}
            # Load predictions at most once for all IOU thresholds (can be slow).
            get_predictions = oxuva.LazyCacheCaller(
                lambda: oxuva.load_predictions_and_select_frames(
                    get_annotations(),
                    os.path.join('predictions', dataset, tracker),
                    permissive=args.permissive,
                    log_prefix=log_context + ': '))
            # Obtain results at all IOU thresholds in order to make axes equal in all graphs.
            # TODO: Is it unsafe to use float (iou) as dictionary key?
            for iou in args.iou_thresholds:
                logger.info('assess tracker "%s" with iou %g', tracker, iou)
                # NOTE: lambda closes over `iou` (late binding), but it is
                # always called within the same loop iteration, so the
                # intended threshold is used.
                assess_func = lambda: oxuva.assess_dataset(get_annotations(),
                                                           get_predictions(),
                                                           iou,
                                                           resolution_seconds=
                                                           30)
                if args.use_summary:
                    # Cache the (summary-only) assessment as JSON on disk;
                    # assess_func is invoked only on a cache miss.
                    tracker_assessments[iou] = oxuva.cache(
                        oxuva.Protocol(dump=oxuva.dump_dataset_assessment_json,
                                       load=oxuva.load_dataset_assessment_json,
                                       binary=False),
                        os.path.join(
                            'assess', dataset, tracker,
                            'iou_{}.json'.format(oxuva.float2str(iou))),
                        assess_func)
                else:
                    # When it is not cached, it will include frame_assessments.
                    # TODO: Could cache (selected frames of) predictions to file if this is slow.
                    tracker_assessments[iou] = assess_func()
        except IOError as ex:
            # Best-effort: skip trackers whose files are missing/unreadable
            # rather than failing the whole analysis.
            logger.warning(
                'could not obtain assessment of tracker "%s" on dataset "%s": %s',
                tracker, dataset, ex)
        else:
            # Only record the tracker if every IOU threshold succeeded.
            assessments[tracker] = tracker_assessments
    return assessments