Code Example #1
def _print_statistics(assessments, trackers, names=None):
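    """Writes a CSV table of dataset-quality metrics per tracker and IOU threshold."""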
    fields = ['TPR', 'TNR', 'GM', 'MaxGM']
    if args.bootstrap:
        # Include xxx_mean and xxx_var keys too.
        fields = list(
            itertools.chain.from_iterable([key, key + '_mean', key + '_var']
                                          for key in fields))
    names = names or {}
    stats = {
        tracker: {
            iou: (oxuva.dataset_quality(assessments[tracker][iou]['totals'],
                                        enable_bootstrap=args.bootstrap,
                                        num_trials=args.bootstrap_trials))
            for iou in args.iou_thresholds
        }
        for tracker in trackers
    }
    table_dir = os.path.join('analysis', args.data, args.challenge)
    _ensure_dir_exists(table_dir)
    table_file = os.path.join(table_dir, 'table.csv')
    logger.info('write table to %s', table_file)
    with open(table_file, 'w') as f:
        fieldnames = ['tracker'] + [
            metric + '_' + str(iou) for iou in args.iou_thresholds
            for metric in fields
        ]
        print(','.join(fieldnames), file=f)
        for tracker in trackers:
            row = [names.get(tracker, tracker)] + [
                '{:.6g}'.format(stats[tracker][iou][metric])
                for iou in args.iou_thresholds for metric in fields
            ]
            print(','.join(row), file=f)
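
When bootstrapping is enabled (`args.bootstrap`), the field list above is tripled by appending `_mean` and `_var` variants of each metric before the CSV header is built. A minimal, self-contained sketch of that expansion and the CSV write, with hypothetical metric values standing in for the `oxuva.dataset_quality()` output:

import csv
import itertools

# Hypothetical per-tracker metrics standing in for oxuva.dataset_quality() output.
stats = {
    'TrackerA': {'TPR': 0.52, 'TPR_mean': 0.51, 'TPR_var': 4e-4,
                 'TNR': 0.91, 'TNR_mean': 0.90, 'TNR_var': 2e-4},
    'TrackerB': {'TPR': 0.47, 'TPR_mean': 0.46, 'TPR_var': 6e-4,
                 'TNR': 0.88, 'TNR_mean': 0.87, 'TNR_var': 3e-4},
}

fields = ['TPR', 'TNR']
# Same expansion as above: each metric becomes metric, metric_mean, metric_var.
fields = list(itertools.chain.from_iterable(
    [key, key + '_mean', key + '_var'] for key in fields))

with open('table.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['tracker'] + fields)
    for tracker, metrics in stats.items():
        writer.writerow([tracker] + ['{:.6g}'.format(metrics[m]) for m in fields])
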
Code Example #2
def _plot_present_absent(assessments,
                         trackers,
                         iou_threshold,
                         bootstrap,
                         names=None,
                         colors=None,
                         markers=None):
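    """Plots TPR on tracks without absent labels against TPR on tracks with absent labels."""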
    names = names or {}
    colors = colors or {}
    markers = markers or {}

    stats_whole = {
        tracker:
        oxuva.dataset_quality(assessments[tracker][iou_threshold]['totals'],
                              enable_bootstrap=bootstrap,
                              num_trials=args.bootstrap_trials)
        for tracker in trackers
    }
    stats_all_present = {
        tracker: oxuva.dataset_quality_filter(
            assessments[tracker][iou_threshold]['totals'],
            require_none_absent=True,
            enable_bootstrap=bootstrap,
            num_trials=args.bootstrap_trials)
        for tracker in trackers
    }
    stats_any_absent = {
        tracker: oxuva.dataset_quality_filter(
            assessments[tracker][iou_threshold]['totals'],
            require_some_absent=True,
            enable_bootstrap=bootstrap,
            num_trials=args.bootstrap_trials)
        for tracker in trackers
    }

    order = _sort_quality(stats_whole)
    tpr_key = 'TPR_mean' if bootstrap else 'TPR'
    max_tpr = max(
        max([stats_all_present[tracker][tpr_key] for tracker in trackers]),
        max([stats_any_absent[tracker][tpr_key] for tracker in trackers]))

    plt.figure(figsize=(args.width_inches, args.height_inches))
    plt.xlabel('TPR (tracks without absent labels)')
    plt.ylabel('TPR (tracks with some absent labels)')
    for tracker in order:
        if bootstrap:
            plot_func = functools.partial(
                _errorbar,
                xerr=args.errorbar_size *
                np.sqrt([stats_all_present[tracker]['TPR_var']]),
                yerr=args.errorbar_size *
                np.sqrt([stats_any_absent[tracker]['TPR_var']]),
                capsize=3)
        else:
            plot_func = plt.plot
        plot_func([stats_all_present[tracker][tpr_key]],
                  [stats_any_absent[tracker][tpr_key]],
                  label=names.get(tracker, tracker),
                  color=colors.get(tracker, None),
                  marker=markers.get(tracker, None),
                  markerfacecolor='none',
                  markeredgewidth=2,
                  clip_on=False)
    plt.xlim(xmin=0, xmax=_ceil_nearest(CLEARANCE * max_tpr, 0.1))
    plt.ylim(ymin=0, ymax=_ceil_nearest(CLEARANCE * max_tpr, 0.1))
    plt.grid(color=GRID_COLOR, clip_on=False)
    _hide_spines()
    # Draw a diagonal line.
    plt.plot([0, 1], [0, 1], color=GRID_COLOR, linewidth=1, linestyle='dotted')
    plot_dir = os.path.join('analysis', args.data, args.challenge)
    _ensure_dir_exists(plot_dir)
    base_name = (
        'present_absent_iou_{}'.format(oxuva.float2str(iou_threshold)) +
        ('_bootstrap' if bootstrap else ''))
    _save_fig(os.path.join(plot_dir, base_name + '_no_legend.pdf'))
    _legend_outside()
    _save_fig(os.path.join(plot_dir, base_name + '.pdf'))
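
The bootstrap branch above switches between a plain `plt.plot` call and an error-bar call that shares the same positional signature, by pre-binding the error sizes with `functools.partial`. A minimal sketch of that pattern, using `plt.errorbar` directly in place of the script's `_errorbar` helper (not shown here) and a single hypothetical TPR pair:

import functools

import matplotlib.pyplot as plt
import numpy as np

# Hypothetical TPR pair with bootstrap variances for one tracker.
x, y = 0.61, 0.38
x_var, y_var = 9e-4, 1.6e-3
bootstrap = True
errorbar_size = 2  # plays the role of args.errorbar_size

if bootstrap:
    # Bind the error bars once; the call site then looks identical to plt.plot.
    plot_func = functools.partial(plt.errorbar,
                                  xerr=errorbar_size * np.sqrt([x_var]),
                                  yerr=errorbar_size * np.sqrt([y_var]),
                                  capsize=3)
else:
    plot_func = plt.plot

plot_func([x], [y], label='TrackerA', marker='o',
          markerfacecolor='none', markeredgewidth=2, clip_on=False)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.legend()
plt.savefig('present_absent_sketch.pdf')
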
Code Example #3
def _plot_intervals(assessments,
                    trackers,
                    iou_threshold,
                    bootstrap,
                    names=None,
                    colors=None,
                    markers=None):
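    """Plots TPR over time intervals for each interval mode at one IOU threshold."""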
    # TODO: Add errorbars using bootstrap sampling?
    names = names or {}
    colors = colors or {}
    markers = markers or {}
    times_sec = range(0, args.max_time + 1, args.time_step)

    # Get overall stats for order in legend.
    overall_stats = {
        tracker:
        oxuva.dataset_quality(assessments[tracker][iou_threshold]['totals'],
                              enable_bootstrap=bootstrap,
                              num_trials=args.bootstrap_trials)
        for tracker in trackers
    }
    order = _sort_quality(overall_stats, use_bootstrap_mean=False)

    intervals_sec = {}
    points = {}
    for mode in INTERVAL_TYPES:
        intervals_sec[mode], points[mode] = _make_intervals(times_sec, mode)

    stats = {
        mode: {
            tracker: [
                oxuva.dataset_quality_interval(
                    assessments[tracker][iou_threshold]['quantized_totals'],
                    min_time=None if min_time is None else FRAME_RATE *
                    min_time,
                    max_time=None if max_time is None else FRAME_RATE *
                    max_time,
                    enable_bootstrap=bootstrap,
                    num_trials=args.bootstrap_trials)
                for min_time, max_time in intervals_sec[mode]
            ]
            for tracker in trackers
        }
        for mode in INTERVAL_TYPES
    }
    tpr_key = 'TPR_mean' if bootstrap else 'TPR'
    # Find maximum TPR value over all plots (to have same axes).
    max_tpr = {
        mode:
        max(s[tpr_key] for tracker in trackers for s in stats[mode][tracker])
        for mode in INTERVAL_TYPES
    }

    for mode in INTERVAL_TYPES:
        plt.figure(figsize=(args.width_inches, args.height_inches))
        plt.xlabel(INTERVAL_AXIS_LABEL[mode])
        plt.ylabel('True Positive Rate')
        for tracker in order:
            tpr = [s.get(tpr_key, None) for s in stats[mode][tracker]]
            if bootstrap:
                tpr_var = [
                    s.get('TPR_var', None) for s in stats[mode][tracker]
                ]
                plot_func = functools.partial(_errorbar,
                                              yerr=args.errorbar_size *
                                              np.sqrt(tpr_var),
                                              capsize=3)
            else:
                plot_func = plt.plot
            plot_func(1 / 60.0 * np.asarray(points[mode]),
                      tpr,
                      label=names.get(tracker, tracker),
                      marker=markers.get(tracker, None),
                      color=colors.get(tracker, None),
                      markerfacecolor='none',
                      markeredgewidth=2,
                      clip_on=False)
        plt.xlim(xmin=0, xmax=args.max_time / 60.0)
        ymax = max(max_tpr.values()) if args.same_axes else max_tpr[mode]
        plt.ylim(ymin=0, ymax=_ceil_nearest(CLEARANCE * ymax, 0.1))
        plt.grid(color=GRID_COLOR, clip_on=False)
        _hide_spines()
        plot_dir = os.path.join('analysis', args.data, args.challenge)
        _ensure_dir_exists(plot_dir)
        base_name = ('tpr_time_iou_{}_interval_{}'.format(
            oxuva.float2str(iou_threshold), mode) +
                     ('_bootstrap' if bootstrap else ''))
        _save_fig(os.path.join(plot_dir, base_name + '_no_legend.pdf'))
        _legend_outside()
        _save_fig(os.path.join(plot_dir, base_name + '.pdf'))
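
`_make_intervals` is defined elsewhere in the script and is not shown here. Purely to illustrate the shape of data the loop above expects, here is a hypothetical reconstruction that returns `(min_time, max_time)` pairs (with `None` for an unbounded end) plus the x-coordinates to plot them at, assuming 'before' means [0, t], 'after' means everything from t onwards, and 'between' means consecutive windows. This is a guess, not the repository's definition:

def make_intervals_sketch(times_sec, mode):
    times = list(times_sec)
    if mode == 'before':       # [0, t] for every positive t
        intervals = [(0, t) for t in times if t > 0]
        points = [t for t in times if t > 0]
    elif mode == 'after':      # [t, infinity) for every t
        intervals = [(t, None) for t in times]
        points = times
    elif mode == 'between':    # consecutive windows [t_i, t_{i+1}]
        intervals = list(zip(times, times[1:]))
        points = [0.5 * (lo + hi) for lo, hi in intervals]
    else:
        raise ValueError('unknown mode: {}'.format(mode))
    return intervals, points

# make_intervals_sketch(range(0, 181, 60), 'after')
# -> ([(0, None), (60, None), (120, None), (180, None)], [0, 60, 120, 180])
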
Code Example #4
def _plot_tpr_tnr_intervals(assessments,
                            trackers,
                            names=None,
                            colors=None,
                            markers=None):
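    """Plots TPR/TNR restricted to time intervals, for each IOU threshold."""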
    modes = ['before', 'after']
    intervals_sec = {}
    for mode in modes:
        intervals_sec[mode], _ = _make_intervals(args.times, mode)

    bootstrap_modes = [False, True] if args.bootstrap else [False]
    for bootstrap in bootstrap_modes:
        for iou in args.iou_thresholds:
            # Order by performance on all frames.
            stats = {
                tracker:
                oxuva.dataset_quality(assessments[tracker][iou]['totals'],
                                      enable_bootstrap=bootstrap,
                                      num_trials=args.bootstrap_trials)
                for tracker in trackers
            }
            order = _sort_quality(stats, use_bootstrap_mean=False)

            tpr_key = 'TPR_mean' if bootstrap else 'TPR'
            # Get stats for all plots to establish axis range.
            # Note: This means that dataset_quality_interval() is called twice.
            max_tpr = max([
                max([
                    max([
                        oxuva.dataset_quality_interval(
                            assessments[tracker][iou]['quantized_totals'],
                            min_time=None if min_time is None else FRAME_RATE *
                            min_time,
                            max_time=None if max_time is None else FRAME_RATE *
                            max_time,
                            enable_bootstrap=bootstrap,
                            num_trials=args.bootstrap_trials)[tpr_key]
                        for tracker in trackers
                    ]) for min_time, max_time in intervals_sec[mode]
                ]) for mode in modes
            ])

            for mode in modes:
                for min_time_sec, max_time_sec in intervals_sec[mode]:
                    base_name = '_'.join([
                        'tpr_tnr', 'iou_' + oxuva.float2str(iou),
                        'interval_{}_{}'.format(oxuva.float2str(min_time_sec),
                                                oxuva.float2str(max_time_sec))
                    ] + (['bootstrap'] if bootstrap else []))
                    _plot_tpr_tnr(base_name,
                                  assessments,
                                  trackers,
                                  iou,
                                  bootstrap,
                                  posthoc=False,
                                  min_time_sec=min_time_sec,
                                  max_time_sec=max_time_sec,
                                  max_tpr=max_tpr,
                                  order=order,
                                  names=names,
                                  colors=colors,
                                  markers=markers)
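
The nested `max([...])` above evaluates `oxuva.dataset_quality_interval()` for every (mode, interval, tracker) combination just to fix a shared axis limit, and the comment notes the same quantities are computed again for the actual plots. One way to keep that pattern while avoiding the recomputation is to memoise the per-combination score; a small sketch with a hypothetical `interval_tpr()` stand-in (made-up numbers, not the oxuva API):

import functools
import itertools

TRACKERS = ('TrackerA', 'TrackerB')
INTERVALS = ((0, 60), (60, 120), (120, None))

@functools.lru_cache(maxsize=None)
def interval_tpr(tracker, interval):
    # Hypothetical stand-in for oxuva.dataset_quality_interval(...)[tpr_key].
    base = {'TrackerA': 0.45, 'TrackerB': 0.38}[tracker]
    return base - 0.05 * INTERVALS.index(interval)

# One pass fixes the shared y-limit; later per-plot calls hit the cache
# instead of recomputing each (tracker, interval) pair.
max_tpr = max(interval_tpr(t, iv)
              for t, iv in itertools.product(TRACKERS, INTERVALS))
print(max_tpr)  # 0.45
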
Code Example #5
def main():
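    """Parses arguments, loads and merges assessments, then dispatches to a subcommand."""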
    parser = argparse.ArgumentParser(formatter_class=ARGS_FORMATTER)
    _add_arguments(parser)
    global args
    args = parser.parse_args()
    logging.basicConfig(level=getattr(logging, args.loglevel.upper()))

    dataset_names = _get_datasets(args.data)
    # Load tasks without annotations.
    dataset_tasks = {
        dataset: _load_tasks(
            os.path.join(REPO_DIR, 'dataset', 'tasks', dataset + '.csv'))
        for dataset in dataset_names
    }
    # Take union of all datasets.
    tasks = {
        key: task
        for dataset in dataset_names
        for key, task in dataset_tasks[dataset].items()
    }

    tracker_names = _load_tracker_names()
    trackers = set(tracker_names.keys())
    dataset_assessments = {}
    for dataset in dataset_names:
        dataset_assessments[dataset] = _get_assessments(dataset, trackers)
        # Take subset of trackers for which it was possible to load results.
        trackers = set(dataset_assessments[dataset].keys())
    if len(trackers) < 1:
        raise RuntimeError('could not obtain assessment of any trackers')

    # Assign colors and markers alphabetically to achieve invariance across plots.
    trackers = sorted(trackers, key=lambda s: s.lower())
    color_list = _generate_colors(len(trackers))
    tracker_colors = dict(zip(trackers, color_list))
    tracker_markers = dict(zip(trackers, itertools.cycle(MARKERS)))

    # Merge tracks from all datasets.
    # TODO: Ensure that none have same key?
    assessments = {}
    for tracker in trackers:
        assessments[tracker] = {}
        for iou in args.iou_thresholds:
            assessments[tracker][iou] = functools.reduce(
                oxuva.union_dataset_assessment,
                (dataset_assessments[dataset][tracker][iou]
                 for dataset in dataset_names), None)

    # Use simple metrics to get ranking.
    rank_quality = {
        tracker: oxuva.dataset_quality(assessments[tracker][0.5]['totals'],
                                       enable_bootstrap=False)
        for tracker in trackers
    }
    trackers = _sort_quality(rank_quality)
    top_trackers = trackers[:args.top] if args.top else trackers

    if args.subcommand == 'table':
        _print_statistics(assessments, trackers, tracker_names)
    elif args.subcommand == 'plot_tpr_tnr':
        _plot_tpr_tnr_overall(assessments, top_trackers, tracker_names,
                              tracker_colors, tracker_markers)
    elif args.subcommand == 'plot_tpr_tnr_intervals':
        _plot_tpr_tnr_intervals(assessments, top_trackers, tracker_names,
                                tracker_colors, tracker_markers)
    elif args.subcommand == 'plot_tpr_time':
        for iou in args.iou_thresholds:
            for bootstrap in ([False, True] if args.bootstrap else [False]):
                _plot_intervals(assessments, top_trackers, iou, bootstrap,
                                tracker_names, tracker_colors, tracker_markers)
    elif args.subcommand == 'plot_present_absent':
        for iou in args.iou_thresholds:
            for bootstrap in ([False, True] if args.bootstrap else [False]):
                _plot_present_absent(assessments, top_trackers, iou, bootstrap,
                                     tracker_names, tracker_colors,
                                     tracker_markers)
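
The merge step in `main()` folds one assessment per dataset into a single record with `functools.reduce`, using `None` as the initial value so the first dataset initialises the accumulator. A minimal sketch of the same reduce-with-initialiser pattern, with a hypothetical `union()` on plain dicts standing in for `oxuva.union_dataset_assessment`:

import functools

def union(accum, item):
    # Hypothetical stand-in for oxuva.union_dataset_assessment: sum counters.
    if accum is None:
        return dict(item)
    return {key: accum.get(key, 0) + item.get(key, 0)
            for key in set(accum) | set(item)}

# One 'totals'-like dict per dataset for a single tracker (made-up numbers).
per_dataset = [
    {'TP': 10, 'FN': 2},
    {'TP': 7, 'FN': 5, 'TN': 3},
]

merged = functools.reduce(union, per_dataset, None)
print(merged)  # TP: 17, FN: 7, TN: 3 (key order may vary)
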