Example #1
def boxiou(a, b):
    """Computes IOU of two rectangles."""
    a_min, a_max = rect_min_max(a)
    b_min, b_max = rect_min_max(b)
    # Compute intersection.
    i_min = np.maximum(a_min, b_min)
    i_max = np.minimum(a_max, b_max)
    i_size = np.maximum(i_max - i_min, 0)
    i_vol = np.prod(i_size, axis=-1)
    # Get volume of union.
    a_size = np.maximum(a_max - a_min, 0)
    b_size = np.maximum(b_max - b_min, 0)
    a_vol = np.prod(a_size, axis=-1)
    b_vol = np.prod(b_size, axis=-1)
    u_vol = a_vol + b_vol - i_vol
    return np.where(i_vol == 0, np.zeros_like(i_vol, dtype=np.float64),
                    math_util.quiet_divide(i_vol, u_vol))
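This snippet assumes a rect_min_max helper and motmetrics' math_util module. A minimal sketch of the helper plus a sample call, assuming boxes are stored as [x, y, width, height] (the layout is a guess, not confirmed by the source):

import numpy as np
from motmetrics import math_util  # provides quiet_divide

def rect_min_max(r):
    # Hypothetical helper: split an [x, y, width, height] box into
    # its (min corner, max corner) pair.
    r = np.asarray(r)
    r_min = r[..., :2]
    r_max = r_min + r[..., 2:]
    return r_min, r_max

a = np.array([0., 0., 4., 4.])  # 4x4 box at the origin
b = np.array([2., 2., 4., 4.])  # overlapping 4x4 box
print(boxiou(a, b))             # intersection 4, union 28 -> ~0.143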
Example #2
def eval_single_class(names, accs):
    """Evaluate CLEAR MOT results for each class."""
    mh = mm.metrics.create()
    summary = mh.compute_many(accs,
                              names=names,
                              metrics=METRIC_MAPS.keys(),
                              generate_overall=True)
    results = [v['OVERALL'] for k, v in summary.to_dict().items()]
    motp_ind = list(METRIC_MAPS).index('motp')
    if np.isnan(results[motp_ind]):
        num_dets = mh.compute_many(accs,
                                   names=names,
                                   metrics=['num_detections'],
                                   generate_overall=True)
        sum_motp = (summary['motp'] * num_dets['num_detections']).sum()
        motp = quiet_divide(sum_motp, num_dets['num_detections']['OVERALL'])
        results[motp_ind] = float(1 - motp)
    return results
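A sketch of how the names/accs inputs could be produced with motmetrics accumulators; the frame contents below are invented, and METRIC_MAPS is assumed to be an ordered mapping of motmetrics metric names:

import motmetrics as mm
import numpy as np

acc = mm.MOTAccumulator(auto_id=True)
# One toy frame: ground-truth ids 1 and 2, hypothesis ids 'a' and 'b',
# and their pairwise distance matrix (nan = infeasible match).
acc.update([1, 2], ['a', 'b'], [[0.1, np.nan], [np.nan, 0.2]])
results = eval_single_class(['seq0'], [acc])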
Example #3
def aggregate_eval_results(summary,
                           metrics,
                           cats,
                           mh,
                           generate_overall=True,
                           class_average=False):
    if generate_overall and not class_average:
        cats.append('OVERALL')
    new_summary = pd.DataFrame(columns=metrics)
    for cat in cats:
        s = summary[summary.index.str.startswith(
            str(cat))] if cat != 'OVERALL' else summary
        res_sum = s.sum()
        new_res = []
        for metric in metrics:
            if metric == 'mota':
                res = 1. - quiet_divide(
                    res_sum['num_misses'] + res_sum['num_switches'] +
                    res_sum['num_false_positives'], res_sum['num_objects'])
            elif metric == 'motp':
                res = quiet_divide((s['motp'] * s['num_detections']).sum(),
                                   res_sum['num_detections'])
            elif metric == 'idf1':
                res = quiet_divide(
                    2 * res_sum['idtp'],
                    res_sum['num_objects'] + res_sum['num_predictions'])
            else:
                res = res_sum[metric]
            new_res.append(res)
        new_summary.loc[cat] = new_res

    new_summary['motp'] = (1 - new_summary['motp']) * 100

    if generate_overall and class_average:
        new_res = []
        res_average = new_summary.fillna(0).mean()
        res_sum = new_summary.sum()
        for metric in metrics:
            if metric in ['mota', 'motp', 'idf1']:
                new_res.append(res_average[metric])
            else:
                new_res.append(res_sum[metric])
        new_summary.loc['OVERALL'] = new_res

    dtypes = [
        'float' if m in ['mota', 'motp', 'idf1'] else 'int' for m in metrics
    ]
    dtypes = {m: d for m, d in zip(metrics, dtypes)}
    new_summary = new_summary.astype(dtypes)

    strsummary = mm.io.render_summary(
        new_summary,
        formatters=mh.formatters,
        namemap={
            'mostly_tracked': 'MT',
            'mostly_lost': 'ML',
            'num_false_positives': 'FP',
            'num_misses': 'FN',
            'num_switches': 'IDs',
            'mota': 'MOTA',
            'motp': 'MOTP',
            'idf1': 'IDF1'
        })
    print(strsummary)
    return new_summary
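Driving this function could look roughly like the following sketch, reusing an accs list built as in Example #2; the category-prefixed sequence names ('car-seq0', 'ped-seq0') are an assumption implied by the startswith() grouping above:

import motmetrics as mm

mh = mm.metrics.create()
metrics = ['num_misses', 'num_false_positives', 'num_switches',
           'num_detections', 'num_objects', 'idtp', 'num_predictions',
           'mota', 'motp', 'idf1']
summary = mh.compute_many(accs, names=['car-seq0', 'ped-seq0'],
                          metrics=metrics)
aggregate_eval_results(summary, metrics, cats=['car', 'ped'], mh=mh)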
Example #4
def motp_m(partials, num_detections):
    res = 0
    for v in partials:
        res += v['motp'] * v['num_detections']
    return math_util.quiet_divide(res, num_detections)
Example #5
def idf1_m(partials, idtp, num_objects, num_predictions):
    del partials  # unused
    return math_util.quiet_divide(2 * idtp, num_objects + num_predictions)
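The *_m variants follow motmetrics' convention for merge functions: they rebuild the OVERALL value from per-sequence partial results instead of a plain sum. A toy check with invented partials (plain dicts standing in for per-sequence rows):

partials = [{'motp': 0.2, 'num_detections': 50},
            {'motp': 0.4, 'num_detections': 150}]
# Detection-weighted mean: (0.2 * 50 + 0.4 * 150) / 200 = 0.35
print(motp_m(partials, num_detections=200))  # -> 0.35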
Example #6
def precision_overall(summary_df, overall_dic):
    del summary_df  # unused
    precision = quiet_divide(
        overall_dic['num_detections'],
        overall_dic['num_false_positives'] + overall_dic['num_detections'])
    return precision
Example #7
def idr_overall(summary_df, overall_dic):
    del summary_df  # unused
    idr = quiet_divide(overall_dic['idtp'],
                       overall_dic['idtp'] + overall_dic['idfn'])
    return idr
Example #8
def recall_m(partials, num_detections, num_objects):
    del partials  # unused
    return math_util.quiet_divide(num_detections, num_objects)
Example #9
def idp(df, idtp, idfp):
    """ID measures: global min-cost precision."""
    del df  # unused
    return math_util.quiet_divide(idtp, idtp + idfp)
Example #10
def precision_m(partials, num_detections, num_false_positives):
    del partials  # unused
    return math_util.quiet_divide(num_detections,
                                  num_false_positives + num_detections)
Example #11
def recall(df, num_detections, num_objects):
    """Number of detections over number of objects."""
    del df  # unused
    return math_util.quiet_divide(num_detections, num_objects)
Example #12
def precision(df, num_detections, num_false_positives):
    """Number of detected objects over sum of detected and false positives."""
    del df  # unused
    return math_util.quiet_divide(num_detections,
                                  num_false_positives + num_detections)
Example #13
def mota_m(partials, num_misses, num_switches, num_false_positives,
           num_objects):
    del partials  # unused
    return 1. - math_util.quiet_divide(
        num_misses + num_switches + num_false_positives, num_objects)
Example #14
def mota(df, num_misses, num_switches, num_false_positives, num_objects):
    """Multiple object tracker accuracy."""
    del df  # unused
    return 1. - math_util.quiet_divide(
        num_misses + num_switches + num_false_positives, num_objects)
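Plugging invented counts into the formula: 10 misses, 2 switches and 8 false positives over 100 ground-truth objects give MOTA = 1 - 20/100 = 0.80.

print(mota(None, num_misses=10, num_switches=2,
           num_false_positives=8, num_objects=100))  # -> 0.8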
Example #15
def motp_overall(summary_df, overall_dic):
    motp = quiet_divide(
        (summary_df['motp'] * summary_df['num_detections']).sum(),
        overall_dic['num_detections'])
    return motp
Example #16
def idr(df, idtp, idfn):
    """ID measures: global min-cost recall."""
    del df  # unused
    return math_util.quiet_divide(idtp, idtp + idfn)
Example #17
def mota_overall(summary_df, overall_dic):
    del summary_df  # unused
    mota = 1. - quiet_divide(
        overall_dic['num_misses'] + overall_dic['num_switches'] +
        overall_dic['num_false_positives'], overall_dic['num_objects'])
    return mota
Example #18
def idr_m(partials, idtp, idfn):
    del partials  # unused
    return math_util.quiet_divide(idtp, idtp + idfn)
Example #19
def recall_overall(summary_df, overall_dic):
    del summary_df  # unused
    recall = quiet_divide(overall_dic['num_detections'],
                          overall_dic['num_objects'])
    return recall
Example #20
def idf1(df, idtp, num_objects, num_predictions):
    """ID measures: global min-cost F1 score."""
    del df  # unused
    return math_util.quiet_divide(2 * idtp, num_objects + num_predictions)
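Again with invented counts: 30 correctly identified detections against 50 ground-truth objects and 40 predictions give IDF1 = 2*30 / (50+40) ≈ 0.667.

print(idf1(None, idtp=30, num_objects=50, num_predictions=40))  # -> ~0.667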
Example #21
def idf1_overall(summary_df, overall_dic):
    del summary_df  # unused
    idf1 = quiet_divide(2. * overall_dic['idtp'],
                        overall_dic['num_objects'] +
                        overall_dic['num_predictions'])
    return idf1
Example #22
def motp(df, num_detections):
    """Multiple object tracker precision."""
    return math_util.quiet_divide(df.noraw['D'].sum(), num_detections)