Example #1
# imports assumed by this snippet (eval_helpers, Joint, evaluateAP and
# evaluateTracking are project-local modules of the evaluation code)
import os

import numpy as np

import eval_helpers
from eval_helpers import Joint
from evaluateAP import evaluateAP
from evaluateTracking import evaluateTracking


def main():

    args = parseArgs()
    print(args)
    argv = ['', args.groundTruth, args.predictions]

    print("Loading data")
    gtFramesAll, prFramesAll = eval_helpers.load_data_dir(argv)

    print("# gt frames  :", len(gtFramesAll))
    print("# pred frames:", len(prFramesAll))

    if (not os.path.exists(args.outputDir)):
        os.makedirs(args.outputDir)

    if (args.evalPoseEstimation):
        #####################################################
        # evaluate per-frame multi-person pose estimation (AP)

        # compute AP
        print "Evaluation of per-frame multi-person pose estimation"
        apAll, preAll, recAll = evaluateAP(gtFramesAll, prFramesAll,
                                           args.outputDir, True,
                                           args.saveEvalPerSequence)

        # print AP
        print "Average Precision (AP) metric:"
        eval_helpers.printTable(apAll)

    if (args.evalPoseTracking):
        #####################################################
        # evaluate multi-person pose tracking in video (MOTA)

        # compute MOTA
        print "Evaluation of video-based  multi-person pose tracking"
        metricsAll = evaluateTracking(gtFramesAll, prFramesAll, args.outputDir,
                                      True, args.saveEvalPerSequence)

        metrics = np.zeros([Joint().count + 4, 1])
        for i in range(Joint().count + 1):
            metrics[i, 0] = metricsAll['mota'][0, i]
        metrics[Joint().count + 1, 0] = metricsAll['motp'][0, Joint().count]
        metrics[Joint().count + 2, 0] = metricsAll['pre'][0, Joint().count]
        metrics[Joint().count + 3, 0] = metricsAll['rec'][0, Joint().count]

        # print MOT metrics
        print("Multiple Object Tracking (MOT) metrics:")
        eval_helpers.printTable(metrics, motHeader=True)
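
Example #1 relies on a parseArgs() helper that is not part of the snippet. A minimal sketch of what such a parser might look like, assuming argparse; the flag names are illustrative, only the attribute names (groundTruth, predictions, outputDir, evalPoseEstimation, evalPoseTracking, saveEvalPerSequence) are taken from the code above.

import argparse

def parseArgs():
    # Hypothetical parser; only the attribute names are grounded in Example #1.
    parser = argparse.ArgumentParser(
        description="Evaluate multi-person pose estimation and tracking")
    parser.add_argument("-g", "--groundTruth", required=True,
                        help="directory with ground-truth JSON files")
    parser.add_argument("-p", "--predictions", required=True,
                        help="directory with prediction JSON files")
    parser.add_argument("-o", "--outputDir", default="./out",
                        help="where to write the metric JSON files")
    parser.add_argument("-e", "--evalPoseEstimation", action="store_true",
                        help="evaluate per-frame pose estimation (AP)")
    parser.add_argument("-t", "--evalPoseTracking", action="store_true",
                        help="evaluate pose tracking (MOT metrics)")
    parser.add_argument("-s", "--saveEvalPerSequence", action="store_true",
                        help="also save per-sequence results")
    return parser.parse_args()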
Example #2
def evaluateAP(gtFramesAll, prFramesAll, outputDir, bSaveAll=True, bSaveSeq=False):

    distThresh = 0.5

    seqidxs = []
    for imgidx in range(len(gtFramesAll)):
        seqidxs += [gtFramesAll[imgidx]["seq_id"]]
    seqidxs = np.array(seqidxs)

    seqidxsUniq = np.unique(seqidxs)
    nSeq = len(seqidxsUniq)

    names = Joint().name
    names['15'] = 'total'

    if (bSaveSeq):
        for si in range(nSeq):
            print("seqidx: %d/%d" % (si+1,nSeq))

            # extract frames IDs for the sequence
            imgidxs = np.argwhere(seqidxs == seqidxsUniq[si])
            seqName = gtFramesAll[imgidxs[0,0]]["seq_name"]

            gtFrames = [gtFramesAll[imgidx] for imgidx in imgidxs.flatten().tolist()]
            prFrames = [prFramesAll[imgidx] for imgidx in imgidxs.flatten().tolist()]

            # assign predicted poses to GT poses
            scores, labels, nGT, _ = eval_helpers.assignGTmulti(gtFrames, prFrames, distThresh)

            # compute average precision (AP), precision and recall per part
            ap, pre, rec = computeMetrics(scores, labels, nGT)
            metricsSeq = {'ap': ap.flatten().tolist(),
                          'pre': pre.flatten().tolist(),
                          'rec': rec.flatten().tolist(),
                          'names': names}

            filename = outputDir + '/' + seqName + '_AP_metrics.json'
            print('saving results to', filename)
            eval_helpers.writeJson(metricsSeq, filename)

    # assign predicted poses to GT poses
    scoresAll, labelsAll, nGTall, _ = eval_helpers.assignGTmulti(gtFramesAll, prFramesAll, distThresh)

    # compute average precision (AP), precision and recall per part
    apAll, preAll, recAll = computeMetrics(scoresAll, labelsAll, nGTall)
    if (bSaveAll):
        metrics = {'ap': apAll.flatten().tolist(),
                   'pre': preAll.flatten().tolist(),
                   'rec': recAll.flatten().tolist(),
                   'names': names}
        filename = outputDir + '/total_AP_metrics.json'
        print('saving results to', filename)
        eval_helpers.writeJson(metrics, filename)

    return apAll, preAll, recAll
Example #3
def evaluateAP(gtFramesAll, prFramesAll, bSaveAll=True, bSaveSeq=False):

    distThresh = 0.5

    names = Joint().name
    names['17'] = 'total'

    # assign predicted poses to GT poses
    scoresAll, labelsAll, nGTall = eval_helpers.assignGTmulti(gtFramesAll, prFramesAll, distThresh)

    # compute average precision (AP), precision and recall per part
    apAll, preAll, recAll = computeMetrics(scoresAll, labelsAll, nGTall)
    if (bSaveAll):
        metrics = {'ap': apAll.flatten().tolist(),
                   'pre': preAll.flatten().tolist(),
                   'rec': recAll.flatten().tolist(),
                   'names': names}
        filename = './total_AP_metrics.json'
        eval_helpers.writeJson(metrics, filename)

    return apAll, preAll, recAll
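
Both evaluateAP variants delegate to a computeMetrics() helper that is not shown here. As a rough single-part illustration of the underlying AP computation (a hypothetical sketch, assuming scores are detection confidences, labels mark correct assignments and nGT is the ground-truth count; the real helper works per joint and may use a different interpolation):

import numpy as np

def computeAPSinglePart(scores, labels, nGT):
    # Illustrative sketch only: sort detections by confidence, accumulate
    # true/false positives, and integrate precision over recall.
    scores = np.asarray(scores, dtype=float)
    labels = np.asarray(labels, dtype=int)
    if scores.size == 0 or nGT == 0:
        return 0.0, np.zeros(0), np.zeros(0)
    order = np.argsort(-scores)
    tp = np.cumsum(labels[order] == 1)
    fp = np.cumsum(labels[order] == 0)
    rec = tp / float(nGT)
    pre = tp / np.maximum(tp + fp, 1e-12)
    # area under the precision-recall curve, scaled to percentages
    ap = float(np.sum(np.diff(np.concatenate(([0.0], rec))) * pre))
    return 100 * ap, 100 * pre, 100 * rec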
Example #4
def computeMetrics(gtFramesAll, motAll, outputDir, bSaveAll, bSaveSeq):

    assert (len(gtFramesAll) == len(motAll))

    nJoints = Joint().count
    seqidxs = []
    for imgidx in range(len(gtFramesAll)):
        seqidxs += [gtFramesAll[imgidx]["seq_id"]]
    seqidxs = np.array(seqidxs)

    seqidxsUniq = np.unique(seqidxs)

    # intermediate metrics
    metricsMidNames = [
        'num_misses', 'num_switches', 'num_false_positives', 'num_objects',
        'num_detections'
    ]

    # final metrics computed from intermediate metrics
    metricsFinNames = ['mota', 'motp', 'pre', 'rec']

    # initialize intermediate metrics
    metricsMidAll = {}
    for name in metricsMidNames:
        metricsMidAll[name] = np.zeros([1, nJoints])
    metricsMidAll['sumD'] = np.zeros([1, nJoints])

    # initialize final metrics
    metricsFinAll = {}
    for name in metricsFinNames:
        metricsFinAll[name] = np.zeros([1, nJoints + 1])

    # create metrics
    mh = mm.metrics.create()

    imgidxfirst = 0
    # iterate over tracking sequences
    # seqidxsUniq = seqidxsUniq[:20]
    nSeq = len(seqidxsUniq)

    # initialize per-sequence metrics
    metricsSeqAll = {}
    for si in range(nSeq):
        metricsSeqAll[si] = {}
        for name in metricsFinNames:
            metricsSeqAll[si][name] = np.zeros([1, nJoints + 1])

    names = Joint().name
    names['15'] = 'total'

    for si in range(nSeq):
        print("seqidx: %d/%d" % (si + 1, nSeq))

        # init per-joint metrics accumulator
        accAll = {}
        for i in range(nJoints):
            accAll[i] = mm.MOTAccumulator(auto_id=True)

        # extract frames IDs for the sequence
        imgidxs = np.argwhere(seqidxs == seqidxsUniq[si])
        # drop the last frame of each sequence (annotation workaround; see the
        # DEBUG note in Example #6)
        imgidxs = imgidxs[:-1].copy()
        seqName = gtFramesAll[imgidxs[0, 0]]["seq_name"]
        print(seqName)
        # create an accumulator that will be updated during each frame
        # iterate over frames
        for j in range(len(imgidxs)):
            imgidx = imgidxs[j, 0]
            # iterate over joints
            for i in range(nJoints):
                # GT tracking ID
                trackidxGT = motAll[imgidx][i]["trackidxGT"]
                # prediction tracking ID
                trackidxPr = motAll[imgidx][i]["trackidxPr"]
                # distance GT <-> pred part to compute MOT metrics
                # 'NaN' means force no match
                dist = motAll[imgidx][i]["dist"]
                # update this joint's accumulator once per frame
                accAll[i].update(
                    trackidxGT,  # Ground truth objects in this frame
                    trackidxPr,  # Detector hypotheses in this frame
                    dist  # Distances from objects to hypotheses
                )

        # compute intermediate metrics per joint per sequence
        for i in range(nJoints):
            metricsMid = mh.compute(accAll[i],
                                    metrics=metricsMidNames,
                                    return_dataframe=False,
                                    name='acc')
            for name in metricsMidNames:
                metricsMidAll[name][0, i] += metricsMid[name]
            s = accAll[i].events['D'].sum()
            if (np.isnan(s)):
                s = 0
            metricsMidAll['sumD'][0, i] += s

        if (bSaveSeq):
            # compute metrics per joint per sequence
            for i in range(nJoints):
                metricsMid = mh.compute(accAll[i],
                                        metrics=metricsMidNames,
                                        return_dataframe=False,
                                        name='acc')

                # compute final metrics per sequence
                if (metricsMid['num_objects'] > 0):
                    numObj = metricsMid['num_objects']
                else:
                    numObj = np.nan
                numFP = metricsMid['num_false_positives']
                metricsSeqAll[si]['mota'][0, i] = 100 * (
                    1. - 1. * (metricsMid['num_misses'] +
                               metricsMid['num_switches'] + numFP) / numObj)
                numDet = metricsMid['num_detections']
                s = accAll[i].events['D'].sum()
                if (numDet == 0 or np.isnan(s)):
                    metricsSeqAll[si]['motp'][0, i] = 0.0
                else:
                    metricsSeqAll[si]['motp'][0, i] = 100 * (1. -
                                                             (1. * s / numDet))
                if (numFP + numDet > 0):
                    totalDet = numFP + numDet
                else:
                    totalDet = np.nan
                metricsSeqAll[si]['pre'][0, i] = 100 * (1. * numDet / totalDet)
                metricsSeqAll[si]['rec'][0, i] = 100 * (1. * numDet / numObj)

            # average metrics over all joints per sequence
            idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['mota'][0, :nJoints]))
            metricsSeqAll[si]['mota'][0, nJoints] = metricsSeqAll[si]['mota'][0, idxs].mean()
            idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['motp'][0, :nJoints]))
            metricsSeqAll[si]['motp'][0, nJoints] = metricsSeqAll[si]['motp'][0, idxs].mean()
            idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['pre'][0, :nJoints]))
            metricsSeqAll[si]['pre'][0, nJoints] = metricsSeqAll[si]['pre'][0, idxs].mean()
            idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['rec'][0, :nJoints]))
            metricsSeqAll[si]['rec'][0, nJoints] = metricsSeqAll[si]['rec'][0, idxs].mean()

            metricsSeq = metricsSeqAll[si].copy()
            metricsSeq['mota'] = metricsSeq['mota'].flatten().tolist()
            metricsSeq['motp'] = metricsSeq['motp'].flatten().tolist()
            metricsSeq['pre'] = metricsSeq['pre'].flatten().tolist()
            metricsSeq['rec'] = metricsSeq['rec'].flatten().tolist()
            metricsSeq['names'] = names

            filename = outputDir + '/' + seqName + '_MOT_metrics.json'
            print('saving results to', filename)
            eval_helpers.writeJson(metricsSeq, filename)

    # compute final metrics per joint for all sequences
    for i in range(nJoints):
        if (metricsMidAll['num_objects'][0, i] > 0):
            numObj = metricsMidAll['num_objects'][0, i]
        else:
            numObj = np.nan
        numFP = metricsMidAll['num_false_positives'][0, i]
        metricsFinAll['mota'][0, i] = 100 * (
            1. - (metricsMidAll['num_misses'][0, i] +
                  metricsMidAll['num_switches'][0, i] + numFP) / numObj)
        numDet = metricsMidAll['num_detections'][0, i]
        s = metricsMidAll['sumD'][0, i]
        if (numDet == 0 or np.isnan(s)):
            metricsFinAll['motp'][0, i] = 0.0
        else:
            metricsFinAll['motp'][0, i] = 100 * (1. - (s / numDet))
        if (numFP + numDet > 0):
            totalDet = numFP + numDet
        else:
            totalDet = np.nan

        metricsFinAll['pre'][0, i] = 100 * (1. * numDet / totalDet)
        metricsFinAll['rec'][0, i] = 100 * (1. * numDet / numObj)

    # average metrics over all joints over all sequences
    idxs = np.argwhere(~np.isnan(metricsFinAll['mota'][0, :nJoints]))
    metricsFinAll['mota'][0, nJoints] = metricsFinAll['mota'][0, idxs].mean()
    idxs = np.argwhere(~np.isnan(metricsFinAll['motp'][0, :nJoints]))
    metricsFinAll['motp'][0, nJoints] = metricsFinAll['motp'][0, idxs].mean()
    idxs = np.argwhere(~np.isnan(metricsFinAll['pre'][0, :nJoints]))
    metricsFinAll['pre'][0, nJoints] = metricsFinAll['pre'][0, idxs].mean()
    idxs = np.argwhere(~np.isnan(metricsFinAll['rec'][0, :nJoints]))
    metricsFinAll['rec'][0, nJoints] = metricsFinAll['rec'][0, idxs].mean()

    if (bSaveAll):
        metricsFin = metricsFinAll.copy()
        metricsFin['mota'] = metricsFin['mota'].flatten().tolist()
        metricsFin['motp'] = metricsFin['motp'].flatten().tolist()
        metricsFin['pre'] = metricsFin['pre'].flatten().tolist()
        metricsFin['rec'] = metricsFin['rec'].flatten().tolist()
        metricsFin['names'] = names

        filename = outputDir + '/total_MOT_metrics.json'
        print('saving results to', filename)
        eval_helpers.writeJson(metricsFin, filename)

    return metricsFinAll
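
The tracking examples build on the accumulator pattern of the motmetrics package (imported as mm above). A tiny standalone illustration of that pattern, with made-up track IDs and distances:

import numpy as np
import motmetrics as mm

# the examples keep one accumulator per joint; here a single toy accumulator
acc = mm.MOTAccumulator(auto_id=True)

# frame 1: two GT tracks, two hypotheses, pairwise distances (NaN = no match allowed)
acc.update([1, 2], [1, 2], [[0.1, np.nan],
                            [np.nan, 0.2]])
# frame 2: GT track 2 is missed, hypothesis 3 is a false positive
acc.update([1, 2], [1, 3], [[0.1, np.nan],
                            [np.nan, np.nan]])

mh = mm.metrics.create()
summary = mh.compute(acc,
                     metrics=['num_misses', 'num_switches',
                              'num_false_positives', 'num_objects',
                              'num_detections'],
                     return_dataframe=False,
                     name='acc')
print(summary)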
Example #5
def computeMetrics(gtFramesAll, motAll):

    assert (len(gtFramesAll) == len(motAll))

    pool = mp.Pool(min(12, len(gtFramesAll) + 1))

    nJoints = Joint().count
    seqidxs = []
    for imgidx in range(len(gtFramesAll)):
        seqidxs += [gtFramesAll[imgidx]["seq_id"]]
    seqidxs = np.array(seqidxs)

    seqidxsUniq = np.unique(seqidxs)

    # intermediate metrics
    metricsMidNames = [
        'num_misses', 'num_switches', 'num_false_positives', 'num_objects',
        'num_detections'
    ]

    # final metrics computed from intermediate metrics
    metricsFinNames = ['mota', 'motp', 'pre', 'rec']

    # initialize intermediate metrics
    metricsMidAll = {}
    for name in metricsMidNames:
        metricsMidAll[name] = np.zeros([1, nJoints])
    metricsMidAll['sumD'] = np.zeros([1, nJoints])

    # initialize final metrics
    metricsFinAll = {}
    for name in metricsFinNames:
        metricsFinAll[name] = np.zeros([1, nJoints + 1])

    # iterate over tracking sequences
    # seqidxsUniq = seqidxsUniq[:20]
    nSeq = len(seqidxsUniq)

    res_all_metricsMid = pool.map(
        partial(process_frame,
                nJoints=nJoints,
                seqidxs=seqidxs,
                seqidxsUniq=seqidxsUniq,
                motAll=motAll,
                metricsMidNames=metricsMidNames), range(nSeq))
    pool.close()
    pool.join()
    for si in range(nSeq):
        # compute intermediate metrics per joint per sequence
        all_metricsMid, accAll = res_all_metricsMid[si]
        for i in range(nJoints):
            metricsMid = all_metricsMid[i]
            for name in metricsMidNames:
                metricsMidAll[name][0, i] += metricsMid[name]
            metricsMidAll['sumD'][0, i] += accAll[i].events['D'].sum()

    # compute final metrics per joint for all sequences
    for i in range(nJoints):
        metricsFinAll['mota'][0, i] = 100 * (
            1. -
            (metricsMidAll['num_misses'][0, i] + metricsMidAll['num_switches'][
                0, i] + metricsMidAll['num_false_positives'][0, i]) /
            metricsMidAll['num_objects'][0, i])
        numDet = metricsMidAll['num_detections'][0, i]
        s = metricsMidAll['sumD'][0, i]
        if (numDet == 0 or np.isnan(s)):
            metricsFinAll['motp'][0, i] = 0.0
        else:
            metricsFinAll['motp'][0, i] = 100 * (1. - (s / numDet))
        metricsFinAll['pre'][0, i] = 100 * (
            metricsMidAll['num_detections'][0, i] /
            (metricsMidAll['num_detections'][0, i] +
             metricsMidAll['num_false_positives'][0, i]))
        metricsFinAll['rec'][0, i] = 100 * (
            metricsMidAll['num_detections'][0, i] /
            metricsMidAll['num_objects'][0, i])

    # average metrics over all joints over all sequences
    metricsFinAll['mota'][0, nJoints] = metricsFinAll['mota'][0, :nJoints].mean()
    metricsFinAll['motp'][0, nJoints] = metricsFinAll['motp'][0, :nJoints].mean()
    metricsFinAll['pre'][0, nJoints] = metricsFinAll['pre'][0, :nJoints].mean()
    metricsFinAll['rec'][0, nJoints] = metricsFinAll['rec'][0, :nJoints].mean()

    return metricsFinAll
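
Example #5 parallelises the per-sequence loop with multiprocessing (mp) and functools.partial, but the process_frame worker is not shown. A sketch of what it presumably does, mirroring the serial per-sequence body of Example #6; the function name comes from the call above, everything else is inferred from how its return value is consumed.

import numpy as np
import motmetrics as mm

def process_frame(si, nJoints, seqidxs, seqidxsUniq, motAll, metricsMidNames):
    # one MOT accumulator per joint for this sequence
    accAll = {}
    for i in range(nJoints):
        accAll[i] = mm.MOTAccumulator(auto_id=True)

    # frames belonging to this sequence
    # (the serial version in Example #6 additionally drops the last frame)
    imgidxs = np.argwhere(seqidxs == seqidxsUniq[si])
    for j in range(len(imgidxs)):
        imgidx = imgidxs[j, 0]
        for i in range(nJoints):
            accAll[i].update(
                motAll[imgidx][i]["trackidxGT"],   # GT tracks in this frame
                motAll[imgidx][i]["trackidxPr"],   # predicted tracks
                motAll[imgidx][i]["dist"])         # GT<->pred distances (NaN = no match)

    # intermediate metrics per joint for this sequence
    mh = mm.metrics.create()
    all_metricsMid = {}
    for i in range(nJoints):
        all_metricsMid[i] = mh.compute(accAll[i],
                                       metrics=metricsMidNames,
                                       return_dataframe=False,
                                       name='acc')
    return all_metricsMid, accAll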
Example #6
def computeMetrics(gtFramesAll, motAll):

    assert (len(gtFramesAll) == len(motAll))

    nJoints = Joint().count
    seqidxs = []
    for imgidx in range(len(gtFramesAll)):
        seqidxs += [gtFramesAll[imgidx]["seq_id"]]
    seqidxs = np.array(seqidxs)

    seqidxsUniq = np.unique(seqidxs)

    # intermediate metrics
    metricsMidNames = [
        'num_misses', 'num_switches', 'num_false_positives', 'num_objects',
        'num_detections'
    ]

    # final metrics computed from intermediate metrics
    metricsFinNames = ['mota', 'motp', 'pre', 'rec']

    # initialize intermediate metrics
    metricsMidAll = {}
    for name in metricsMidNames:
        metricsMidAll[name] = np.zeros([1, nJoints])
    metricsMidAll['sumD'] = np.zeros([1, nJoints])

    # initialize final metrics
    metricsFinAll = {}
    for name in metricsFinNames:
        metricsFinAll[name] = np.zeros([1, nJoints + 1])

    # create metrics
    mh = mm.metrics.create()

    imgidxfirst = 0
    # iterate over tracking sequences
    # seqidxsUniq = seqidxsUniq[:20]
    nSeq = len(seqidxsUniq)
    for si in range(nSeq):
        print "seqidx: %d/%d" % (si + 1, nSeq)

        # init per-joint metrics accumulator
        accAll = {}
        for i in range(nJoints):
            accAll[i] = mm.MOTAccumulator(auto_id=True)

        # extract frames IDs for the sequence
        imgidxs = np.argwhere(seqidxs == seqidxsUniq[si])
        # DEBUG: remove the last frame of each sequence from evaluation due to buggy annotations
        print "DEBUG: remove last frame from eval until annotations are fixed"
        imgidxs = imgidxs[:-1].copy()
        # create an accumulator that will be updated during each frame
        # iterate over frames
        for j in range(len(imgidxs)):
            imgidx = imgidxs[j, 0]
            # iterate over joints
            for i in range(nJoints):
                # GT tracking ID
                trackidxGT = motAll[imgidx][i]["trackidxGT"]
                # prediction tracking ID
                trackidxPr = motAll[imgidx][i]["trackidxPr"]
                # distance GT <-> pred part to compute MOT metrics
                # 'NaN' means force no match
                dist = motAll[imgidx][i]["dist"]
                # update this joint's accumulator once per frame
                accAll[i].update(
                    trackidxGT,  # Ground truth objects in this frame
                    trackidxPr,  # Detector hypotheses in this frame
                    dist  # Distances from objects to hypotheses
                )

        # compute intermediate metrics per joint per sequence
        for i in range(nJoints):
            metricsMid = mh.compute(accAll[i],
                                    metrics=metricsMidNames,
                                    return_dataframe=False,
                                    name='acc')
            for name in metricsMidNames:
                metricsMidAll[name][0, i] += metricsMid[name]
            metricsMidAll['sumD'][0, i] += accAll[i].events['D'].sum()

    # compute final metrics per joint for all sequences
    for i in range(nJoints):
        metricsFinAll['mota'][0, i] = 100 * (
            1. -
            (metricsMidAll['num_misses'][0, i] + metricsMidAll['num_switches'][
                0, i] + metricsMidAll['num_false_positives'][0, i]) /
            metricsMidAll['num_objects'][0, i])
        numDet = metricsMidAll['num_detections'][0, i]
        s = metricsMidAll['sumD'][0, i]
        if (numDet == 0 or np.isnan(s)):
            metricsFinAll['motp'][0, i] = 0.0
        else:
            metricsFinAll['motp'][0, i] = 100 * (1. - (s / numDet))
        metricsFinAll['pre'][0, i] = 100 * (
            metricsMidAll['num_detections'][0, i] /
            (metricsMidAll['num_detections'][0, i] +
             metricsMidAll['num_false_positives'][0, i]))
        metricsFinAll['rec'][0, i] = 100 * (
            metricsMidAll['num_detections'][0, i] /
            metricsMidAll['num_objects'][0, i])

    # average metrics over all joints over all sequences
    metricsFinAll['mota'][0, nJoints] = metricsFinAll['mota'][0, :nJoints].mean()
    metricsFinAll['motp'][0, nJoints] = metricsFinAll['motp'][0, :nJoints].mean()
    metricsFinAll['pre'][0, nJoints] = metricsFinAll['pre'][0, :nJoints].mean()
    metricsFinAll['rec'][0, nJoints] = metricsFinAll['rec'][0, :nJoints].mean()

    return metricsFinAll
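
For reference, the per-joint formulas used in the final-metrics loops of Examples #4 to #6, collected into a small standalone helper (illustrative only, not part of the toolkit; assumes num_objects > 0):

def finalizeJointMetrics(num_misses, num_switches, num_false_positives,
                         num_objects, num_detections, sumD):
    # MOTA: 100 * (1 - (misses + ID switches + false positives) / GT objects)
    mota = 100.0 * (1.0 - (num_misses + num_switches + num_false_positives)
                    / float(num_objects))
    # MOTP: 100 * (1 - mean matching distance); 0 if nothing was detected
    motp = 0.0 if num_detections == 0 else 100.0 * (1.0 - sumD / float(num_detections))
    # precision and recall of the matched detections
    pre = 100.0 * num_detections / float(num_detections + num_false_positives)
    rec = 100.0 * num_detections / float(num_objects)
    return mota, motp, pre, rec

# e.g. finalizeJointMetrics(5, 1, 3, 100, 92, 36.8) -> (91.0, 60.0, ~96.8, 92.0)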