示例#1
0
def evaluateAP(gtFramesAll, prFramesAll, outputDir, bSaveAll=True, bSaveSeq=False):
    """Compute average precision (AP), precision and recall per body part.

    Args:
        gtFramesAll: list of ground-truth frame dicts; each carries "seq_id"
            and "seq_name" entries.
        prFramesAll: list of predicted frame dicts, parallel to gtFramesAll.
        outputDir: directory where JSON metric files are written.
        bSaveAll: if True, write aggregated metrics to
            <outputDir>/total_AP_metrics.json.
        bSaveSeq: if True, additionally write one metrics file per sequence.

    Returns:
        (apAll, preAll, recAll) as produced by computeMetrics.
    """
    distThresh = 0.5

    # sequence id of every frame (comprehension replaces manual append loop)
    seqidxs = np.array([frame["seq_id"] for frame in gtFramesAll])

    seqidxsUniq = np.unique(seqidxs)
    nSeq = len(seqidxsUniq)

    names = Joint().name
    names['15'] = 'total'  # extra slot for the aggregated ("total") column

    if bSaveSeq:
        for si in range(nSeq):
            print("seqidx: %d/%d" % (si + 1, nSeq))

            # extract frame IDs for the sequence
            imgidxs = np.argwhere(seqidxs == seqidxsUniq[si])
            seqName = gtFramesAll[imgidxs[0, 0]]["seq_name"]

            # hoist the flatten/tolist conversion: it was computed twice before
            frameIds = imgidxs.flatten().tolist()
            gtFrames = [gtFramesAll[imgidx] for imgidx in frameIds]
            prFrames = [prFramesAll[imgidx] for imgidx in frameIds]

            # assign predicted poses to GT poses
            scores, labels, nGT, _ = eval_helpers.assignGTmulti(gtFrames, prFrames, distThresh)

            # compute average precision (AP), precision and recall per part
            ap, pre, rec = computeMetrics(scores, labels, nGT)
            metricsSeq = {'ap': ap.flatten().tolist(),
                          'pre': pre.flatten().tolist(),
                          'rec': rec.flatten().tolist(),
                          'names': names}

            filename = outputDir + '/' + seqName + '_AP_metrics.json'
            print('saving results to', filename)
            eval_helpers.writeJson(metricsSeq, filename)

    # assign predicted poses to GT poses over all sequences at once
    scoresAll, labelsAll, nGTall, _ = eval_helpers.assignGTmulti(gtFramesAll, prFramesAll, distThresh)

    # compute average precision (AP), precision and recall per part
    apAll, preAll, recAll = computeMetrics(scoresAll, labelsAll, nGTall)
    if bSaveAll:
        metrics = {'ap': apAll.flatten().tolist(),
                   'pre': preAll.flatten().tolist(),
                   'rec': recAll.flatten().tolist(),
                   'names': names}
        filename = outputDir + '/total_AP_metrics.json'
        print('saving results to', filename)
        eval_helpers.writeJson(metrics, filename)

    return apAll, preAll, recAll
示例#2
0
def evaluateTracking(gtFramesAll, prFramesAll, outputDir, saveAll=True, saveSeq=False):
    """Match predicted poses to ground truth, then compute per-part MOT metrics."""
    distThresh = 0.5

    # only the MOT assignment (4th return value) is needed downstream
    motAll = eval_helpers.assignGTmulti(gtFramesAll, prFramesAll, distThresh)[3]

    # computeMetrics handles any writing to outputDir per the save flags
    return computeMetrics(gtFramesAll, motAll, outputDir, saveAll, saveSeq)
示例#3
0
def evaluateTracking(gtFramesAll, prFramesAll, trackUpperBound):
    """Assign predicted poses to GT poses and return per-part MOT metrics."""
    distThresh = 0.5

    # assignGTmulti returns (scores, labels, nGT, motAll); keep only motAll
    assignment = eval_helpers.assignGTmulti(
        gtFramesAll, prFramesAll, distThresh, trackUpperBound)
    motAll = assignment[3]

    return computeMetrics(gtFramesAll, motAll)
def evaluateTracking(gtFramesAll, prFramesAll, trackUpperBound):
    """Compute MOT tracking metrics for predictions against ground truth."""
    # 0.5 is the matching distance threshold used by assignGTmulti
    matchResult = eval_helpers.assignGTmulti(
        gtFramesAll, prFramesAll, 0.5, trackUpperBound)

    # the MOT assignment is the last element of the 4-tuple returned
    return computeMetrics(gtFramesAll, matchResult[-1])
示例#5
0
def evaluateAP(gtFramesAll, prFramesAll):
    """Return per-part average precision, precision and recall."""
    distThresh = 0.5

    # match predicted poses to GT poses; the MOT output is not needed here
    scoresAll, labelsAll, nGTall, _ = eval_helpers.assignGTmulti(
        gtFramesAll, prFramesAll, distThresh)

    # per-part AP / precision / recall
    return computeMetrics(scoresAll, labelsAll, nGTall)
示例#6
0
def evaluateAP(gtFramesAll, prFramesAll):
    """Evaluate average precision (AP), precision and recall per part."""
    # 0.5 is the distance threshold for matching predictions to GT poses
    assignment = eval_helpers.assignGTmulti(gtFramesAll, prFramesAll, 0.5)
    scores, labels, numGT = assignment[0], assignment[1], assignment[2]

    apAll, preAll, recAll = computeMetrics(scores, labels, numGT)
    return apAll, preAll, recAll
def evaluateAP(gtFramesAll, prFramesAll, bSaveAll=True, bSaveSeq=False):
    """Compute per-part AP, precision and recall, optionally saving to JSON.

    NOTE(review): bSaveSeq is accepted for interface compatibility but is
    never read in this variant.
    """
    # joint-name lookup table plus an extra slot for the aggregated column
    names = Joint().name
    names['17'] = 'total'

    # match predicted poses against ground truth (0.5 distance threshold);
    # this variant's assignGTmulti returns a 3-tuple
    scoresAll, labelsAll, nGTall = eval_helpers.assignGTmulti(
        gtFramesAll, prFramesAll, 0.5)

    apAll, preAll, recAll = computeMetrics(scoresAll, labelsAll, nGTall)

    if bSaveAll:
        eval_helpers.writeJson(
            {'ap': apAll.flatten().tolist(),
             'pre': preAll.flatten().tolist(),
             'rec': recAll.flatten().tolist(),
             'names': names},
            './total_AP_metrics.json')

    return apAll, preAll, recAll
def evaluateTracking(gtFramesAll, prFramesAll, trackUpperBound):
    """Assign predicted poses to GT poses and compute per-part MOT metrics.

    Args:
        gtFramesAll: list of ground-truth frame dicts.
        prFramesAll: list of predicted frame dicts, parallel to gtFramesAll.
        trackUpperBound: forwarded to eval_helpers.assignGTmulti
            (presumably bounds the track matching -- TODO confirm semantics).

    Returns:
        The metrics structure produced by computeMetrics.
    """
    distThresh = 0.5

    # assign predicted poses to GT poses; only the MOT assignment is used.
    # (Dead commented-out debug scaffolding that previously surrounded this
    # call has been removed.)
    _, _, _, motAll = eval_helpers.assignGTmulti(gtFramesAll, prFramesAll,
                                                 distThresh, trackUpperBound)

    # compute MOT metrics per part; note this variant also passes prFramesAll
    metricsAll = computeMetrics(gtFramesAll, prFramesAll, motAll)
    return metricsAll