Code Example #1
File: eval_helpers.py  Project: tienhaophung/poseval
import glob
import json
import os

import numpy as np

# process_arguments, convert_videos, help, cleanupData, removeIgnoredPoints
# and MAX_TRACK_ID are provided elsewhere in the project and are not shown here.
def load_data_dir(argv):

  gt_dir, pred_dir, mode = process_arguments(argv)
  if not os.path.exists(gt_dir):
    help('Given GT directory ' + gt_dir + ' does not exist!\n')
  if not os.path.exists(pred_dir):
    help('Given prediction directory ' + pred_dir + ' does not exist!\n')
  filenames = glob.glob(gt_dir + "/*.json")
  gtFramesAll = []
  prFramesAll = []

  for i in range(len(filenames)):
    # load each annotation json file
    with open(filenames[i]) as data_file:
        data = json.load(data_file)
    if (not "annolist" in data):
        data = convert_videos(data)[0]
    gt = data["annolist"]
    for imgidx in range(len(gt)):
        gt[imgidx]["seq_id"] = i
        gt[imgidx]["seq_name"] = os.path.basename(filenames[i]).split('.')[0]
        for ridxGT in range(len(gt[imgidx]["annorect"])):
            if ("track_id" in gt[imgidx]["annorect"][ridxGT].keys()):
                # adjust track_ids to make them unique across all sequences
                assert(gt[imgidx]["annorect"][ridxGT]["track_id"][0] < MAX_TRACK_ID)
                gt[imgidx]["annorect"][ridxGT]["track_id"][0] += i*MAX_TRACK_ID
    gtFramesAll += gt
    gtBasename = os.path.basename(filenames[i])
    predFilename = pred_dir + gtBasename

    if (not os.path.exists(predFilename)):
        raise IOError('Prediction file ' + predFilename + ' does not exist')

    # load predictions
    with open(predFilename) as data_file:
        data = json.load(data_file)
    if (not "annolist" in data):
        data = convert_videos(data)[0]
    pr = data["annolist"]
    if (len(pr) != len(gt)):
        raise Exception('# prediction frames %d != # GT frames %d for %s' % (len(pr),len(gt),predFilename))
    for imgidx in range(len(pr)):
        track_id_frame = []
        for ridxPr in range(len(pr[imgidx]["annorect"])):
            if ("track_id" in pr[imgidx]["annorect"][ridxPr].keys()):
                track_id = pr[imgidx]["annorect"][ridxPr]["track_id"][0]
                track_id_frame += [track_id]
                # adjust track_ids to make them unique across all sequences
                assert(track_id < MAX_TRACK_ID)
                pr[imgidx]["annorect"][ridxPr]["track_id"][0] += i*MAX_TRACK_ID
        track_id_frame_unique = np.unique(np.array(track_id_frame)).tolist()
        if (len(track_id_frame) != len(track_id_frame_unique)):
            raise Exception('Non-unique tracklet IDs found in frame %s of prediction %s' % (pr[imgidx]["image"][0]["name"],predFilename))
    prFramesAll += pr

  gtFramesAll,prFramesAll = cleanupData(gtFramesAll,prFramesAll)

  gtFramesAll,prFramesAll = removeIgnoredPoints(gtFramesAll,prFramesAll)

  return gtFramesAll, prFramesAll
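
Both snippets keep tracklet IDs unique after merging frames from many sequences by offsetting each per-sequence track_id by i * MAX_TRACK_ID, where i is the sequence index. The standalone sketch below illustrates that offsetting; the helper names and the MAX_TRACK_ID value of 1000 are illustrative and not taken from poseval.

# Minimal sketch of the per-sequence track-ID offsetting used above.
MAX_TRACK_ID = 1000  # illustrative value; poseval defines its own constant

def make_global_track_id(track_id, seq_index, max_track_id=MAX_TRACK_ID):
    # Per-sequence IDs must stay below the offset step, otherwise two
    # sequences could still collide after offsetting (hence the asserts above).
    assert track_id < max_track_id
    return track_id + seq_index * max_track_id

def split_global_track_id(global_id, max_track_id=MAX_TRACK_ID):
    # Recover (sequence index, original per-sequence ID) from a global ID.
    return divmod(global_id, max_track_id)

# Track 7 appears in sequence 0 and again in sequence 3; after offsetting the
# two occurrences no longer collide and the original IDs stay recoverable.
assert make_global_track_id(7, 0) == 7
assert make_global_track_id(7, 3) == 3007
assert split_global_track_id(3007) == (3, 7)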
Code Example #2
# This variant assumes the same module-level imports (os, glob, json) and
# project helpers (process_arguments, convert_videos, help, cleanupData,
# removeIgnoredPoints, MAX_TRACK_ID) as Code Example #1.
def load_data_dir(argv):

    gt_dir, pred_dir, mode = process_arguments(argv)
    if not os.path.exists(gt_dir):
        help('Given GT directory ' + gt_dir + ' does not exist!\n')
    if not os.path.exists(pred_dir):
        help('Given prediction directory ' + pred_dir + ' does not exist!\n')
    filenames = glob.glob(gt_dir + "/*.json")
    gtFramesAll = []
    prFramesAll = []

    for i in range(len(filenames)):
        # load each annotation json file
        #print('GT:')
        with open(filenames[i]) as data_file:
            data = json.load(data_file)
        if (not "annolist" in data):
            data = convert_videos(data)[0]
        gt = data["annolist"]
        for imgidx in range(len(gt)):
            gt[imgidx]["seq_id"] = i
            gt[imgidx]["seq_name"] = os.path.basename(
                filenames[i]).split('.')[0]
            for ridxGT in range(len(gt[imgidx]["annorect"])):
                if ("track_id" in gt[imgidx]["annorect"][ridxGT].keys()):
                    # adjust track_ids to make them unique across all sequences
                    #                print(os.path.basename(filenames[i]).split('.')[0])
                    #                print(gt[imgidx]["annorect"][ridxGT]["track_id"][0])
                    assert (gt[imgidx]["annorect"][ridxGT]["track_id"][0] <
                            MAX_TRACK_ID)
                    gt[imgidx]["annorect"][ridxGT]["track_id"][
                        0] += i * MAX_TRACK_ID
        gtFramesAll += gt
        gtBasename = os.path.basename(filenames[i])
        predFilename = pred_dir + gtBasename
        #print(xy)

        if (not os.path.exists(predFilename)):
            raise IOError('Prediction file ' + predFilename +
                          ' does not exist')

        # load predictions
#    print('PRED:')
        with open(predFilename) as data_file:
            data = json.load(data_file)
        if (not "annolist" in data):
            data = convert_videos(data)[0]
        pr = data["annolist"]
        if (len(pr) != len(gt)):
            raise Exception('# prediction frames %d != # GT frames %d for %s' %
                            (len(pr), len(gt), predFilename))
        for imgidx in range(len(pr)):
            for ridxPr in range(len(pr[imgidx]["annorect"])):
                if ("track_id" in pr[imgidx]["annorect"][ridxPr].keys()):
                    # adjust track_ids to make them unique across all sequences
                    #                print(os.path.basename(filenames[i]).split('.')[0])
                    #                print(pr[imgidx]["annorect"][ridxPr]["track_id"][0])
                    assert (pr[imgidx]["annorect"][ridxPr]["track_id"][0] <
                            MAX_TRACK_ID)
                    pr[imgidx]["annorect"][ridxPr]["track_id"][
                        0] += i * MAX_TRACK_ID
        prFramesAll += pr


#    print(xy)

    gtFramesAll, prFramesAll = cleanupData(gtFramesAll, prFramesAll)

    gtFramesAll, prFramesAll = removeIgnoredPoints(gtFramesAll, prFramesAll)
    return gtFramesAll, prFramesAll
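
A hedged sketch of how load_data_dir might be driven and its output inspected. The argv layout passed to process_arguments (script name, GT directory, prediction directory) is an assumption, and the per-sequence grouping assumes the seq_name field added by the loader survives cleanupData and removeIgnoredPoints; check eval_helpers.py for the exact conventions. Note that the loader concatenates pred_dir with the file name directly, so the directory arguments should end with a path separator.

import sys
from collections import Counter

# Hypothetical driver for either variant of load_data_dir above, e.g.
#   python run_eval.py /path/to/gt_dir/ /path/to/pred_dir/
if __name__ == "__main__":
    gtFramesAll, prFramesAll = load_data_dir(sys.argv)
    print("Loaded %d GT frames and %d prediction frames"
          % (len(gtFramesAll), len(prFramesAll)))

    # Count how many GT frames each sequence contributed.
    per_seq = Counter(frame["seq_name"] for frame in gtFramesAll)
    for seq_name, n_frames in sorted(per_seq.items()):
        print("  %s: %d frames" % (seq_name, n_frames))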