def vis(roidb, detections_pkl, thresh, output_dir):
    """Render GT and predicted detections side by side for every roidb entry.

    Loads the pickled detections, and for each entry that has at least one
    ground-truth box, writes a horizontally stacked [GT | prediction] image
    under ``output_dir``, mirroring the dataset's image directory layout.
    """
    if not roidb:
        return
    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)

    all_boxes = dets['all_boxes']
    all_keyps = dets.get('all_keyps', None)
    all_tracks = dets.get('all_tracks', None)

    for idx, entry in enumerate(tqdm(roidb)):
        boxes = entry['boxes']
        # Nothing to visualize without ground-truth boxes.
        if boxes is None or boxes.shape[0] == 0:
            continue
        gt_im, pred_im = _generate_visualizations(
            entry, idx, all_boxes, all_keyps, all_tracks, thresh)
        side_by_side = np.hstack((gt_im, pred_im))
        im_name = entry['image']
        if isinstance(im_name, list):
            # Clip-style entries store a list of frames; use the center one.
            im_name = im_name[len(im_name) // 2]
        rel_name = im_name[len(dataset.image_directory):]
        out_path = osp.join(output_dir, rel_name)
        gen_utils.mkdir_p(osp.dirname(out_path))
        cv2.imwrite(out_path, side_by_side)
# ---- Example #2 (scraper separator; vote count: 0) ----
def vis(roidb, detections_pkl, thresh, output_dir):
    """Render the prediction visualization for every roidb entry.

    Variant of ``vis`` that writes only the prediction image (not the
    [GT | prediction] pair) under ``output_dir``, mirroring the dataset's
    image directory layout.

    Fixes: the empty-boxes guard had its ``continue`` commented out,
    leaving a dead ``if ...: pass``; ``combined`` was computed but unused
    once the output was switched to ``pred``; commented-out code removed.
    """
    if not roidb:
        return
    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)

    all_boxes = dets['all_boxes']
    all_keyps = dets.get('all_keyps', None)
    all_tracks = dets.get('all_tracks', None)

    for ix, entry in enumerate(tqdm(roidb)):
        # Skip entries without any ground-truth boxes; nothing to show.
        if entry['boxes'] is None or entry['boxes'].shape[0] == 0:
            continue
        gt, pred = _generate_visualizations(entry, ix, all_boxes, all_keyps,
                                            all_tracks, thresh)
        im_name = entry['image']
        if isinstance(im_name, list):
            # Clip-style entries store a list of frames; use the center one.
            im_name = im_name[len(im_name) // 2]
        out_name = im_name[len(dataset.image_directory):]
        out_path = osp.join(output_dir, out_name)
        gen_utils.mkdir_p(osp.dirname(out_path))
        # Deliberately write only the prediction visualization.
        cv2.imwrite(out_path, pred)
# ---- Example #3 (scraper separator; vote count: 0) ----
def _convert_video_frame_ids(inpath, outpath):
    """
    PoseTrack videos follow no consistent naming for frames. Make it consistent
    """
    mkdir_p(outpath)
    jpg_names = [osp.basename(p) for p in glob.glob(osp.join(inpath, '*.jpg'))]
    # Some videos have 00XX_crop.jpg style filenames; the integer before the
    # first '_' or '.' is the frame id.
    ids = [int(name.split('.')[0].split('_')[0]) for name in jpg_names]
    name_by_id = dict(zip(ids, jpg_names))
    # Renumber frames as 00000001.jpg, 00000002.jpg, ... in id order.
    for new_idx, fid in enumerate(sorted(ids), start=1):
        src = '{}/{}'.format(inpath, name_by_id[fid])
        dst = '{}/{:08d}.jpg'.format(outpath, new_idx)
        shutil.copy(src, dst)
def _convert_video_frame_ids(inpath, outpath):
    """
    PoseTrack videos follow no consistent naming for frames. Make it consistent
    """
    mkdir_p(outpath)
    all_ids = []
    id_to_name = {}
    # Some videos have 00XX_crop.jpg style filenames
    for full_path in glob.glob(osp.join(inpath, '*.jpg')):
        fname = osp.basename(full_path)
        fid = int(fname.split('.')[0].split('_')[0])
        all_ids.append(fid)
        id_to_name[fid] = fname
    # Copy frames out in id order under sequential 8-digit names (1-based).
    for pos, fid in enumerate(sorted(all_ids)):
        shutil.copy('{}/{}'.format(inpath, id_to_name[fid]),
                    '{}/{:08d}.jpg'.format(outpath, pos + 1))
# ---- Example #5 (scraper separator; vote count: 0) ----
def _run_eval_single_video(vname, out_filenames, output_dir, dataset, eval_tracking):
    """Evaluate a single video's predictions against its ground truth.

    Copies the video's GT and prediction JSON into a scratch directory,
    runs ``_run_eval`` on it, and returns ``(vname, score_ap, score_mot)``.
    On any failure the scores are NaN so one bad video does not abort a
    per-video sweep.

    Fixes: cleanup now happens in a ``finally`` via ``shutil.rmtree``
    (previously the temp dir leaked if a ``copyfile`` raised, and cleanup
    shelled out to ``rm -rf``).
    """
    per_vid_tmp_dir = tempfile.mkdtemp()
    try:
        gen_utils.mkdir_p(per_vid_tmp_dir)
        # in case it previously existed and has anything in it
        gen_utils.mkdir_p(osp.join(per_vid_tmp_dir, 'gt/'))
        gen_utils.mkdir_p(osp.join(per_vid_tmp_dir, 'pred/'))
        voutname = out_filenames[osp.join('images', vname)]
        pred_path = osp.join(output_dir, voutname)
        gt_path = osp.join(dataset.annotation_directory, voutname)
        try:
            shutil.copyfile(gt_path, osp.join(per_vid_tmp_dir, 'gt', voutname))
            shutil.copyfile(pred_path, osp.join(per_vid_tmp_dir, 'pred', voutname))
            score_ap, score_mot = _run_eval(
                osp.join(per_vid_tmp_dir, 'gt/'),
                osp.join(per_vid_tmp_dir, 'pred/'),
                eval_tracking)
        except Exception as e:
            logger.error('Unable to process video {} due to {}'.format(
                vname, e))
            score_ap = np.nan
            score_mot = np.nan
    finally:
        # Always remove the scratch dir, even if evaluation raised.
        shutil.rmtree(per_vid_tmp_dir, ignore_errors=True)
    return (vname, score_ap, score_mot)
# ---- Example #6 (scraper separator; vote count: 0) ----
def _run_posetrack_eval(roidb, det_file, dataset, output_dir):
    """Run PoseTrack MPII-style evaluation on the detections in det_file.

    Converts the pickled per-frame detections into per-video "annolist"
    JSON files under ``output_dir``, runs the dataset-level evaluation,
    and — when ``cfg.EVAL.EVAL_MPII_PER_VIDEO`` is set — also evaluates
    each video individually, writing per-video scores next to
    ``output_dir``.

    Returns:
        ``(score_ap, score_mot, apAll, preAll, recAll, mota)`` as produced
        by ``_run_eval``.

    Fixes: the JSON filename lookup used string concatenation
    (``'images' + vname`` -> ``'imagesfoo'``) instead of
    ``osp.join('images', vname)`` — the key form used everywhere else
    (see ``_run_eval_single_video``); also, ``map()`` is lazy in
    Python 3, so the result is materialized before logging the elapsed
    time (previously "...Done" logged before any evaluation ran).
    """
    with open(det_file, 'rb') as fin:
        dets = pkl.load(fin)
    # Detections are indexed by class; index 1 is the person class.
    assert len(roidb) == len(dets['all_boxes'][1]), \
        'Mismatch {} vs {}'.format(len(roidb), len(dets['all_boxes'][1]))
    gen_utils.mkdir_p(output_dir)
    out_filenames = video2filenames(dataset.annotation_directory)
    out_data = {}  # each video to all predictions
    eval_tracking = 'all_tracks' in dets
    for i, entry in enumerate(roidb):
        # Frame path relative to the dataset image root.
        image_name = get_image_path(entry)[len(dataset.image_directory):]
        video_name = osp.dirname(image_name)
        frame_num = int(osp.basename(image_name).split('.')[0])
        boxes = dets['all_boxes'][1][i]
        kps = dets['all_keyps'][1][i]
        if eval_tracking:  # means there is a "all_tracks" in the dets
            tracks = dets['all_tracks'][1][i]
        else:
            # Without tracking output, assign every keypoint set track id 1.
            tracks = [1] * len(kps)
        data_el = {
            'image': image_name,
            'imagenum': [frame_num],
            'annorect': _convert_data_to_annorect_struct(boxes, kps, tracks),
        }
        out_data.setdefault(video_name, []).append(data_el)

    logger.info('Saving the JSON files to {}'.format(output_dir))
    # clear out the previous predictions, if any
    gen_utils.run_cmd('rm -r {}/*'.format(output_dir), print_cmd=False)
    for vname in tqdm(out_data.keys(), desc='Writing JSON files for eval'):
        vdata = out_data[vname]
        # Key must be osp.join('images', vname) to match video2filenames
        # and _run_eval_single_video.
        outfpath = osp.join(
            output_dir, out_filenames[osp.join('images', vname)])
        with open(outfpath, 'w') as fout:
            json.dump({'annolist': vdata}, fout, indent=4)

    logger.info('Wrote all predictions in JSON to {}'.format(output_dir))
    logger.info('Running dataset level evaluation...')
    score_ap, score_mot, apAll, preAll, recAll, mota = _run_eval(
        dataset.annotation_directory, output_dir, eval_tracking)
    # TODO(rgirdhar): Do this better
    if cfg.EVAL.EVAL_MPII_PER_VIDEO:  # run the evaluation per-video
        logger.info('Running per-video evaluation...')
        st_time = time.time()
        pervid_outpath = osp.join(
            osp.dirname(osp.normpath(output_dir)),
            osp.basename(det_file) + '_per_video_scores.txt')
        # The eval code itself uses multiprocessing, and daemon processes
        # cannot spawn children, so the per-video evaluations run serially.
        res = list(map(
            partial(_run_eval_single_video,
                    out_filenames=out_filenames,
                    output_dir=output_dir,
                    dataset=dataset,
                    eval_tracking=eval_tracking), out_data.keys()))
        logger.info('...Done in {} seconds'.format(time.time() - st_time))
        res = sorted(res, key=lambda x: x[1])  # sort on score
        logger.info('Writing per-video scores to {}'.format(pervid_outpath))
        with open(pervid_outpath, 'w') as fout:
            for el in res:
                fout.write('{} {} {}\n'.format(el[0], el[1], el[2]))
    return score_ap, score_mot, apAll, preAll, recAll, mota
# ---- Example #7 (scraper separator; vote count: 0) ----
def _run_posetrack_eval(roidb, det_file, dataset, output_dir):
    """Run the PoseTrack MPII-style evaluation on detections in det_file.

    Converts the pickled per-frame detections into per-video "annolist"
    JSON files under output_dir, runs the dataset-level evaluation, and
    (when cfg.EVAL.EVAL_MPII_PER_VIDEO is set) also evaluates each video
    individually, writing per-video scores next to output_dir.

    NOTE(review): this is a second definition of _run_posetrack_eval in
    this file; at import time it shadows the earlier one.
    """
    with open(det_file, 'rb') as fin:
        dets = pkl.load(fin)
    # Detections are indexed by class; index 1 is the person class.
    assert len(roidb) == len(dets['all_boxes'][1]), \
        'Mismatch {} vs {}'.format(len(roidb), len(dets['all_boxes'][1]))
    gen_utils.mkdir_p(output_dir)
    out_filenames = video2filenames(dataset.annotation_directory)
    out_data = {}  # each video to all predictions
    eval_tracking = False
    if 'all_tracks' in dets:
        eval_tracking = True
    for i, entry in enumerate(roidb):
        # Frame path relative to the dataset image root.
        image_name = get_image_path(entry)[len(dataset.image_directory):]
        video_name = osp.dirname(image_name)
        frame_num = int(osp.basename(image_name).split('.')[0])
        boxes = dets['all_boxes'][1][i]
        kps = dets['all_keyps'][1][i]
        if eval_tracking:  # means there is a "all_tracks" in the dets
            tracks = dets['all_tracks'][1][i]
        else:
            # Without tracking output, assign every keypoint set track id 1.
            tracks = [1] * len(kps)
        data_el = {
            'image': image_name,
            'imagenum': [frame_num],
            'annorect': _convert_data_to_annorect_struct(boxes, kps, tracks),
        }
        if video_name in out_data:
            out_data[video_name].append(data_el)
        else:
            out_data[video_name] = [data_el]

    logger.info('Saving the JSON files to {}'.format(output_dir))
    # clear out the previous predictions, if any
    gen_utils.run_cmd('rm -r {}/*'.format(output_dir), print_cmd=False)
    for vname in tqdm(out_data.keys(), desc='Writing JSON files for eval'):
        vdata = out_data[vname]
        # out_filenames keys are 'images/<video>' style paths.
        outfpath = osp.join(
            output_dir, out_filenames[osp.join('images', vname)])
        with open(outfpath, 'w') as fout:
            json.dump({'annolist': vdata}, fout)
    logger.info('Wrote all predictions in JSON to {}'.format(output_dir))
    logger.info('Running dataset level evaluation...')
    st_time = time.time()
    logger.info(_run_eval(dataset.annotation_directory, output_dir, eval_tracking))
    logger.info('...Done in {}'.format(time.time() - st_time))
    # TODO(rgirdhar): Do this better
    if cfg.EVAL.EVAL_MPII_PER_VIDEO:  # run the evaluation per-video
        res = []
        logger.info('Running per-video evaluation...')
        st_time = time.time()
        # Per-video scores go next to output_dir, named after det_file.
        pervid_outpath = osp.join(
            osp.dirname(osp.normpath(output_dir)),
            osp.basename(det_file) + '_per_video_scores.txt')
        # Earlier I used multi-processing to compute the predictions in parallel
        # but now I've updated the eval code itself to use multiprocessing so
        # can not use multiprocessing here (else it gives an error that daemon
        # processes can not spawn children). Hence setting num processes to 0.
        # NOTE(review): map() is lazy in Python 3 — evaluation actually runs
        # only when sorted() consumes it below, so the "...Done" log above
        # fires before the work happens.
        res = map(partial(
            _run_eval_single_video,
            out_filenames=out_filenames,
            output_dir=output_dir,
            dataset=dataset,
            eval_tracking=eval_tracking), out_data.keys())
        logger.info('...Done in {} seconds'.format(time.time() - st_time))
        res = sorted(res, key=lambda x: x[1])  # sort on score
        logger.info('Writing per-video scores to {}'.format(pervid_outpath))
        with open(pervid_outpath, 'w') as fout:
            for el in res:
                fout.write('{} {} {}\n'.format(el[0], el[1], el[2]))