Example #1
def inference(
    detection_cfg,
    skeleton_cfg,
    dataset_cfg,
    gpus=1,
    worker_per_gpu=1,
):
    # get frame num
    video_file = dataset_cfg.video_file
    video_name = video_file.strip('\n').split('/')[-1]
    video_frames = mmcv.VideoReader(video_file)
    num_frames = len(video_frames)
    del video_frames

    data_cfg = skeleton_cfg.data_cfg
    if data_cfg.save_video:
        data_cfg.img_dir = os.path.join(data_cfg.save_dir,
                                        '{}.img'.format(video_name))

        if os.path.exists(data_cfg.img_dir):
            import shutil
            shutil.rmtree(data_cfg.img_dir)

        os.makedirs(data_cfg.img_dir)

    # cache model checkpoints
    cache_checkpoint(detection_cfg.checkpoint_file)
    cache_checkpoint(skeleton_cfg.checkpoint_file)

    # multiprocess settings
    context = mp.get_context('spawn')
    result_queue = context.Queue(num_frames)
    procs = []
    for w in range(gpus * worker_per_gpu):
        shard_list = list(range(w, num_frames, gpus * worker_per_gpu))
        p = context.Process(target=worker,
                            args=(video_file, shard_list, detection_cfg,
                                  skeleton_cfg, data_cfg, w % gpus,
                                  result_queue))
        p.start()
        procs.append(p)
    all_result = []
    print('\nPose estimation start:')
    prog_bar = ProgressBar(num_frames)
    for i in range(num_frames):
        t = result_queue.get()
        all_result.append(t)
        prog_bar.update()
    for p in procs:
        p.join()
    if len(all_result) == num_frames and data_cfg.save_video:
        print('\n\nGenerate video:')
        video_path = os.path.join(data_cfg.save_dir, video_name)
        mmcv.frames2video(data_cfg.img_dir,
                          video_path,
                          filename_tmpl='{:01d}.png')
        print('Video was saved to {}'.format(video_path))

    return all_result
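A minimal call sketch for the function above, assuming it is imported from this repository; the config paths and the video entry are hypothetical placeholders, not the real files the repo ships.

# Hypothetical usage sketch: config paths and the video path are placeholders.
import mmcv

detection_cfg = mmcv.Config.fromfile('configs/detection.py')
skeleton_cfg = mmcv.Config.fromfile('configs/skeleton.py')
dataset_cfg = mmcv.Config(dict(video_file='demo/video.mp4'))

results = inference(detection_cfg, skeleton_cfg, dataset_cfg,
                    gpus=1, worker_per_gpu=2)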
Example #2
def multi_gpu_extract(model,
                      data_loader,
                      tmpdir=None,
                      gpu_collect=False,
                      progress=False):
    model.eval()

    results = defaultdict(list)
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0 and progress:
        prog_bar = ProgressBar(len(dataset))

    time.sleep(2)  # prevent deadlock problem in some cases
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            feats = model(mode='test', **data)

        results['feats'].append(feats.cpu())
        results['pids'].append(data['pid'])
        results['camids'].append(data['camid'])

        if rank == 0 and progress:
            prog_bar.update(data['img'].shape[0] * world_size)

    for key, val in results.items():
        results[key] = torch.cat(val, dim=0)

    # collect results from all ranks
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)

    return results
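A hedged driver sketch for the extractor above: it assumes the process group was already initialized by a distributed launcher, and that `model` and `data_loader` come from the repository's own builders (illustrative names only).

# Hypothetical distributed driver; assumes the process group was initialized
# by a launcher and that model/data_loader come from the repo's own builders.
outputs = multi_gpu_extract(model, data_loader,
                            tmpdir='./tmp_collect',  # placeholder scratch dir
                            progress=True)
if outputs is not None:  # collect_results_* typically returns None off rank 0
    print(outputs['feats'].shape)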
Example #3
def main():
    parser = ArgumentParser()
    parser.add_argument('img_root', type=str, help='Image root path')
    parser.add_argument('img_list', type=str, help='Image path list file')
    parser.add_argument('config', type=str, help='Config file')
    parser.add_argument('checkpoint', type=str, help='Checkpoint file')
    parser.add_argument('--score-thr',
                        type=float,
                        default=0.5,
                        help='Bbox score threshold')
    parser.add_argument('--out-dir',
                        type=str,
                        default='./results',
                        help='Dir to save '
                        'visualized images '
                        'and bbox files')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference.')
    args = parser.parse_args()

    assert 0 < args.score_thr < 1

    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    if hasattr(model, 'module'):
        model = model.module
    if model.cfg.data.test['type'] == 'ConcatDataset':
        model.cfg.data.test.pipeline = model.cfg.data.test['datasets'][
            0].pipeline

    # Start Inference
    out_vis_dir = osp.join(args.out_dir, 'out_vis_dir')
    mmcv.mkdir_or_exist(out_vis_dir)
    out_txt_dir = osp.join(args.out_dir, 'out_txt_dir')
    mmcv.mkdir_or_exist(out_txt_dir)

    lines = list_from_file(args.img_list)
    progressbar = ProgressBar(task_num=len(lines))
    for line in lines:
        progressbar.update()
        img_path = osp.join(args.img_root, line.strip())
        if not osp.exists(img_path):
            raise FileNotFoundError(img_path)
        # Test a single image
        result = model_inference(model, img_path)
        img_name = osp.basename(img_path)
        # save result
        save_results(result, out_txt_dir, img_name, score_thr=args.score_thr)
        # show result
        out_file = osp.join(out_vis_dir, img_name)
        kwargs_dict = {
            'score_thr': args.score_thr,
            'show': False,
            'out_file': out_file
        }
        model.show_result(img_path, result, **kwargs_dict)

    print(f'\nInference done, and results saved in {args.out_dir}\n')
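An illustrative command line for this script; the script name and paths are placeholders, not the repository's actual file layout.

# Hypothetical invocation (names and paths are placeholders):
#   python det_demo.py data/images img_list.txt config.py ckpt.pth \
#       --score-thr 0.6 --out-dir ./results --device cuda:0
# Visualizations land in ./results/out_vis_dir and per-image bbox text
# files in ./results/out_txt_dir.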
Example #4
def main():
    parser = ArgumentParser()
    parser.add_argument('config', type=str, help='Config file')
    parser.add_argument('checkpoint', type=str, help='Checkpoint file')
    parser.add_argument('img_root', type=str, help='Image root path')
    parser.add_argument('img_list', type=str, help='Image path list file')

    parser.add_argument('--score-thr',
                        type=float,
                        default=0.5,
                        help='Bbox score threshold')
    parser.add_argument('--out-dir',
                        type=str,
                        default='./results',
                        help='Dir to save '
                        'visualized images '
                        'and bbox files')
    args = parser.parse_args()

    assert 0 < args.score_thr < 1

    # build the model from a config file and a checkpoint file
    device = 'cuda:' + str(torch.cuda.current_device())
    model = init_detector(args.config, args.checkpoint, device=device)
    if hasattr(model, 'module'):
        model = model.module
    if model.cfg.data.test['type'] == 'ConcatDataset':
        model.cfg.data.test.pipeline = model.cfg.data.test['datasets'][
            0].pipeline

    # Start Inference
    out_vis_dir = osp.join(args.out_dir, 'out_vis_dir')
    mmcv.mkdir_or_exist(out_vis_dir)

    with open(args.img_list) as f:
        total_img_num = sum(1 for _ in f)
    progressbar = ProgressBar(task_num=total_img_num)
    with codecs.open(args.img_list, 'r', 'utf-8') as fr:
        for line in fr:
            progressbar.update()
            img_path = osp.join(args.img_root, line.strip())
            if not osp.exists(img_path):
                raise FileNotFoundError(img_path)
            # Test a single image
            result = inference_detector(model, img_path)
            img_name = osp.basename(img_path)
            out_file = osp.join(out_vis_dir, img_name)
            kwargs_dict = {
                'score_thr': args.score_thr,
                'show': False,
                'out_file': out_file
            }
            model.show_result(img_path, result, **kwargs_dict)

    print(f'\nInference done, and results saved in {args.out_dir}\n')
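The same pattern as Example #3, but the device is taken from torch.cuda.current_device(), so the script follows whatever GPU was selected beforehand. An illustrative invocation (placeholder names):

# Hypothetical invocation (placeholder names):
#   CUDA_VISIBLE_DEVICES=1 python det_demo.py config.py ckpt.pth \
#       data/images img_list.txt --score-thr 0.5 --out-dir ./results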
Example #5
def dataset_analysis(dataset_cfg, mask_channel=2, workers=16, batch_size=16):
    dataset = call_obj(**dataset_cfg)
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=workers)

    prog_bar = ProgressBar(len(dataset))
    for k, (data, mask) in enumerate(data_loader):
        assert mask.size(1) == 1
        n = data.size(0)
        c = data.size(1)
        if k == 0:
            means = [[] for _ in range(c)]
            stds = [[] for _ in range(c)]
        mask = mask.expand(data.size()).type_as(data)
        data = data * mask
        total = data.reshape(n * c, -1).sum(1)
        num = mask.reshape(n * c, -1).sum(1)
        mean = total / num
        diff = (data.reshape(n * c, -1) - mean.view(n * c, 1)) * mask.view(
            n * c, -1)
        std = ((diff**2).sum(1) / num)**0.5
        mean = mean.view(n, c)
        std = std.view(n, c)
        for i in range(c):
            m = mean[:, i]
            m = m[~torch.isnan(m)]
            if len(m) > 0:
                means[i].append(m.mean())
            s = std[:, i]
            s = s[~torch.isnan(s)]
            if len(s) > 0:
                stds[i].append(s.mean())
        prog_bar.update(n)
    means = [np.mean(m) for m in means]
    stds = [np.mean(s) for s in stds]
    print('\n\nDataset analysis result:')
    print('\tmean of channels : {}'.format(means))
    print('\tstd of channels  : {}'.format(stds))
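A small self-contained check of the masked mean/std arithmetic used above, on a toy batch; no dataset or config is needed, and the shapes match what the loop computes per batch.

# Toy verification of the masked statistics used above.
import torch

n, c = 2, 3
data = torch.randn(n, c, 4, 4)
mask = torch.randint(0, 2, (n, 1, 4, 4)).float().expand(data.size())
masked = data * mask
total = masked.reshape(n * c, -1).sum(1)
num = mask.reshape(n * c, -1).sum(1)     # NaN appears if a mask is all-zero,
mean = (total / num).view(n, c)          # which the loop filters via isnan
print(mean.shape)  # torch.Size([2, 3])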
Example #6
def single_gpu_extract(model, data_loader, progress=False):
    model.eval()

    results = defaultdict(list)
    dataset = data_loader.dataset
    if progress:
        prog_bar = ProgressBar(len(dataset))

    for i, data in enumerate(data_loader):
        with torch.no_grad():
            feats = model(mode='test', **data)

        results['feats'].append(feats.cpu())
        results['pids'].append(data['pid'])
        results['camids'].append(data['camid'])

        if progress:
            prog_bar.update(data['img'].shape[0])

    for key, val in results.items():
        results[key] = torch.cat(val, dim=0)

    return results
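A runnable toy sketch: the model and dataset below are illustrative stand-ins (not the repository's API) that satisfy the interface single_gpu_extract expects, namely a model called as model(mode='test', **data) and batches carrying img/pid/camid keys.

# Toy stand-ins for the real re-ID model and dataset (illustrative only).
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset

class ToyReIDSet(Dataset):
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        return dict(img=torch.randn(3, 8, 8), pid=idx % 2, camid=idx % 3)

class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(3 * 8 * 8, 16)
    def forward(self, mode='test', img=None, pid=None, camid=None):
        return self.fc(img.flatten(1))

loader = DataLoader(ToyReIDSet(), batch_size=4)
out = single_gpu_extract(ToyModel(), loader)
print(out['feats'].shape)  # torch.Size([8, 16])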
Example #7
def main():
    parser = ArgumentParser()
    parser.add_argument('img_root_path', type=str, help='Image root path')
    parser.add_argument('img_list', type=str, help='Image path list file')
    parser.add_argument('config', type=str, help='Config file')
    parser.add_argument('checkpoint', type=str, help='Checkpoint file')
    parser.add_argument(
        '--out-dir', type=str, default='./results', help='Dir to save results')
    parser.add_argument(
        '--show', action='store_true', help='show image or save')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference.')
    args = parser.parse_args()

    # init the logger before other steps; the log file lives in out_dir,
    # so make sure the directory exists first
    mmcv.mkdir_or_exist(args.out_dir)
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(args.out_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level='INFO')

    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    if hasattr(model, 'module'):
        model = model.module

    # Start Inference
    out_vis_dir = osp.join(args.out_dir, 'out_vis_dir')
    mmcv.mkdir_or_exist(out_vis_dir)
    correct_vis_dir = osp.join(args.out_dir, 'correct')
    mmcv.mkdir_or_exist(correct_vis_dir)
    wrong_vis_dir = osp.join(args.out_dir, 'wrong')
    mmcv.mkdir_or_exist(wrong_vis_dir)
    img_paths, pred_labels, gt_labels = [], [], []

    lines = list_from_file(args.img_list)
    progressbar = ProgressBar(task_num=len(lines))
    num_gt_label = 0
    for line in lines:
        progressbar.update()
        item_list = line.strip().split()
        img_file = item_list[0]
        gt_label = ''
        if len(item_list) >= 2:
            gt_label = item_list[1]
            num_gt_label += 1
        img_path = osp.join(args.img_root_path, img_file)
        if not osp.exists(img_path):
            raise FileNotFoundError(img_path)
        # Test a single image
        result = model_inference(model, img_path)
        pred_label = result['text']

        out_img_name = '_'.join(img_file.split('/'))
        out_file = osp.join(out_vis_dir, out_img_name)
        kwargs_dict = {
            'gt_label': gt_label,
            'show': args.show,
            'out_file': '' if args.show else out_file
        }
        model.show_result(img_path, result, **kwargs_dict)
        if gt_label != '' and not args.show:
            # out_file is only written to disk when not showing interactively
            if gt_label == pred_label:
                dst_file = osp.join(correct_vis_dir, out_img_name)
            else:
                dst_file = osp.join(wrong_vis_dir, out_img_name)
            shutil.copy(out_file, dst_file)
        img_paths.append(img_path)
        gt_labels.append(gt_label)
        pred_labels.append(pred_label)

    # Save results
    save_results(img_paths, pred_labels, gt_labels, args.out_dir)

    if num_gt_label == len(pred_labels):
        # eval
        eval_results = eval_ocr_metric(pred_labels, gt_labels)
        logger.info('\n' + '-' * 100)
        info = ('eval on testset with img_root_path '
                f'{args.img_root_path} and img_list {args.img_list}\n')
        logger.info(info)
        logger.info(eval_results)

    print(f'\nInference done, and results saved in {args.out_dir}\n')
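An illustrative run of this OCR demo (placeholder names): when the image list carries a second column of ground-truth labels, visualizations are additionally sorted into correct/ and wrong/, and metrics are computed once every line has a label.

# Hypothetical invocation (placeholder names):
#   python ocr_demo.py data/images img_gt_list.txt config.py ckpt.pth \
#       --out-dir ./results
# Each line of img_gt_list.txt looks like:  sub_dir/img_001.jpg  hello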
Example #8
def build(detection_cfg,
          estimation_cfg,
          tracker_cfg,
          video_dir,
          out_dir,
          gpus=1,
          worker_per_gpu=1,
          video_max_length=10000,
          category_annotation=None):

    cache_checkpoint(detection_cfg.checkpoint_file)
    cache_checkpoint(estimation_cfg.checkpoint_file)
    if tracker_cfg is not None:
        raise NotImplementedError

    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    if category_annotation is None:
        video_categories = dict()
    else:
        with open(category_annotation) as f:
            video_categories = json.load(f)['annotations']

    inputs = Manager().Queue(video_max_length)
    results = Manager().Queue(video_max_length)

    num_worker = gpus * worker_per_gpu
    procs = []
    for i in range(num_worker):
        p = Process(target=worker,
                    args=(inputs, results, i % gpus, detection_cfg,
                          estimation_cfg))
        procs.append(p)
        p.start()

    video_file_list = os.listdir(video_dir)
    prog_bar = ProgressBar(len(video_file_list))
    for video_file in video_file_list:
        reader = mmcv.VideoReader(os.path.join(video_dir, video_file))
        video_frames = reader[:video_max_length]
        annotations = []
        num_keypoints = -1
        count_frame = 0
        for i, image in enumerate(video_frames):
            if image is None:
                continue

            count_frame += 1

            inputs.put((i, image))
        for i in range(count_frame):
            t = results.get()

            if not t['has_return']:
                continue

            num_person = len(t['joint_preds'])
            assert len(t['person_bbox']) == num_person

            for j in range(num_person):
                keypoints = [[p[0], p[1], round(s[0], 2)] for p, s in zip(
                    t['joint_preds'][j].round().astype(int).tolist(),
                    t['joint_scores'][j].tolist())]
                num_keypoints = len(keypoints)
                bbox = t['person_bbox'][j].round().astype(int).tolist()
                person_info = dict(person_bbox=bbox,
                                   frame_index=t['frame_index'],
                                   id=j,
                                   person_id=None,
                                   keypoints=keypoints)
                annotations.append(person_info)

        # output results
        annotations = sorted(annotations, key=lambda x: x['frame_index'])
        category_id = video_categories.get(video_file,
                                           {}).get('category_id', -1)
        info = dict(video_name=video_file,
                    resolution=reader.resolution,
                    num_frame=len(video_frames),
                    num_keypoints=num_keypoints,
                    keypoint_channels=['x', 'y', 'score'],
                    version='1.0')
        video_info = dict(info=info,
                          category_id=category_id,
                          annotations=annotations)
        with open(os.path.join(out_dir, video_file + '.json'), 'w') as f:
            json.dump(video_info, f)

        prog_bar.update()

    # send end signals
    for _ in procs:
        inputs.put((-1, None))
    # wait to finish
    for p in procs:
        p.join()

    print('\nBuild skeleton dataset to {}.'.format(out_dir))
    return video_info
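A hedged invocation sketch; the configs and paths are placeholders. The shape of the category_annotation JSON implied by the code (json.load(f)['annotations'] indexed by video file name) is noted in the comments.

# Hypothetical invocation; configs and paths are placeholders.
build(detection_cfg, estimation_cfg, None, 'data/videos', 'data/skeletons',
      gpus=1, worker_per_gpu=2,
      category_annotation='data/categories.json')
# where data/categories.json looks like:
#   {"annotations": {"some_video.mp4": {"category_id": 0}}}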
Example #9
def inference(detection_cfg,
              estimation_cfg,
              video_file,
              gpus=1,
              worker_per_gpu=1,
              save_dir=None):

    video_frames = mmcv.VideoReader(video_file)
    all_result = []
    print('\nPose estimation:')

    # case for single process
    if gpus == 1 and worker_per_gpu == 1:
        model = init_pose_estimator(detection_cfg, estimation_cfg, device=0)
        prog_bar = ProgressBar(len(video_frames))
        for i, image in enumerate(video_frames):
            res = inference_pose_estimator(model, image)
            res['frame_index'] = i
            if save_dir is not None:
                res['render_image'] = render(image, res['joint_preds'],
                                             res['person_bbox'],
                                             detection_cfg.bbox_thre)
            all_result.append(res)
            prog_bar.update()

    # case for multi-process
    else:
        cache_checkpoint(detection_cfg.checkpoint_file)
        cache_checkpoint(estimation_cfg.checkpoint_file)
        num_worker = gpus * worker_per_gpu
        procs = []
        inputs = Manager().Queue(len(video_frames))
        results = Manager().Queue(len(video_frames))

        for i, image in enumerate(video_frames):
            inputs.put((i, image))

        for i in range(num_worker):
            p = Process(target=worker,
                        args=(inputs, results, i % gpus, detection_cfg,
                              estimation_cfg, save_dir is not None))
            procs.append(p)
            p.start()
        prog_bar = ProgressBar(len(video_frames))
        for i in range(len(video_frames)):
            t = results.get()
            all_result.append(t)
            prog_bar.update()
        for p in procs:
            p.join()

    # sort results
    all_result = sorted(all_result, key=lambda x: x['frame_index'])

    # generate video
    if (len(all_result) == len(video_frames)) and (save_dir is not None):
        print('\n\nGenerate video:')
        video_name = video_file.strip('\n').split('/')[-1]
        video_path = os.path.join(save_dir, video_name)
        vwriter = cv2.VideoWriter(video_path,
                                  cv2.VideoWriter_fourcc(*'mp4v'),
                                  video_frames.fps, video_frames.resolution)
        prog_bar = ProgressBar(len(video_frames))
        for r in all_result:
            vwriter.write(r['render_image'])
            prog_bar.update()
        vwriter.release()
        print('\nVideo was saved to {}'.format(video_path))

    return all_result
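A minimal call sketch, with placeholder configs; passing save_dir both renders each frame and writes the assembled mp4 there.

# Hypothetical usage; config files are placeholders.
import mmcv

detection_cfg = mmcv.Config.fromfile('configs/detection.py')
estimation_cfg = mmcv.Config.fromfile('configs/estimation.py')
results = inference(detection_cfg, estimation_cfg, 'demo/video.mp4',
                    gpus=1, worker_per_gpu=1, save_dir='./output')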