Example 1
def handle_frame(args, frameId, im, logger, model, dataset):
    #out_name = os.path.join(
    #    args.output_dir, '{}'.format(frameId + '.pdf')
    #)
    logger.info('Processing frame: {}'.format(frameId))
    timers = defaultdict(Timer)
    t = time.time()
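    # all detection heads run on GPU 0 inside this scope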
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
            model, im, None, timers=timers)
    logger.info('Inference time: {:.3f}s'.format(time.time() - t))
    for k, v in timers.items():
        logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
    if frameId == 1:
        logger.info(
            ' \ Note: inference on the first image will be slower than the '
            'rest (caches and auto-tuning need to warm up)')

    vis_utils.vis_one_image(
        im[:, :, ::-1],  # BGR -> RGB for visualization
        '{}'.format(frameId),
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2)
Example 2
    def on_frame(self, im, i, cls_boxes, cls_segms, cls_keyps):
        """
        Args:
            im - the image frame
            i - the index of the image in the sequence
            cls_boxes - detected boxes (of person class)
            cls_segms - detected segments
            cls_keyps - detected key points
        """

        print("\n\n\n\nvis[{}] to {}".format(i, self.output_dir))

        vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            "frame {}".format(i),
            self.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=self.dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2,
        )
Example 3
def main(args):
    logger = logging.getLogger(__name__)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    cfg_orig = load_cfg(envu.yaml_dump(cfg))
    im = cv2.imread(args.im_file)

    if args.rpn_pkl is not None:
        proposal_boxes, _proposal_scores = get_rpn_box_proposals(im, args)
        workspace.ResetWorkspace()
    else:
        proposal_boxes = None

    cls_boxes, cls_segms, cls_keyps, cls_bodys = None, None, None, None
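    # args.models_to_run alternates weights .pkl paths with config .yaml paths;
    # each pair runs in turn, and the latest non-None outputs are kept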
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        cfg.immutable(False)
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            weights_file = pkl
        else:
            weights_file = cfg.TEST.WEIGHTS
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg(cache_urls=False)
        model = model_engine.initialize_model_from_cfg(weights_file)
        with c2_utils.NamedCudaScope(0):
        cls_boxes_, cls_segms_, cls_keyps_, cls_bodys_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        cls_bodys = cls_bodys_ if cls_bodys_ is not None else cls_bodys

        workspace.ResetWorkspace()

    out_name = os.path.join(
        args.output_dir, '{}'.format(os.path.basename(args.im_file) + '.pdf'))
    logger.info('Processing {} -> {}'.format(args.im_file, out_name))

    with open('test_vis.pkl', 'wb') as f:
        pickle.dump(
            {
                'im': im,
                'cls_boxes': np.array(cls_boxes),
                'cls_bodys': np.array(cls_bodys)
            }, f)

    vis_utils.vis_one_image(im[:, :, ::-1],
                            args.im_file,
                            args.output_dir,
                            cls_boxes,
                            cls_segms,
                            cls_keyps,
                            cls_bodys,
                            dataset=dummy_coco_dataset,
                            box_alpha=0.3,
                            show_class=True,
                            thresh=0.7,
                            kp_thresh=2)
Example 4
def main(args):
    logger = logging.getLogger(__name__)
    dummy_nucoco_dataset = dummy_datasets.get_nucoco_dataset()
    cfg_orig = load_cfg(envu.yaml_dump(cfg))
    
    ## Load image
    coco = COCO_PLUS(args.ann_file, args.imgs_dir)
    image_id = coco.dataset['images'][args.im_ind]['id']
    img_path = os.path.join(args.imgs_dir, coco.imgs[image_id]["file_name"])
    im = cv2.imread(img_path)

    ## Get the proposals for this image
    proposals = rrpn_loader(args.rpn_pkl)
    proposal_boxes = proposals[image_id]['boxes']
    _proposal_scores = proposals[image_id]['scores']
    workspace.ResetWorkspace()

    ## run models
    cls_boxes, cls_segms, cls_keyps = None, None, None
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        cfg.immutable(False)
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            weights_file = pkl
        else:
            weights_file = cfg.TEST.WEIGHTS
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg(cache_urls=False)
        model = model_engine.initialize_model_from_cfg(weights_file)
        with c2_utils.NamedCudaScope(0):
            cls_boxes_, cls_segms_, cls_keyps_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        workspace.ResetWorkspace()

    out_name = os.path.join(
        args.output_dir, '{}'.format(os.path.basename(img_path) + '.pdf')
    )
    logger.info('Processing {} -> {}'.format(img_path, out_name))

    vis_utils.vis_one_image(
        im[:, :, ::-1],
        img_path,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_nucoco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2
    )
Example 5
def main(args):
    logger = logging.getLogger(__name__)

    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    for i, weights_file in enumerate(args.weights_list):
        args.weights_list[i] = cache_url(weights_file, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)

    assert not cfg.MODEL.RPN_ONLY, \
        'RPN models are not supported'
    assert not cfg.TEST.PRECOMPUTED_PROPOSALS, \
        'Models that require precomputed proposals are not supported'

    preffix_list = args.preffix_list if len(
        args.preffix_list) else [""] * len(args.weights_list)
    model = infer_engine.initialize_mixed_model_from_cfg(
        args.weights_list, preffix_list=preffix_list)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir,
            '{}'.format(os.path.basename(im_name) + '.' + args.output_ext))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=args.thresh,
            kp_thresh=args.kp_thresh,
            ext=args.output_ext,
            out_when_no_box=args.out_when_no_box)
Example 6
def main(args):
    logger = logging.getLogger(__name__)

    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)

    assert not cfg.MODEL.RPN_ONLY, \
        'RPN models are not supported'
    assert not cfg.TEST.PRECOMPUTED_PROPOSALS, \
        'Models that require precomputed proposals are not supported'

    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.' + args.output_ext)
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=args.thresh,
            kp_thresh=args.kp_thresh,
            ext=args.output_ext,
            out_when_no_box=args.out_when_no_box
        )
Example 7
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        out_pkl = os.path.join(
            args.output_dir,
            'test_vis_{}.pkl'.format(os.path.basename(im_name).split('.')[0]))
        with open(out_pkl, 'wb') as f:
            pickle.dump({'im': im, 'kp': np.array(cls_keyps)}, f)

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )
Example 8
def main(args):
    logger = logging.getLogger(__name__)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    cfg_orig = load_cfg(yaml.dump(cfg))
    im = cv2.imread(args.im_file)

    if args.rpn_pkl is not None:
        proposal_boxes, _proposal_scores = get_rpn_box_proposals(im, args)
        workspace.ResetWorkspace()
    else:
        proposal_boxes = None

    cls_boxes, cls_segms, cls_keyps = None, None, None
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        cfg.immutable(False)
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            weights_file = pkl
        else:
            weights_file = cfg.TEST.WEIGHTS
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg(cache_urls=False)
        model = model_engine.initialize_model_from_cfg(weights_file)
        with c2_utils.NamedCudaScope(0):
            cls_boxes_, cls_segms_, cls_keyps_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        workspace.ResetWorkspace()

    out_name = os.path.join(
        args.output_dir, '{}'.format(os.path.basename(args.im_file) + '.pdf')
    )
    logger.info('Processing {} -> {}'.format(args.im_file, out_name))

    vis_utils.vis_one_image(
        im[:, :, ::-1],
        args.im_file,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_coco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2
    )
Example 9
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()

    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)

    assert all(k in dets for k in ['all_boxes', 'all_segms', 'all_keyps']), \
        'Expected detections pkl file in the format used by test_engine.py'

    all_boxes = dets['all_boxes']
    all_segms = dets['all_segms']
    all_keyps = dets['all_keyps']

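    # detections are indexed [class][image]; empty per-class lists pass through unchanged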
    def id_or_index(ix, val):
        if len(val) == 0:
            return val
        else:
            return val[ix]

    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, len(roidb)))

        im = cv2.imread(entry['image'])
        im_name = os.path.splitext(os.path.basename(entry['image']))[0]

        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        cls_segms_i = [
            id_or_index(ix, cls_k_segms) for cls_k_segms in all_segms
        ]
        cls_keyps_i = [
            id_or_index(ix, cls_k_keyps) for cls_k_keyps in all_keyps
        ]

        vis_utils.vis_one_image(
            im[:, :, ::-1],
            '{:d}_{:s}'.format(ix, im_name),
            os.path.join(output_dir, 'vis'),
            cls_boxes_i,
            segms=cls_segms_i,
            keypoints=cls_keyps_i,
            thresh=thresh,
            box_alpha=0.8,
            dataset=ds,
            show_class=True
        )
Example 10
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()

    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)

    assert all(k in dets for k in ['all_boxes', 'all_segms', 'all_keyps']), \
        'Expected detections pkl file in the format used by test_engine.py'

    all_boxes = dets['all_boxes']
    all_segms = dets['all_segms']
    all_keyps = dets['all_keyps']

    def id_or_index(ix, val):
        if len(val) == 0:
            return val
        else:
            return val[ix]

    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, len(roidb)))

        im = cv2.imread(entry['image'])
        im_name = os.path.splitext(os.path.basename(entry['image']))[0]

        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        cls_segms_i = [
            id_or_index(ix, cls_k_segms) for cls_k_segms in all_segms
        ]
        cls_keyps_i = [
            id_or_index(ix, cls_k_keyps) for cls_k_keyps in all_keyps
        ]

        vis_utils.vis_one_image(
            im[:, :, ::-1],
            '{:d}_{:s}'.format(ix, im_name),
            os.path.join(output_dir, 'vis'),
            cls_boxes_i,
            segms=cls_segms_i,
            keypoints=cls_keyps_i,
            thresh=thresh,
            box_alpha=0.8,
            dataset=ds,
            show_class=True
        )
Example 11
def vis(dataset, roidb):
    tmp = list(roidb)
    tmp.sort(cmp=compare)  # Python 2 sort using the user-defined compare()

    file_name = '/home/wushujie/vis/result_scale_mean.txt'
    with open(file_name, 'w') as file_object:
        for entry in tmp:
            file_object.write(entry['image'])
            file_object.write(' ' + str(entry['entropy']))
            file_object.write('\n')

    max_img = tmp[:20]
    min_img = tmp[-20:]
    for i, entry in enumerate(max_img):
        im = cv2.imread(entry['image'])
        im_name = str(i)  # +'_'+entry['file_name']
        output_dir = '/home/wushujie/vis/max_scale_mean'
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            output_dir,
            entry['result'],
            None,
            None,
            dataset=dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            ext='png',
            out_when_no_box=True)
    for i, entry in enumerate(min_img):
        im = cv2.imread(entry['image'])
        im_name = str(i)
        output_dir = '/home/wushujie/vis/min_scale_mean'
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            output_dir,
            entry['result'],
            None,
            None,
            dataset=dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            ext='png',
            out_when_no_box=True)
Example 12
def inference(cfg_path, weights, img_pillow, output_dir):
    logger = logging.getLogger(__name__)

    merge_cfg_from_file(cfg_path)
    #print( "cfg : ", cfg )
    assert_and_infer_cfg(cache_urls=False, make_immutable=False)

    cfg.NUM_GPUS = 1
    weights = cache_url(weights, cfg.DOWNLOAD_CACHE)
    model = infer_engine.initialize_model_from_cfg(weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    im_name = "test"
    #img_cv = cv2.imread(im_name)
    img_np = np.asarray(img_pillow)
    img_cv = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)

    timers = defaultdict(Timer)
    t = time.time()
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
            model, img_cv, None, timers=timers)

    logger.info('Inference time: {:.3f}s'.format(time.time() - t))
    for k, v in timers.items():
        logger.info(' | {}: {:.3f}s'.format(k, v.average_time))

    vis_utils.vis_one_image(
        img_cv[:, :, ::-1],  # BGR -> RGB for visualization
        im_name,
        output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        cls_bodys,
        dataset=dummy_coco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2)

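    # DensePose's vis_one_image also writes <name>_IUV.png and <name>_INDS.png to output_dir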
    IUV_SaveName = os.path.basename(im_name).split('.')[0] + '_IUV.png'
    INDS_SaveName = os.path.basename(im_name).split('.')[0] + '_INDS.png'
    iuv_pillow = Image.open(os.path.join(output_dir,
                                         '{}'.format(IUV_SaveName)))
    inds_pillow = Image.open(
        os.path.join(output_dir, '{}'.format(INDS_SaveName)))
    return iuv_pillow, inds_pillow
Example 13
    def __getitem__(self, idx):
        im_name = 'DensePoseData/demo_data/xyz.jpg'
        output_dir = 'DensePoseData/'
        img_name1 = os.path.join(self.root_dir1,
                                 self.landmarks_frame.iloc[idx, 0])
        image1 = cv2.imread(img_name1)
        img_name2 = os.path.join(self.root_dir2,
                                 self.landmarks_frame.iloc[idx, 1])
        image2 = cv2.imread(img_name2)
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
                self.model, image1, None, timers=self.timers)
        im1 = vis_utils.vis_one_image(
            image1[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
                self.model, image2, None, timers=self.timers)
        im2 = vis_utils.vis_one_image(
            image2[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
        image3 = cv2.merge((image1, im1, im2))
        sample = {'image1': image1, 'image2': image2, 'image3': image3}

        if self.transform:
            sample = self.transform(sample)

        return sample
Example 14
def main(args):
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)

    assert not cfg.MODEL.RPN_ONLY, \
        'RPN models are not supported'
    assert not cfg.TEST.PRECOMPUTED_PROPOSALS, \
        'Models that require precomputed proposals are not supported'

    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, 'detection_' + '{}'.format(os.path.basename(im_name))
        )
        print('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        #print('Inference time: {:.3f}s'.format(time.time() - t))
        #for k, v in timers.items():
        #    print(' | {}: {:.3f}s'.format(k, v.average_time))
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            out_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=args.thresh,
            kp_thresh=args.kp_thresh,
            out_when_no_box=args.out_when_no_box
        )
Example 15
def extract_iuv(image_path, model, infer_engine):
    im = cv2.imread(image_path)

    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
            model, im, None
        )

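    # assumption: with im_name and output_dir set to None, this fork's
    # vis_one_image returns the rendered IUV image instead of writing files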
    iuv_out = vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            None,
            None,
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )

    return iuv_out
Example 16
def single_process(args, dummy_coco_dataset, im, im_name, model):
    timers = defaultdict(Timer)
    t = time.time()
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
            model, im, None, timers=timers)
    logger.info('Inference time: {:.3f}s'.format(time.time() - t))
    for k, v in timers.items():
        logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
    # if i == 0:
    #     logger.info(
    #         ' \ Note: inference on the first image will be slower than the '
    #         'rest (caches and auto-tuning need to warm up)'
    #     )
    print("预测结果:", cls_boxes, cls_segms, cls_keyps)
    new_img = vis_utils.vis_one_image(
        im[:, :, ::-1],  # BGR -> RGB for visualization
        im_name,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_coco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=args.thresh,
        kp_thresh=args.kp_thresh,
        ext=args.output_ext,
        out_when_no_box=args.out_when_no_box)
    return new_img
Example 17
 def draw_bbox(self, im, cls_boxes):
     vis_utils.vis_one_image(
         im[:, :, ::-1],  # BGR -> RGB for visualization
         'bbox',
         '/tmp/',
         cls_boxes,
         None,
         None,
         dataset=self.dummy_coco_dataset,
         box_alpha=0.3,
         show_class=True,
         #thresh=0.7,
         thresh=self.SCORE_THRESH,
         kp_thresh=2,
         ext='png',
         out_when_no_box=True)
     return cv2.imread('/tmp/bbox.png')[:, :, ::-1]
Example 18
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    print("capturing video " + args.input)
    cap = cv2.VideoCapture(args.input)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # pdb.set_trace()
    grab = 1
    if not cap.isOpened():
        print("Error opening video stream or file")
        return
    while cap.isOpened() and grab <= total_frames:
        grab += 1
        ret_val, im = cap.read()
        #skips intermediate frames
        #if grab%2 !=0:
        #    continue
        #uncomment to resize image
        #im = cv2.resize(im, (int(1280/1),int(720/1)))
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        output_name = 'frame' + str(grab).zfill(4) + '.mp4'
        print("| Analysed frame {0} / {1}  in {2}ms".format(
            grab, total_frames, int(1000. * (time.time() - t))))
        #print('\t | Inference time: {:.3f}s'.format(time.time() - t))
        #for k, v in timers.items():
        #    print('\t | {}: {:.3f}s'.format(k, v.average_time))
        ret = vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            output_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=False,
            thresh=0.7,
            kp_thresh=2)

    cap.release()
    cv2.destroyAllWindows()
Example 19
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    h5py_file_path = ('/media/hdd1/tanya/open-pose/'
                      'paired_filenames512_image_keypoints512_main.h5')
    hf = h5py.File(h5py_file_path, 'r')
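    # the source HDF5 provides 'images' and 'keypoints'; a matching 'IUV' dataset is added below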

    IUV_image_list = []


    for i in range(len(hf['images'])):
        im = hf['images'][i]
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        IUV_image = vis_utils.vis_one_image(
                im[:, :, ::-1],  # BGR -> RGB for visualization
                'dummy',
                args.output_dir,
                cls_boxes,
                cls_segms,
                cls_keyps,
                cls_bodys,
                dataset=dummy_coco_dataset,
                box_alpha=0.3,
                show_class=True,
                thresh=0.7,
                kp_thresh=2)
        IUV_image_list.append(IUV_image)
    IUV_images_final = np.stack(IUV_image_list, 0)
    with h5py.File('./paired_filenames512_image_keypoints_withIUV.h5', 'w') as f:
        f.create_dataset('images', data=np.array(hf['images']))
        f.create_dataset('keypoints', data=np.array(hf['keypoints']))
        f.create_dataset('IUV', data=IUV_images_final)
Example 20
 def run(self, im, im_name, output_dir):
     timers = defaultdict(Timer)
     t = time.time()
     with c2_utils.NamedCudaScope(0):
         cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
             self.model, im, None, timers=timers
         )
     vis_utils.vis_one_image(
         im[:, :, ::-1],  # BGR -> RGB for visualization
         im_name,
         output_dir,
         cls_boxes,
         cls_segms,
         cls_keyps,
         cls_bodys,
         dataset=self.dummy_coco_dataset,
         box_alpha=0.3,
         show_class=True,
         thresh=0.7,
         kp_thresh=2
     )
     
Example 21
def main(args, model, logger):

    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    im_list = ["DensePoseData/demo_data/demo_im.jpg"]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
Example 22
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dataset_name = cfg.TEST.DATASETS[0]
    dummy_coco_dataset = JsonDataset(dataset_name)
    # dummy_coco_dataset = dummy_datasets.get_paris_dataset()

    vid_dir = '/coco/paris_dataset/PARIS_demo.mp4'
    cap = cv2.VideoCapture(vid_dir)
    ret, im = cap.read()
    count = 0
    while ret:
        im_name = str(count)
        out_name = os.path.join(args.output_dir,
                                '{}'.format(str(count) + '.jpg'))
        logger.info('Processing frame -> {}'.format(count))
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))

        vis_im = vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
        # cv2.imshow('frame', vis_im)
        # cv2.waitKey(10)

        ret, im = cap.read()
        count += 1
    cap.release()
    cv2.destroyAllWindows()
Example 23
def main(args):
    logger = logging.getLogger(__name__)

    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)

    assert not cfg.MODEL.RPN_ONLY, \
        'RPN models are not supported'
    assert not cfg.TEST.PRECOMPUTED_PROPOSALS, \
        'Models that require precomputed proposals are not supported'

    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    jsonboxes = []
    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir,
            '{}'.format(os.path.basename(im_name) + '.' + args.output_ext))
        file_name = os.path.basename(im_name)
        (image_name, ext) = os.path.splitext(os.path.basename(im_name))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)

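        # convert_from_cls_format flattens the per-class lists into flat arrays
        # plus a parallel list of class ids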
        boxes, segms, keypoints, classes = convert_from_cls_format(
            cls_boxes, cls_segms, cls_keyps)
        jsonboxes_perimage = {
            'boxes': boxes,
            'classes': classes,
            'image_id': int(image_name),
            'file_name': file_name
        }
        jsonboxes_perimage['boxes'] = jsonboxes_perimage['boxes'].tolist()
        jsonboxes.append(jsonboxes_perimage)

        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=args.thresh,
            kp_thresh=args.kp_thresh,
            ext=args.output_ext,
            out_when_no_box=args.out_when_no_box)

    Dataset_name = args.dataset_name
    direction = '/home/tecnimaq/Gabriela/Detectron/json_dump'
    jsonboxesfile = 'jsonboxes_' + Dataset_name + '.json'
    json_path = os.path.join(direction, jsonboxesfile)
    with open(json_path, 'w') as thisfile:
        json.dump(jsonboxes, thisfile)
    info.get_info(json_path, Dataset_name)
Example 24
def test_net(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    ind_range=None,
    gpu_id=0
):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'

    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
        dataset_name, proposal_file, ind_range
    )
    model = initialize_model_from_cfg(weights_file, gpu_id=gpu_id)
    
    # pose_pred_model = generate_poseJPPNet_pred_model()
    
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    timers = defaultdict(Timer)
    for i, entry in enumerate(roidb):
        if cfg.TEST.PRECOMPUTED_PROPOSALS:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = entry['boxes'][entry['gt_classes'] == 0]
            if len(box_proposals) == 0:
                continue
        else:
            # Faster R-CNN type models generate proposals on-the-fly with an
            # in-network RPN; 1-stage models don't require proposals.
            box_proposals = None

        im = cv2.imread(entry['image'])
        with c2_utils.NamedCudaScope(gpu_id):
            cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                model, im, box_proposals, timers, entry
            )
            # test_res_top.feedBlob_run(model, im, entry)
            mask_res_top = test_res_top.res_top_result(model, entry)

        extend_results(i, all_boxes, cls_boxes_i)
        if cls_segms_i is not None:
            extend_results(i, all_segms, cls_segms_i)
        if cls_keyps_i is not None:
            extend_results(i, all_keyps, cls_keyps_i)

        if i % 10 == 0:  # Reduce log file size
            ave_total_time = np.sum([t.average_time for t in timers.values()])
            eta_seconds = ave_total_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            det_time = (
                timers['im_detect_bbox'].average_time +
                timers['im_detect_mask'].average_time +
                timers['im_detect_keypoints'].average_time
            )
            misc_time = (
                timers['misc_bbox'].average_time +
                timers['misc_mask'].average_time +
                timers['misc_keypoints'].average_time
            )
            logger.info(
                (
                    'im_detect: range [{:d}, {:d}] of {:d}: '
                    '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'
                ).format(
                    start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
                    start_ind + num_images, det_time, misc_time, eta
                )
            )

        if cfg.VIS:
            im_name = os.path.splitext(os.path.basename(entry['image']))[0]
            mask_png_20 = vis_utils.vis_one_image(
                im[:, :, ::-1],
                '{:d}_{:s}'.format(i, im_name),
                os.path.join(output_dir, 'vis_9k'),
                cls_boxes_i,
                segms=cls_segms_i,
                keypoints=cls_keyps_i,
                thresh=cfg.VIS_TH,
                box_alpha=0.8,
                dataset=dataset,
                show_class=True
            )
            # fusion
            mask_fusion = mask_png_20 + mask_res_top
            mask_fusion_out = os.path.join(output_dir, 'fusion')
            if not os.path.exists(mask_fusion_out):
                os.makedirs(mask_fusion_out)
            # logger.info('fusion save to {}'.format(mask_fusion_out))
            cv2.imwrite(os.path.join(mask_fusion_out, entry['id']+'.png'), mask_fusion.argmax(0))

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps
Example 25
def main(args):
    glob_keypoints = []

    logger = logging.getLogger(__name__)

    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)

    assert not cfg.MODEL.RPN_ONLY, \
        'RPN models are not supported'
    assert not cfg.TEST.PRECOMPUTED_PROPOSALS, \
        'Models that require precomputed proposals are not supported'

    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*' + '.png')
    else:
        im_list = [args.im_or_folder]
    im_list = sorted(im_list)
    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.' + args.output_ext)
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=args.thresh,
            kp_thresh=args.kp_thresh,
            ext=args.output_ext,
            out_when_no_box=args.out_when_no_box
        )

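        # cls_boxes[1] holds 'person' detections as [x1, y1, x2, y2, score];
        # keep only the keypoints of the highest-scoring person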
        cls_boxes_np = np.asarray(cls_boxes)
        cls_boxes_prob = cls_boxes_np[1][:,4]
        idx_max_prob = np.argmax(cls_boxes_prob)

        cls_keyps_max_prob = cls_keyps[1][idx_max_prob]
        pose_x_y_prob_after_softmax = cls_keyps_max_prob[[0,1,3]]
        glob_keypoints.append(np.transpose(pose_x_y_prob_after_softmax))

    dictionary_keypoints = {'S1': {'Directions 1': np.asarray([glob_keypoints])}}
    metadata = {'layout_name': 'h36m', 'num_joints': 17, 'keypoints_symmetry': [[4, 5, 6, 11, 12, 13], [1, 2, 3, 14, 15, 16]]}
    # np.savez(os.path.join('/home/narvis/Dev/VideoPose3D/data', "data_2d_detections.npz"), metadata=metadata, positions_2d=dictionary_keypoints)
    np.savez(os.path.join(args.output_dir, "data_2d_detections.npz"), metadata=metadata, positions_2d=dictionary_keypoints)
Example 26
def main(args):
    global dot_count
    dot_count = 0
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    host = '127.0.0.1'
    port = 9998
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((host, port))
    s.listen(5)
    s.settimeout(2)
    SIZE = 4096
    #s.settimeout(10)
    multiTracker = cv2.MultiTracker_create()
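    # protocol: each request is "<image path>|<device address>"; the reply is
    # "<person count> cx1,cy1,cx2,cy2,..." built from box centers scoring above 0.7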
    while True:
        try:
            sock, addr = s.accept()
            receive = sock.recv(SIZE).decode()
            tmp = str(receive).split("|")
            path = tmp[0]
            device_addr = tmp[1]
            print("path: {0}, device_addr: {1}".format(path, device_addr))
            im_list = []
            im_list.append(path)
            for i, im_name in enumerate(im_list):
                out_name = os.path.join(args.output_dir,
                                        '{}'.format(os.path.basename(im_name)))
                try:
                    logger.info('Processing {} -> {}'.format(
                        im_name, out_name))
                    im = cv2.imread(im_name)
                    timers = defaultdict(Timer)
                    t = time.time()
                    with c2_utils.NamedCudaScope(0):
                        cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
                            model, im, None, timers=timers)
                    logger.info('Inference time: {:.3f}s'.format(time.time() -
                                                                 t))
                    for k, v in timers.items():
                        logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
                    if i == 0:
                        logger.info(
                            ' \ Note: inference on the first image will be slower than the '
                            'rest (caches and auto-tuning need to warm up)')

                    vis_utils.vis_one_image(
                        im[:, :, ::-1],  # BGR -> RGB for visualization
                        im_name,
                        args.output_dir,
                        cls_boxes,
                        cls_segms,
                        cls_keyps,
                        cls_bodys,
                        dataset=dummy_coco_dataset,
                        box_alpha=0.3,
                        show_class=True,
                        thresh=0.7,
                        kp_thresh=2)
                    print("len(cls_boxes[1])-1: {0}".format(
                        len(cls_boxes[1]) - 1))

                except Exception:
                    print("failed to process: {0}".format(out_name))

                count_people = 0
                now_boxes = []
                now_center = []
                try:
                    for i in range(len(cls_boxes[1])):
                        if cls_boxes[1][i][4] > 0.7:
                            now_boxes.append(cls_boxes[1][i][:4])
                            now_center.append([
                                int((cls_boxes[1][i][0] + cls_boxes[1][i][2])
                                    // 2),
                                int((cls_boxes[1][i][1] + cls_boxes[1][i][3])
                                    // 2)
                            ])
                            count_people = count_people + 1
                except Exception:
                    count_people = 0
                print("now_center: {0}".format(now_center))
                print("count_people: {0}".format(count_people))
                ans_command = str(count_people) + " "
                for i in range(int(count_people)):
                    ans_command = ans_command + str(
                        now_center[i][0]) + "," + str(now_center[i][1]) + ","
                ans_command = ans_command.strip(",")
                print(ans_command)
                sock.send(ans_command.encode())
                im_name = ""
        except Exception as e:
            print('                                                 ',
                  end='\r')
            error_text = "Exception: " + str(e) + ", reconnecting "
            for i in range(dot_count):
                error_text = error_text + "."
            dot_count = dot_count + 1
            if dot_count > 3:
                dot_count = 0
            print(error_text, end='\r')
    s.close()
Example 27
def infer_method(im, mymethod="back"):
    logger = logging.getLogger(__name__)
    #styleimage = style.style_method()
    merge_cfg_from_file(
        "configs/DensePoseKeyPointsMask_ResNet50_FPN_s1x-e2e.yaml")
    cfg.NUM_GPUS = 1
    myweights = cache_url("DensePoseKeyPointsMask_ResNet50_FPN_s1x-e2e.pkl",
                          cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(myweights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    out_name = os.path.join("tools/output",
                            '{}'.format(os.path.basename("myresult") + '.jpg'))
    #logger.info('Processing {} -> {}'.format(im_name, out_name))
    im_name = "love.jpg"
    im2 = cv2.imread("tools/iron8.jpg")
    timers = defaultdict(Timer)
    t = time.time()

    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
            model, im, None, timers=timers)
        if im2 is not None:
            cls_boxes2, cls_segms2, cls_keyps2, cls_bodys2 = infer_engine.im_detect_all(
                model, im2, None, timers=timers)

        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))

        if mymethod == "back":
            vis_utils.change_background(
                im[:, :, ::-1],  # BGR -> RGB for visualization
                im2[:, :, ::-1],
                im_name,
                "static/img",
                cls_boxes,
                cls_segms,
                cls_keyps,
                cls_bodys,
                dataset=dummy_coco_dataset,
                box_alpha=0.3,
                show_class=True,
                thresh=0.7,
                kp_thresh=2)
        elif mymethod == "iron":
            vis_utils.ironman(
                im[:, :, ::-1],  # BGR -> RGB for visualization
                im2[:, :, ::-1],
                im_name,
                args.output_dir,
                cls_boxes,
                cls_boxes2,
                cls_segms,
                cls_keyps,
                cls_bodys,
                cls_segms2,
                cls_keyps2,
                cls_bodys2,
                dataset=dummy_coco_dataset,
                box_alpha=0.3,
                show_class=True,
                thresh=0.7,
                kp_thresh=2)
        elif mymethod == 'style_b':
            styleimage = cv2.cvtColor(
                numpy.array(style.stylize_img(im_name, args.image_second)),
                cv2.COLOR_RGB2BGR)
            resized_im = style.tensor_to_image(style.load_to_mask(im_name))
            opencvImage = cv2.cvtColor(numpy.array(resized_im),
                                       cv2.COLOR_RGB2BGR)
            print(opencvImage)
            with c2_utils.NamedCudaScope(0):
                bo, se, ke, bod = infer_engine.im_detect_all(model,
                                                             opencvImage,
                                                             None,
                                                             timers=timers)
            vis_utils.change_background(
                opencvImage[:, :, ::-1],  # BGR -> RGB for visualization
                styleimage[:, :, ::-1],
                "stylized_img.jpg",
                args.output_dir,
                bo,
                se,
                ke,
                bod,
                dataset=dummy_coco_dataset,
                box_alpha=0.3,
                show_class=True,
                thresh=0.7,
                kp_thresh=2)
        else:
            vis_utils.vis_one_image(
                im[:, :, ::-1],  # BGR -> RGB for visualization
                im_name,
                args.output_dir,
                cls_boxes,
                cls_segms,
                cls_keyps,
                cls_bodys,
                dataset=dummy_coco_dataset,
                box_alpha=0.3,
                show_class=True,
                thresh=0.7,
                kp_thresh=2)
Example 28
def densepose_coach(args):
    # args = parse_args()
    # print(args)
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    # if os.path.isdir(args.video) #

    im_list = video2imgs(args.video)

    # if os.path.isdir(args.im_or_folder):
    #     im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    # else:
    #     im_list = [args.im_or_folder]

    # vis_imgshape = vis_img.shape
    # size = (vis_imgshape[1], vis_imgshape[0])
    # fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # size = (1600,900)
    # videoWriter = cv2.VideoWriter(args.output_dir + os.path.basename(args.video), fourcc, 20, size)
    # time_start = time.time()

    for i, im in enumerate(im_list):
        # out_name = os.path.join(
        #     args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        # )
        # logger.info('Processing {} -> {}'.format(im_name, out_name))
        # im = cv2.imread(im_name)
        # im = im[0:480, 210:430]   # crop coordinates are [y0:y1, x0:x1]
        if i % 6 == 0:
            timers = defaultdict(Timer)
            t = time.time()
            with c2_utils.NamedCudaScope(0):
                cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
                    model, im, None, timers=timers)
            ##############################20190123
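            # flatten per-class boxes/keypoints, then keep the keypoints of the
            # highest-scoring detection (column 4 of each box row is the score)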
            box_list = [b for b in cls_boxes if len(b) > 0]
            if len(box_list) > 0:
                boxes = np.concatenate(box_list)
            else:
                boxes = None
            if cls_keyps is not None:
                keyps = [k for klist in cls_keyps for k in klist]
            else:
                keyps = None
            a = np.argmax(boxes, axis=0)
            j = a[4]
            kps = keyps[j]
            kpss = np.expand_dims(kps, axis=0)
            if i == 0:
                kpsstack = kpss
            else:
                kpsstack = np.vstack((kpsstack, kpss))

            All_Coords = vis_utils.vis_one_image(
                im[:, :, ::-1],  # BGR -> RGB for visualization
                None,  # im_name: no per-frame file is written
                None,  # output_dir
                cls_boxes,
                cls_segms,
                cls_keyps,
                cls_bodys,
                dataset=dummy_coco_dataset,
                box_alpha=0.3,
                show_class=True,
                thresh=0.7,
                kp_thresh=2)
            if cls_keyps is None:
                vis_img = visualize(im, All_Coords)
            else:
                vis_img = All_Coords

            if i == 0:
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                vis_imgshape = vis_img.shape
                size = (vis_imgshape[1], vis_imgshape[0])
                videoWriter = cv2.VideoWriter(
                    os.path.join(
                        args.output_dir,
                        os.path.basename(args.video).split('.')[0] + '.mp4'),
                    fourcc, 25, size)
            videoWriter.write(vis_img)
    videoWriter.release()

    data1 = kpsstack[:, 0:2, :]
    classify_action, count_action, maxList, maxheight, start_frame = classify_count(
        data1)
    return classify_action, count_action, maxList, maxheight, start_frame, data1
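The selection step above relies on Detectron's box layout: every class entry in
cls_boxes is an (N, 5) array whose fifth column is the detection score, and the
flattened keypoint list lines up index-for-index with the concatenated boxes. A
minimal standalone sketch of that "keep the best detection's keypoints" step,
assuming those conventions (the function name is illustrative, not part of the
original code):

import numpy as np

def top_scoring_keypoints(boxes, keyps):
    """Return the keypoints of the highest-scoring detection, or None.

    Assumes boxes is an (N, 5) array with the score in column 4 and keyps
    is a list of N keypoint arrays aligned with the rows of boxes.
    """
    if boxes is None or keyps is None or len(boxes) == 0:
        return None
    j = int(np.argmax(boxes[:, 4]))  # row index of the best detection
    return keyps[j]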
Example n. 29
def test_net_batch(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    ind_range=None,
    gpu_id=0
):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU, using batch inference
    """
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'

    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
        dataset_name, proposal_file, ind_range
    )

    model = initialize_model_from_cfg(weights_file, gpu_id=gpu_id)
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    timers = defaultdict(Timer)

    ims = []
    for i, entry in enumerate(roidb):
        if cfg.TEST.PRECOMPUTED_PROPOSALS:
            raise NotImplementedError('Precomputed proposals not implemented for batch inference, set TEST.IMS_PER_BATCH to 1')
        else:
            # Faster R-CNN type models generate proposals on-the-fly with an
            # in-network RPN; 1-stage models don't require proposals.
            box_proposals = None

        im = cv2.imread(entry['image'])
        ims.append(im)

        if not ((len(ims) == cfg.TEST.IMS_PER_BATCH) or (i == (num_images - 1))):
            continue

        with c2_utils.NamedCudaScope(gpu_id):
            cls_boxes_batch, cls_segms_batch, cls_keyps_batch = im_detect_all_batch(
                model, ims, box_proposals, timers
            )

        for n in range(len(ims)):
            local_i = i - len(ims) + n + 1
            cls_boxes_i = cls_boxes_batch[n]
            cls_segms_i = cls_segms_batch[n] if cls_segms_batch else None
            cls_keyps_i = cls_keyps_batch[n] if cls_keyps_batch else None
            extend_results(local_i, all_boxes, cls_boxes_i)
            if cls_segms_i is not None:
                extend_results(local_i, all_segms, cls_segms_i)
            if cls_keyps_i is not None:
                extend_results(local_i, all_keyps, cls_keyps_i)

            if local_i % (10 * cfg.TEST.IMS_PER_BATCH) == 0:  # Reduce log file size
                ave_total_time = np.sum([t.average_time for t in timers.values()])
                eta_seconds = ave_total_time * (num_images - local_i - 1) / float(cfg.TEST.IMS_PER_BATCH)
                eta = str(datetime.timedelta(seconds=int(eta_seconds)))
                det_time = (
                    timers['im_detect_bbox'].average_time +
                    timers['im_detect_mask'].average_time +
                    timers['im_detect_keypoints'].average_time
                )
                misc_time = (
                    timers['misc_bbox'].average_time +
                    timers['misc_mask'].average_time +
                    timers['misc_keypoints'].average_time
                )
                logger.info(
                    (
                        'im_detect: range [{:d}, {:d}] of {:d}: '
                        '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'
                    ).format(
                        start_ind + 1, end_ind, total_num_images, start_ind + local_i + 1,
                        start_ind + num_images, det_time, misc_time, eta
                    )
                )

            # This will now only show the last image of each batch
            if cfg.VIS:
                im_name = os.path.splitext(os.path.basename(entry['image']))[0]
                vis_utils.vis_one_image(
                    im[:, :, ::-1],
                    '{:d}_{:s}'.format(i, im_name),
                    os.path.join(output_dir, 'vis'),
                    cls_boxes_i,
                    segms=cls_segms_i,
                    keypoints=cls_keyps_i,
                    thresh=cfg.VIS_TH,
                    box_alpha=0.8,
                    dataset=dataset,
                    show_class=True
                )
        ims = []

    cfg_yaml = envu.yaml_dump(cfg)
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps
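The index bookkeeping above is the delicate part of batched inference: images
are buffered until a full batch (or the final partial batch) is ready, and the
n-th result of a batch flushed at global index i belongs to image
i - len(ims) + n + 1. A minimal sketch of the same accumulate-and-flush
pattern, with a hypothetical process_batch standing in for im_detect_all_batch:

def run_batched(items, batch_size, process_batch):
    """Process items in batches while mapping results back to global indices."""
    results = [None] * len(items)
    buf = []
    for i, item in enumerate(items):
        buf.append(item)
        # Flush on a full batch, or on the final (possibly short) batch.
        if len(buf) < batch_size and i != len(items) - 1:
            continue
        outputs = process_batch(buf)
        for n, out in enumerate(outputs):
            results[i - len(buf) + n + 1] = out  # global index of batch slot n
        buf = []
    return results

# run_batched(list(range(7)), 3, lambda b: [x * x for x in b])
# -> [0, 1, 4, 9, 16, 25, 36]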
Example n. 30
def test_net(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    ind_range=None,
    gpu_id=0
):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'

    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
        dataset_name, proposal_file, ind_range
    )
    model = initialize_model_from_cfg(weights_file, gpu_id=gpu_id)
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps, all_bodys = \
        empty_results(num_classes, num_images)
    timers = defaultdict(Timer)
    for i, entry in enumerate(roidb):
        if 'has_no_densepose' not in entry:
            if cfg.TEST.PRECOMPUTED_PROPOSALS:
                # The roidb may contain ground-truth rois (for example, if the roidb
                # comes from the training or val split). We only want to evaluate
                # detection on the *non*-ground-truth rois. We select only the rois
                # that have the gt_classes field set to 0, which means there's no
                # ground truth.
                box_proposals = entry['boxes'][entry['gt_classes'] == 0]
                if len(box_proposals) == 0:
                    continue
            else:
                # Faster R-CNN type models generate proposals on-the-fly with an
                # in-network RPN; 1-stage models don't require proposals.
                box_proposals = None

            im = cv2.imread(entry['image'])
            with c2_utils.NamedCudaScope(gpu_id):
                cls_boxes_i, cls_segms_i, cls_keyps_i, cls_bodys_i = \
                    im_detect_all(model, im, box_proposals, timers)

            extend_results(i, all_boxes, cls_boxes_i)
            if cls_segms_i is not None:
                extend_results(i, all_segms, cls_segms_i)
            if cls_keyps_i is not None:
                extend_results(i, all_keyps, cls_keyps_i)
            if cls_bodys_i is not None:
                extend_results(i, all_bodys, cls_bodys_i)

        if i % 10 == 0:  # Reduce log file size
            ave_total_time = np.sum([t.average_time for t in timers.values()])
            eta_seconds = ave_total_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            det_time = (
                timers['im_detect_bbox'].average_time +
                timers['im_detect_mask'].average_time +
                timers['im_detect_keypoints'].average_time +
                timers['im_detect_body_uv'].average_time
            )
            misc_time = (
                timers['misc_bbox'].average_time +
                timers['misc_mask'].average_time +
                timers['misc_keypoints'].average_time +
                timers['misc_body_uv'].average_time
            )
            logger.info(
                (
                    'im_detect: range [{:d}, {:d}] of {:d}: '
                    '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'
                ).format(
                    start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
                    start_ind + num_images, det_time, misc_time, eta
                )
            )

        if cfg.VIS and 'has_no_densepose' not in entry:
            im_name = os.path.splitext(os.path.basename(entry['image']))[0]
            vis_utils.vis_one_image(
                im[:, :, ::-1],
                '{:d}_{:s}'.format(i, im_name),
                os.path.join(output_dir, 'vis'),
                cls_boxes_i,
                segms=cls_segms_i,
                keypoints=cls_keyps_i,
                thresh=cfg.VIS_TH,
                box_alpha=0.8,
                dataset=dataset,
                show_class=True
            )

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            all_bodys=all_bodys,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps, all_bodys
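When a dataset is sharded across workers, ind_range selects the slice this
worker handles and the shard name encodes the range, so a later merge step can
find every piece. A one-line illustration of the naming convention used above
(the helper name is hypothetical):

def det_filename(ind_range=None):
    if ind_range is not None:
        return 'detection_range_%s_%s.pkl' % tuple(ind_range)
    return 'detections.pkl'

assert det_filename((0, 5000)) == 'detection_range_0_5000.pkl'
assert det_filename() == 'detections.pkl'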
Example n. 31
def test_net(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    ind_range=None,
    gpu_id=0
):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'

    if cfg.TEST.IMS_PER_BATCH != 1:
        return test_net_batch(weights_file, dataset_name, proposal_file, output_dir, ind_range, gpu_id)

    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
        dataset_name, proposal_file, ind_range
    )

    model = initialize_model_from_cfg(weights_file, gpu_id=gpu_id)
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    timers = defaultdict(Timer)
    for i, entry in enumerate(roidb):
        if cfg.TEST.PRECOMPUTED_PROPOSALS:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = entry['boxes'][entry['gt_classes'] == 0]
            if len(box_proposals) == 0:
                continue
        else:
            # Faster R-CNN type models generate proposals on-the-fly with an
            # in-network RPN; 1-stage models don't require proposals.
            box_proposals = None

        im = cv2.imread(entry['image'])
        with c2_utils.NamedCudaScope(gpu_id):
            cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                model, im, box_proposals, timers
            )

        extend_results(i, all_boxes, cls_boxes_i)
        if cls_segms_i is not None:
            extend_results(i, all_segms, cls_segms_i)
        if cls_keyps_i is not None:
            extend_results(i, all_keyps, cls_keyps_i)

        if i % 10 == 0:  # Reduce log file size
            ave_total_time = np.sum([t.average_time for t in timers.values()])
            eta_seconds = ave_total_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            det_time = (
                timers['im_detect_bbox'].average_time +
                timers['im_detect_mask'].average_time +
                timers['im_detect_keypoints'].average_time
            )
            misc_time = (
                timers['misc_bbox'].average_time +
                timers['misc_mask'].average_time +
                timers['misc_keypoints'].average_time
            )
            logger.info(
                (
                    'im_detect: range [{:d}, {:d}] of {:d}: '
                    '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'
                ).format(
                    start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
                    start_ind + num_images, det_time, misc_time, eta
                )
            )

        if cfg.VIS:
            im_name = os.path.splitext(os.path.basename(entry['image']))[0]
            vis_utils.vis_one_image(
                im[:, :, ::-1],
                '{:d}_{:s}'.format(i, im_name),
                os.path.join(output_dir, 'vis'),
                cls_boxes_i,
                segms=cls_segms_i,
                keypoints=cls_keyps_i,
                thresh=cfg.VIS_TH,
                box_alpha=0.8,
                dataset=dataset,
                show_class=True
            )

    cfg_yaml = envu.yaml_dump(cfg)
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps
Example n. 32
    def infer(im,
              i,
              output_name='None',
              video='None',
              mot_output=None,
              json_output=None,
              masks_output=None):
        # Avoid shared mutable default arguments: make fresh containers per call.
        if mot_output is None:
            mot_output = np.empty((0, 10), float)
        if json_output is None:
            json_output = []
        if masks_output is None:
            masks_output = []
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')
        boxes, segms, keypoints, classes = vis_utils.convert_from_cls_format(
            cls_boxes, cls_segms, cls_keyps)

        if segms is not None:
            masks = mask_util.decode(segms)
        else:
            masks = np.asarray([[[]]])  # an empty array with shape[2] == 0
        all_contours = []  # reset on every call; one entry per instance mask
        for mask_idx in range(masks.shape[2]):
            # Copy the mask because cv2.findContours may modify its input.
            _, contours, _ = cv2.findContours(
                masks[..., mask_idx].copy(), cv2.RETR_LIST,
                cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x returns (image, contours, hierarchy)
            # Allows multiple contours per mask, though in practice there is one.
            all_contours.append(contours)

        if boxes is None:
            boxes = []
        else:
            boxes = boxes.tolist()
            print("classes are {}".format(classes))

        # create the mot formated row
        def mot_row(i, boxes, classes):
            """<frame>, <id=class>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x=-1>, <y=-1>, <z=-1>
            """
            assert len(boxes) == len(
                classes), "boxes and classes must have the same length"
            out_ = np.empty((0, 10), float)
            for box_id, box_ in enumerate(boxes):
                # Boxes are [x1, y1, x2, y2, conf]; MOT uses left/top/width/height.
                out_ = np.append(out_,
                                 np.array([[
                                     i, classes[box_id], box_[0], box_[1],
                                     box_[2] - box_[0], box_[3] - box_[1],
                                     box_[4], -1.0, -1.0, -1.0
                                 ]]),
                                 axis=0)
            return out_

        record = {
            'video': video,
            'frame': i,
            'boxes': boxes,
            'classes': classes,
            'contours': [[c.tolist() for c in some_contours]
                         for some_contours in all_contours]
        }
        if args.mot_format:
            # TODO: actually write out the class of the detection
            mot_output = np.append(mot_output, mot_row(i, boxes, classes), axis=0)
        json_output.append(record)

        if i % 100 == 0:
            print("about to save files")
            # Flush accumulated results to disk periodically to bound memory use.
            save_files(
                args, im_name, output_basename, mot_output, json_output,
                masks_output
            )
            json_output = []
            mot_output = np.empty((0, 10), float)
            masks_output = []

        print('output name {}'.format(output_name))
        VISUALIZATION_FREQUENCY = 100  # visualize only every 100th frame
        if args.visualize and i % VISUALIZATION_FREQUENCY == 0:
            start_time = time.time()
            vis_utils.vis_one_image(
                im[:, :, ::-1],  # BGR -> RGB for visualization
                output_name,
                "{}/images".format(args.output_dir),
                cls_boxes,
                cls_segms,
                cls_keyps,
                dataset=dummy_coco_dataset,
                box_alpha=0.3,
                show_class=True,
                thresh=0.4,  # change the display confidence
                kp_thresh=2,
                ext=args.output_ext,
                out_when_no_box=args.out_when_no_box)
            print(
                "only visualizing every {}th frame, it took {} seconds".format(
                    VISUALIZATION_FREQUENCY,
                    time.time() - start_time))
        if args.mot_format:
            return mot_output
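mot_row above packs each [x1, y1, x2, y2, score] detection into the MOT text
layout <frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>,
-1, -1, -1. A self-contained sketch of that conversion with a worked example
(the function name is illustrative):

import numpy as np

def boxes_to_mot(frame, boxes, classes):
    """Convert [x1, y1, x2, y2, score] boxes to (N, 10) MOT rows."""
    rows = np.empty((0, 10), float)
    for cls, (x1, y1, x2, y2, score) in zip(classes, boxes):
        rows = np.append(
            rows,
            [[frame, cls, x1, y1, x2 - x1, y2 - y1, score, -1.0, -1.0, -1.0]],
            axis=0)
    return rows

# A 10x20 box at (5, 5) with score 0.9 in frame 3:
# boxes_to_mot(3, [[5, 5, 15, 25, 0.9]], [1])
# -> [[ 3.   1.   5.   5.  10.  20.   0.9 -1.  -1.  -1. ]]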
Example n. 33
    def next_batch(self):
        data_blob = np.zeros(
            (self.batch_size, input_size[0], input_size[1], 40),
            dtype=np.float32)
        gt_blob = np.zeros((self.batch_size, input_size[0], input_size[1]),
                           dtype=np.int32)
        batch_id = 0
        while True:
            entry = self.roidb[self.cur]
            self.cur += 1
            if self.cur >= self.num_samples:
                self.cur = 0
                if self.is_train:  # shuffle
                    random.shuffle(self.seq)

            im = cv2.imread(entry['image'])
            label = cv2.imread(entry['label'], 0)
            # Run the detection model on this image to get per-instance results.
            timers = defaultdict(Timer)
            with c2_utils.NamedCudaScope(gpu_id):
                cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                    self.test_model, im, None, timers, entry)
                mask_res_top = test_res_top.res_top_result(self.test_model,
                                                           entry,
                                                           save_png=False)
            im_name = os.path.splitext(os.path.basename(entry['image']))[0]
            mask_png_20 = vis_utils.vis_one_image(im[:, :, ::-1],
                                                  '{:d}_{:s}'.format(
                                                      self.cur, im_name),
                                                  output_dir,
                                                  cls_boxes_i,
                                                  segms=cls_segms_i,
                                                  keypoints=cls_keyps_i,
                                                  thresh=cfg.VIS_TH,
                                                  box_alpha=0.8,
                                                  dataset=None,
                                                  show_class=False,
                                                  save_png=False)
            if len(mask_png_20.shape) == 2:
                print("data next continue")
                continue
            # Resize the res-top mask (CHW -> HWC) to the network input size.
            mask_restop_resize = cv2.resize(mask_res_top.transpose([1, 2, 0]),
                                            (input_size[0], input_size[1]),
                                            interpolation=cv2.INTER_LINEAR)
            data_blob[batch_id, :, :, 0:20] = mask_restop_resize
            #print("data_blob:", data_blob.shape)

            # print("mask_png_20 shape", mask_png_20.shape)
            mask_png_20 = mask_png_20.transpose((1, 2, 0))
            #print("mask_png_20 shape:", mask_png_20.shape)
            #print("data_blob shape:", data_blob.shape)
            data_blob[batch_id, :, :,
                      20:] = cv2.resize(mask_png_20,
                                        (input_size[0], input_size[1]),
                                        interpolation=cv2.INTER_LINEAR)

            gt_blob[batch_id] = cv2.resize(label,
                                           (input_size[0], input_size[1]),
                                           interpolation=cv2.INTER_NEAREST)
            # Check the ground truth for class 4 (sunglasses) pixels.
            index = np.where(gt_blob[batch_id] == 4)
            if len(index[0]):
                print('have sunglasses, pixels:', len(index[0]))

            # print("loaded data batch {}".format(batch_id))
            batch_id += 1
            if batch_id >= self.batch_size:
                break
        data_blob = np.transpose(data_blob, [0, 3, 1, 2])  # NHWC -> NCHW
        return data_blob, gt_blob
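The final transpose matters because the loader assembles blobs in NHWC order
(the layout cv2 produces) while Caffe2 consumes NCHW. A minimal sketch, with
the 512x512 input size chosen only for illustration:

import numpy as np

batch = np.zeros((2, 512, 512, 40), dtype=np.float32)  # NHWC, as filled by cv2
nchw = np.transpose(batch, [0, 3, 1, 2])                # NCHW, as Caffe2 expects
assert nchw.shape == (2, 40, 512, 512)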
Example n. 34
def vis_wholedataset(
    dataset_name,
    proposal_file,
    output_dir,
    ind_range=None,
    all_boxes=None,
    all_segms=None,
    all_keyps=None,
    all_personmasks=None,
    all_parss=None,
    all_bodys=None,
    img_name=None,
    show_box=True,
):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'

    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
        dataset_name, proposal_file, ind_range)
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES

    timers = defaultdict(Timer)
    txt_all = []
    parsing_i_png = None
    uv_parsing_i_png = None
    for i, entry in enumerate(roidb):
        if img_name is not None:
            if entry['image'].split('/')[-1] not in img_name:
                continue
        if 'has_no_densepose' not in entry:
            if cfg.TEST.PRECOMPUTED_PROPOSALS:
                # The roidb may contain ground-truth rois (for example, if the roidb
                # comes from the training or val split). We only want to evaluate
                # detection on the *non*-ground-truth rois. We select only the rois
                # that have the gt_classes field set to 0, which means there's no
                # ground truth.
                box_proposals = entry['boxes'][entry['gt_classes'] == 0]
                if len(box_proposals) == 0:
                    continue
            else:
                # Faster R-CNN type models generate proposals on-the-fly with an
                # in-network RPN; 1-stage models don't require proposals.
                box_proposals = None

            im = cv2.imread(entry['image'])
            cls_boxes_i, cls_segms_i, cls_keyps_i, cls_personmask_i, cls_parss_i, cls_bodys_i = \
                get_ind_results(i, all_boxes), get_ind_results(i, all_segms), get_ind_results(i,all_keyps), \
                get_ind_results(i,all_personmasks), get_ind_results(i,all_parss), get_ind_results(i,all_bodys)
            if cfg.MODEL.PARSING_ON:
                parsing_i_png, txt_result = parsing_utils.parsing2png(
                    cls_boxes_i, cls_parss_i, output_dir, entry['image'],
                    im.shape[:2])
                uv_parsing_i_png, uv_txt_result = parsing_utils.parsing2png(
                    cls_boxes_i,
                    cls_bodys_i,
                    output_dir,
                    entry['image'],
                    im.shape[:2],
                    uv=True,
                )
                txt_all.append(txt_result)
        if i % 10 == 0:  # Reduce log file size
            ave_total_time = np.sum([t.average_time for t in timers.values()])
            eta_seconds = ave_total_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            det_time = (timers['im_detect_bbox'].average_time +
                        timers['im_detect_mask'].average_time +
                        timers['im_detect_keypoints'].average_time +
                        timers['im_detect_body_uv'].average_time)
            misc_time = (timers['misc_bbox'].average_time +
                         timers['misc_mask'].average_time +
                         timers['misc_keypoints'].average_time +
                         timers['misc_body_uv'].average_time)
            logger.info(('im_detect: range [{:d}, {:d}] of {:d}: '
                         '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})').format(
                             start_ind + 1, end_ind, total_num_images,
                             start_ind + i + 1, start_ind + num_images,
                             det_time, misc_time, eta))

        if cfg.VIS and 'has_no_densepose' not in entry:
            im_name = os.path.splitext(os.path.basename(entry['image']))[0]
            vis_utils.vis_one_image(
                im[:, :, ::-1],
                '{:d}_{:s}'.format(i, im_name),
                os.path.join(output_dir, 'vis'),
                cls_boxes_i,
                segms=cls_personmask_i,
                keypoints=cls_keyps_i,
                body_uv=cls_bodys_i,
                part_segms=parsing_i_png,
                uv_part_segms=uv_parsing_i_png,
                thresh=cfg.VIS_TH,
                box_alpha=0.8,
                dataset=dataset,
                show_class=True,
                show_box=show_box,
            )
    return all_boxes, all_segms, all_keyps, all_personmasks, all_parss, all_bodys

def test_net(weights_file,
             dataset_name,
             proposal_file,
             output_dir,
             ind_range=None,
             gpu_id=0,
             subset_pointer=None):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'

    # determine file name
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)

    # load results if already present
    if os.path.exists(det_file):
        res = load_object(det_file)
        all_boxes, all_segms, all_keyps = (
            res['all_boxes'], res['all_segms'], res['all_keyps'])
    else:
        roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
            dataset_name, proposal_file, ind_range)

        if subset_pointer is not None:
            voc_subset = subset_pointer.subset
            this_sub = voc_subset[:len(roidb)]
            # subset_pointer.subset = voc_subset[len(roidb):]

            # filter roidb:
            roidb = [roi for taking, roi in zip(this_sub, roidb) if taking]

            total_num_images = len(roidb)
            end_ind = total_num_images

        model = initialize_model_from_cfg(weights_file, gpu_id=gpu_id)

        num_images = len(roidb)
        num_classes = cfg.MODEL.NUM_CLASSES

        all_boxes, all_segms, all_keyps = empty_results(
            num_classes, num_images)
        if cfg.TEST.COLLECT_ALL:
            all_feats = []
            all_class_weights = np.empty(shape=(num_images, num_classes),
                                         dtype=np.float32)

        timers = defaultdict(Timer)

        for i, entry in enumerate(roidb):
            if cfg.TEST.PRECOMPUTED_PROPOSALS:
                # The roidb may contain ground-truth rois (for example, if the roidb
                # comes from the training or val split). We only want to evaluate
                # detection on the *non*-ground-truth rois. We select only the rois
                # that have the gt_classes field set to 0, which means there's no
                # ground truth.
                box_proposals = entry['boxes'][entry['gt_classes'] == 0]
                if len(box_proposals) == 0:
                    continue
            else:
                # Faster R-CNN type models generate proposals on-the-fly with an
                # in-network RPN; 1-stage models don't require proposals.
                box_proposals = None

            im = cv2.imread(entry['image'])
            with c2_utils.NamedCudaScope(gpu_id):
                cls_boxes_i, cls_segms_i, cls_keyps_i, sum_softmax, topk_feats = im_detect_all(
                    model,
                    im,
                    box_proposals,
                    timers,
                    return_feats=cfg.TEST.COLLECT_ALL)


            extend_results(i, all_boxes, cls_boxes_i)
            if cls_segms_i is not None:
                extend_results(i, all_segms, cls_segms_i)
            if cls_keyps_i is not None:
                extend_results(i, all_keyps, cls_keyps_i)

            if cfg.TEST.COLLECT_ALL:
                all_class_weights[i] = sum_softmax
                all_feats.append(
                    topk_feats
                )  # will accumulate about 9 Gb of feats on COCO train set (118K imgs)

            if i % 10 == 0:  # Reduce log file size
                ave_total_time = np.sum(
                    [t.average_time for t in timers.values()])
                eta_seconds = ave_total_time * (num_images - i - 1)
                eta = str(datetime.timedelta(seconds=int(eta_seconds)))
                det_time = (timers['im_detect_bbox'].average_time +
                            timers['im_detect_mask'].average_time +
                            timers['im_detect_keypoints'].average_time)
                misc_time = (timers['misc_bbox'].average_time +
                             timers['misc_mask'].average_time +
                             timers['misc_keypoints'].average_time)
                logger.info(('im_detect: range [{:d}, {:d}] of {:d}: '
                             '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})').format(
                                 start_ind + 1, end_ind, total_num_images,
                                 start_ind + i + 1, start_ind + num_images,
                                 det_time, misc_time, eta))

            if cfg.VIS:
                im_name = os.path.splitext(os.path.basename(entry['image']))[0]
                vis_utils.vis_one_image(im[:, :, ::-1],
                                        '{:d}_{:s}'.format(i, im_name),
                                        os.path.join(output_dir, 'vis'),
                                        cls_boxes_i,
                                        segms=cls_segms_i,
                                        keypoints=cls_keyps_i,
                                        thresh=cfg.VIS_TH,
                                        box_alpha=0.8,
                                        dataset=dataset,
                                        show_class=True)

        cfg_yaml = envu.yaml_dump(cfg)
        save_object(
            dict(all_boxes=all_boxes,
                 all_segms=all_segms,
                 all_keyps=all_keyps,
                 cfg=cfg_yaml), det_file)
        logger.info('Wrote detections to: {}'.format(
            os.path.abspath(det_file)))
        if cfg.TEST.COLLECT_ALL:
            save_object(all_class_weights,
                        os.path.join(output_dir, 'class_weights.pkl'))
            save_object(all_feats,
                        os.path.join(output_dir, 'feature_vectors.pkl'))
            logger.info(
                'Wrote class weights and feature vectors to output folder')

    return all_boxes, all_segms, all_keyps
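Because test_net short-circuits when det_file already exists, the pickled dict
doubles as an offline interface to the results. A minimal sketch of reading
one back, assuming save_object wrote a plain pickled dict (as in Detectron):

import pickle

with open('detections.pkl', 'rb') as f:
    res = pickle.load(f)

# all_boxes[cls][img] is an (N, 5) array of [x1, y1, x2, y2, score]
all_boxes = res['all_boxes']
print('num classes:', len(all_boxes), 'num images:', len(all_boxes[1]))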