    def _test_std(self):
        current_dir = osp.dirname(osp.realpath(__file__))
        cfg_file = osp.join(current_dir, '..', 'configs', 'R-50_1x.yaml')
        merge_cfg_from_file(cfg_file)
        cfg.TEST.WEIGHTS = osp.join(
            current_dir, '..', 'outputs', 'train',
            'coco_2014_train+coco_2014_valminusminival', 'R-50_1x', 'default',
            'model_final.pkl')
        cfg.RETINANET.INFERENCE_TH = 0.

        dataset = JsonDataset('coco_2014_minival')
        roidb = dataset.get_roidb()
        model = model_builder.create(cfg.MODEL.TYPE, train=False, gpu_id=0)
        utils.net.initialize_gpu_from_weights_file(model,
                                                   cfg.TEST.WEIGHTS,
                                                   gpu_id=0)
        model_builder.add_inference_inputs(model)
        workspace.CreateNet(model.net)
        workspace.CreateNet(model.conv_body_net)
        num_images = len(roidb)
        num_classes = cfg.MODEL.NUM_CLASSES
        entry = roidb[0]
        im = cv2.imread(entry['image'])
        with utils.c2.NamedCudaScope(0):
            cls_boxes, cls_preds, cls_probs, box_preds, anchors, im_info = im_detect_bbox(
                model, im, debug=True)

        workspace.ResetWorkspace()

        return cls_preds, cls_probs, box_preds, anchors, im_info
Example #2
def main(args):
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, frame, None, timers=timers)
        image = vis_utils.vis_one_image_opencv(np.array(frame),
                                               cls_boxes,
                                               cls_segms,
                                               cls_keyps,
                                               thresh=0.7,
                                               kp_thresh=2,
                                               show_box=True,
                                               show_class=True)
        cv2.imshow('camera', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        print("Time:", time.time() - t)
    cap.release()
    cv2.destroyAllWindows()
Example #3
def main():
    args = parser.parse_args()
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    '''
    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]
    '''
    #for i, im_name in enumerate(im_list):
    #out_name = os.path.join(
    #    args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
    #)
    #logger.info('Processing {} -> {}'.format(im_name, out_name))
    #im = cv2.imread(im_name)
    hintDataURL = bottle.request.forms.get("hint")
    hintDataURL = re.sub('^data:image/.+;base64,', '', hintDataURL)
    hintDataURL = base64.urlsafe_b64decode(hintDataURL.encode("ascii"))
    hintDataURL = np.fromstring(hintDataURL, dtype=np.uint8)
    hintDataURL = cv2.imdecode(hintDataURL, -1)
    hstr = str(np.random.randint(100, 999))
    cv2.imwrite('record/' + hstr + '.hint.png', hintDataURL)
    im = cv2.imread('record/' + hstr + '.hint.png')
    timers = defaultdict(Timer)
    t = time.time()
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
            model, im, None, timers=timers)
    return 'ok'
Example #4
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    #jasonj
    #dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            #jasonj
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, "infer_res.jpg", None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')
def get_rpn_box_proposals(im, args):
    cfg.immutable(False)
    
    """Load a yaml config file and merge it into the global config."""
    merge_cfg_from_file(args.rpn_cfg)
    
    '''Number of GPUs to use (applies to both training and testing)'''
    cfg.NUM_GPUS = 1
    
    '''Indicates the model's computation terminates with the production of RPN proposals (i.e., it outputs proposals ONLY, no actual object detections)'''
    cfg.MODEL.RPN_ONLY = True

    '''Number of top scoring RPN proposals to keep before applying NMS. When FPN is used, this is *per FPN level* (not total)'''
    cfg.TEST.RPN_PRE_NMS_TOP_N = 10000

    '''Number of top scoring RPN proposals to keep after applying NMS. This is the total number of RPN proposals produced (for both FPN and non-FPN cases)'''
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000
    
    '''Call this function in your script after you have finished setting all cfg values that are necessary (e.g., merging a config from a file, merging
    command line config options, etc.)'''
    assert_and_infer_cfg()

    """Initialize a model from the global cfg. Loads test-time weights and creates the networks in the Caffe2 workspace. """
    model = model_engine.initialize_model_from_cfg(args.rpn_pkl)
    
    with c2_utils.NamedCudaScope(0):
        """Generate RPN proposals on a single image."""
        boxes, scores = rpn_engine.im_proposals(model, im)
    return boxes, scores
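
A minimal driver for the get_rpn_box_proposals() helper above, for context. The config and weights paths are placeholders (any RPN-only Detectron .yaml/.pkl pair would do), and argparse.Namespace simply stands in for the parsed command-line arguments the original script would supply.

import argparse

import cv2

if __name__ == '__main__':
    # Hypothetical paths; substitute your own RPN config and weights.
    args = argparse.Namespace(
        rpn_cfg='configs/12_2017_baselines/rpn_R-50-FPN_1x.yaml',
        rpn_pkl='/path/to/rpn_model_final.pkl',
    )
    im = cv2.imread('demo/sample.jpg')  # any BGR image readable by OpenCV
    boxes, scores = get_rpn_box_proposals(im, args)
    # boxes: N x 4 proposals as (x1, y1, x2, y2); scores: N RPN objectness scores
    print(boxes.shape, scores.shape)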
Example #6
    def build_graph(self):
        c2_utils.import_detectron_ops()
        # OpenCL may be enabled by default in OpenCV3; disable it because it's not
        # thread safe and causes unwanted GPU memory allocations.
        cv2.ocl.setUseOpenCL(False)

        merge_cfg_from_file(self.config.args['config_path'])

        # If this is a CPU kernel, tell Caffe2 that it should not use
        # any GPUs for its graph operations
        cpu_only = True
        for handle in self.config.devices:
            if handle.type == DeviceType.GPU.value:
                cpu_only = False

        if cpu_only:
            cfg.NUM_GPUS = 0
        else:
            cfg.NUM_GPUS = 1
        # TODO: wrap this in "with device"
        weights_path = cache_url(self.config.args['weights_path'],
                                 cfg.DOWNLOAD_CACHE)
        assert_and_infer_cfg(cache_urls=False)
        model = infer_engine.initialize_model_from_cfg(weights_path)
        return model
Example #7
    def __init__(self, cfg_path, weights_path):

        #nms_same_class 0.3  ----  set via TEST.NMS: 0.3 in the *.yaml (default 0.5)

        self.gpu_id = 1  #gpu_id default 0

        self.score_thresh = 0.4  #score > score_thresh  default 0.3

        self.per_class_thresh = False  #score > class_score_thresh
        self.autotruck_score_thresh = 0.6
        self.forklift_score_thresh = 0.65
        self.digger_score_thresh = 0.65
        self.car_score_thresh = 0.45
        self.bus_score_thresh = 0.0
        self.tanker_score_thresh = 0.55
        self.person_score_thresh = 0.35
        self.minitruck_score_thresh = 0.0
        self.minibus_score_thresh = 0.59

        self.class_nms_thresh = 0.85  #nms_between_classes  IOU > class_nms_thresh    default 0.9
        merge_cfg_from_file(cfg_path)
        self.model = infer_engine.initialize_model_from_cfg(
            weights_path, self.gpu_id)
        self.dummy_coco_dataset = dummy_datasets.get_steal_oil_class14_dataset(
        )
        print("model is ok")
Example #8
def main():
    # Initialize C2
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1']
    )
    # Set up logging and load config options
    logger = setup_logging(__name__)
    logging.getLogger('roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    # Note that while we set the numpy random seed network training will not be
    # deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonable execution-speed tradeoff (such as certain
    # non-deterministic cudnn functions).
    np.random.seed(cfg.RNG_SEED)
    # Execute the training run
    checkpoints = train_model()
    # Test the trained model
    if not args.skip_test:
        test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)
Example #9
def main():
    # Initialize C2
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'])
    # Set up logging and load config options
    logger = setup_logging(__name__)
    logging.getLogger('roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    # Note that while we set the numpy random seed network training will not be
    # deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonable execution-speed tradeoff (such as certain
    # non-deterministic cudnn functions).
    np.random.seed(cfg.RNG_SEED)
    # Execute the training run
    checkpoints = train_model()
    # Test the trained model
    if not args.skip_test:
        test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)
def main(args):

    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    # get the weight path
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    #dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    det_dir = os.path.join(args.input_dir, 'det')
    if not os.path.exists(det_dir):
        os.makedirs(det_dir)

    txt_file = os.path.join(det_dir, "det.txt")
    fid = open(txt_file, 'w')

    img_dir = os.path.join(args.input_dir, "img1")
    img_list = os.listdir(img_dir)
    img_list = sorted(img_list)

    for i in range(len(img_list)):
        print("processing: %d/%d" % (i + 1, len(img_list)))
        img_name = img_list[i][:-4]
        img_idx = int(img_name)
        img_path = os.path.join(img_dir, img_list[i])
        frame = cv2.imread(img_path)

        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, frame, None, None)

        vis_utils.write_txt(fid, img_idx, cls_boxes)

    fid.close()
Example #11
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    
    video_frame_length = video_frame(args)  #Yunhan Yang pass in a list to save all the frames
    print ("The video has length " + str(video_frame_length) + " frames")
    
    
    f_image_path = args.im_or_folder+ "frame%d.jpg" % 0
    fr = cv2.imread(f_image_path, 0)
    origin_height, origin_width = fr.shape[:2]
    
    for x in range(0,video_frame_length):
        im_list = [args.im_or_folder+"frame" + str(x) + ".jpg"] #Yunhan Yang have to save frame in real folder and then read in

        #maybe need a double for loop for the list of frames
        for i, im_name in enumerate(im_list):
            out_name = os.path.join(
                                    args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
                                    )
            logger.info('Processing {} -> {}'.format(im_name, out_name))
            im = cv2.imread(im_name)
            timers = defaultdict(Timer)
            t = time.time()
            with c2_utils.NamedCudaScope(0):
                cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                                                                             model, im, None, timers=timers
                                                                             )
            logger.info('Inference time: {:.3f}s'.format(time.time() - t))
            for k, v in timers.items():
                logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
            if i == 0:
                logger.info(
                    ' \ Note: inference on the first image will be slower than the '
                    'rest (caches and auto-tuning need to warm up)')
            
            #Yunhan Yang edit
            if not os.path.exists(args.im_or_folder+ "/video"):
                os.makedirs(args.im_or_folder+ "/video")
            #Yunhan Yang edit Detectron/lib/utils/vis.py add make result as jpg than pdf

            vis_utils.vis_one_image(
                                    im[:, :, ::-1],  # BGR -> RGB for visualization
                                    im_name,
                                    args.output_dir,
                                    cls_boxes,
                                    cls_segms,
                                    cls_keyps,
                                    dataset=dummy_coco_dataset,
                                    box_alpha=0.3,
                                    show_class=True,
                                    thresh=0.7,
                                    kp_thresh=2
                                    )
Example #12
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_dataset = dummy_datasets.get_cifar100_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]
    for i, im_name in enumerate(im_list):
        logger.info('Processing {}'.format(im_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_scores, _, _ = infer_engine.im_detect_all(model,
                                                          im,
                                                          None,
                                                          timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        cl = np.argmax(cls_scores)
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
            logger.info(' | Class is: {}'.format(dummy_dataset.classes[cl]))
            logger.info(' | Class Confidence is: {:.2f}%'.format(
                cls_scores[cl] * 100))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')
Example #13
    def _test_std(self):
        root_dir = osp.join('/private', 'home', 'xinleic', 'pyramid')
        cfg_file = osp.join(root_dir, 'configs', 'visual_genome', 'e2e_faster_rcnn_R-50-FPN_1x.yaml')
        merge_cfg_from_file(cfg_file)
        cfg.NUM_GPUS = 1
        cfg.TEST.RPN_PRE_NMS_TOP_N = 100
        cfg.TEST.RPN_POST_NMS_TOP_N = 20
        assert_and_infer_cfg()
        test_weight = osp.join(root_dir, 'outputs', 'train', 'visual_genome_train', 
                            'e2e_faster_rcnn_R-50-FPN_1x', 'RNG_SEED#3', 'model_final.pkl')
        model = test_engine.initialize_model_from_cfg(test_weight, gpu_id=0)
        dataset = JsonDataset('visual_genome_val')
        roidb = dataset.get_roidb()
        num_images = len(roidb)
        num_classes = cfg.MODEL.NUM_CLASSES
        entry = roidb[1]
        im = cv2.imread(entry['image'])
        max_level = cfg.FPN.RPN_MAX_LEVEL
        min_level = cfg.FPN.RPN_MIN_LEVEL
        # input: rpn_cls_probs_fpn2, rpn_bbox_pred_fpn2
        # output: rpn_rois_fpn2, rpn_roi_probs_fpn2
        with utils.c2.NamedCudaScope(0):
            # let's manually do the testing here
            inputs, im_scale = _get_blobs(im, None, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)

            for k, v in inputs.items():
                workspace.FeedBlob(core.ScopedName(k), v)

            workspace.RunNet(model.net.Proto().name)
            cls_probs = [core.ScopedName('rpn_cls_probs_fpn%d' % i) for i in range(min_level, max_level+1)]
            box_preds = [core.ScopedName('rpn_bbox_pred_fpn%d' % i) for i in range(min_level, max_level+1)]
            rpn_rois = [core.ScopedName('rpn_rois_fpn%d' % i) for i in range(min_level, max_level+1)]
            rpn_roi_probs = [core.ScopedName('rpn_roi_probs_fpn%d' % i) for i in range(min_level, max_level+1)]

            cls_probs = workspace.FetchBlobs(cls_probs)
            box_preds = workspace.FetchBlobs(box_preds)
            rpn_rois = workspace.FetchBlobs(rpn_rois)
            rpn_roi_probs = workspace.FetchBlobs(rpn_roi_probs)

        rpn_rois = np.vstack(rpn_rois)
        rpn_roi_probs = np.vstack(rpn_roi_probs)
        # remove the image dimension
        rpn_rois = rpn_rois[:, 1:]
        boxes = np.hstack([rpn_rois, rpn_roi_probs])
        im_name = osp.splitext(osp.basename(entry['image']))[0]
        utils.vis.vis_one_image(im[:, :, ::-1],
                                '{:s}-std-output'.format(im_name),
                                osp.join(root_dir, 'tests'),
                                boxes,
                                segms=None,
                                keypoints=None,
                                thresh=0.,
                                box_alpha=0.8,
                                dataset=dataset,
                                show_class=False) 
        workspace.ResetWorkspace()
        im_info = inputs['im_info'].astype(np.float32)

        return cls_probs, box_preds, im_info, im, im_name, root_dir, dataset
Example #14
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')
        t1 = time.time()
        im = vis_utils.vis_one_image_opencv(im,
                                            cls_boxes,
                                            segms=cls_segms,
                                            keypoints=cls_keyps,
                                            thresh=0.7,
                                            kp_thresh=2,
                                            show_box=True,
                                            dataset=dummy_coco_dataset,
                                            show_class=True)
        # vis_utils.vis_one_image(
        #     im[:, :, ::-1],  # BGR -> RGB for visualization
        #     im_name,
        #     args.output_dir,
        #     cls_boxes,
        #     cls_segms,
        #     cls_keyps,
        #     dataset=dummy_coco_dataset,
        #     box_alpha=0.3,
        #     show_class=True,
        #     thresh=0.7,
        #     kp_thresh=2
        # )
        t2 = time.time() - t1
        print("vis time %f ms" % (t2 * 1000))
        cv2.imwrite(
            args.output_dir + '/' + im_name.split('/')[-1].split('.')[0] +
            '.jpg', im)
Example #15
def main():
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    logger.info('Converting model with config:')
    logger.info(pprint.pformat(cfg))

    assert not cfg.MODEL.KEYPOINTS_ON, "Keypoint model not supported."
    assert not cfg.MODEL.MASK_ON, "Mask model not supported."
    assert not cfg.FPN.FPN_ON, "FPN not supported."
    assert not cfg.RETINANET.RETINANET_ON, "RetinaNet model not supported."

    # load model from cfg
    model, blobs = load_model(args)

    net = core.Net('')
    net.Proto().op.extend(copy.deepcopy(model.net.Proto().op))
    net.Proto().external_input.extend(
        copy.deepcopy(model.net.Proto().external_input))
    net.Proto().external_output.extend(
        copy.deepcopy(model.net.Proto().external_output))
    net.Proto().type = args.net_execution_type
    net.Proto().num_workers = 1 if args.net_execution_type == 'simple' else 4

    # Reset the device_option, change to unscope name and replace python operators
    convert_net(args, net.Proto(), blobs)

    # add operators for bbox
    add_bbox_ops(args, net, blobs)

    if args.fuse_af:
        print('Fusing affine channel...')
        net, blobs = mutils.fuse_net_affine(
            net, blobs)

    if args.use_nnpack:
        mutils.update_mobile_engines(net.Proto())

    # generate init net
    empty_blobs = ['data', 'im_info']
    init_net = gen_init_net(net, blobs, empty_blobs)

    if args.device == 'gpu':
        [net, init_net] = convert_model_gpu(args, net, init_net)

    net.Proto().name = args.net_name
    init_net.Proto().name = args.net_name + "_init"

    if args.test_img is not None:
        verify_model(args, [net, init_net], args.test_img)

    _save_models(net, init_net, args)
Example #16
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    cfg.TRAIN.IMS_PER_BATCH = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps, \
            cls_refined_segms, cls_refined_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            os.path.join(args.output_dir, 'vis_local'),
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            os.path.join(args.output_dir, 'vis_refined'),
            cls_boxes,
            cls_refined_segms,
            cls_refined_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
Example #17
def get_model(cfg_file, weights_file):
    merge_cfg_from_file(cfg_file)
    cfg.TRAIN.WEIGHTS = ''  # NOTE: do not download pretrained model weights
    cfg.TEST.WEIGHTS = weights_file
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    return model
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    # for i, im_name in enumerate(im_list):
        # out_name = os.path.join(
        #     args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        # )
        # logger.info('Processing {} -> {}'.format(im_name, out_name))

    #set webcam
    cam = cv2.VideoCapture(0)
    while True:
        #Fetch image from camera
        ret_val, im = cam.read()
        #uncomment to resize image
        #im = cv2.resize(im, (1200,1024))

        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            "dummy_name",
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )

    cv2.destroyAllWindows()
Example #19
def get_model(cfg_file, weights_file):  
    merge_cfg_from_file(cfg_file)  
    cfg.TRAIN.WEIGHTS = ''  # NOTE: do not download pretrained model weights  
    cfg.TEST.WEIGHTS = weights_file  
    cfg.NUM_GPUS = 1  
    assert_and_infer_cfg() 
    # build the model according to the cfg
    model = model_builder.create(cfg.MODEL.TYPE, train=True)
    return model
Example #20
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()

    predict_dataset(args.project, visualize=False, randomize=False)
Example #21
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    start = timeit.default_timer()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    ##extract bboxes from bottom-up attention model
    image_bboxes = {}
    if args.bbox_file is not None:
        image_bboxes = extract_bboxes(args.bbox_file)

    count = 0
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    for i, im_name in enumerate(im_list):
        im_base_name = os.path.basename(im_name)
        image_id = int(im_base_name.split(".")[0].split("_")[-1])  ##for COCO
        if image_id % args.total_group == args.group_id:
            bbox = image_bboxes[image_id] if image_id in image_bboxes else None
            im = cv2.imread(im_name)
            if im is not None:
                outfile = os.path.join(args.output_dir,
                                       im_base_name.replace('jpg', 'npy'))
                lock_folder = outfile.replace('npy', 'lock')
                if not os.path.exists(lock_folder) and os.path.exists(outfile):
                    continue
                if not os.path.exists(lock_folder):
                    os.makedirs(lock_folder)

                result = get_detections_from_im(cfg,
                                                model,
                                                im,
                                                image_id,
                                                args.feat_name,
                                                args.min_bboxes,
                                                args.max_bboxes,
                                                bboxes=bbox)

                np.save(outfile, result)
                os.rmdir(lock_folder)

            count += 1

            if count % 100 == 0:
                end = timeit.default_timer()
                epoch_time = end - start
                print('process {:d} images after {:.1f} s'.format(
                    count, epoch_time))
Example #22
    def __init__(self, config, **kwargs):
        kwargs['handlers'] = [(r'/analyse', AnalyseHandler)]
        super(Application, self).__init__(**kwargs)
        merge_cfg_from_file(config.get('DETECTRON', 'CONFIG'))
        cfg.TEST.WEIGHTS = config.get('DETECTRON', 'WEIGHTS')
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg()
        self.model = infer_engine.initialize_model_from_cfg()
        self.dummy_coco_dataset = dummy_datasets.get_coco_dataset()
Example #23
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    logger.info('about to assert')
    assert_and_infer_cfg()
    logger.info('About to initialise model')
    model = infer_engine.initialize_model_from_cfg()
    logger.info("Done initialising")
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.png'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        segmented_images, classes, scores, segmented_binary_masks = vis_utils.segmented_images_in_original_image_size(
            im,
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
        found = False
        for index, value in enumerate(segmented_images):
            if classes[index] == args.class_label and not found:
                logger.info('Writing output file to: {}'.format(str(i)))
                bin_mask = vis_utils.vis_binary_mask(
                    im, segmented_binary_masks[index])
                cv2.imwrite(out_name, value)
                cv2.imwrite(out_name[:-len(".png")] + "bin.png", bin_mask)
                found = True
Example #24
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]
    """
    Add support for webcam
    """
    # Set and get camera from OpenCV
    cam = cv2.VideoCapture(0)

    im_name = 'tmp_im'

    while True:
        # Fetch image from camera
        _, im = cam.read()

        timers = defaultdict(Timer)
        t = time.time()

        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2,
            ext='jpg'  # default is PDF, but we want JPG.
        )

    cv2.destroyAllWindows()
Example #25
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        # im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
        im_list = search(args.im_or_folder, args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
    #for i, im_name in im_list:
        #out_name = os.path.join(
         #   args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        #)
        out_name = im_name.replace(args.im_or_folder, args.output_dir)
        par_path = os.path.dirname(out_name)
        if not os.path.exists(par_path):
            os.makedirs(par_path)
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            par_path,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2,
            ext='png'
        )
Example #26
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = (
        dummy_datasets.get_vg3k_dataset()
        if args.use_vg3k else dummy_datasets.get_coco_dataset())

    if os.path.isdir(args.im_or_folder):
        im_list = sorted(glob.iglob(args.im_or_folder + '/*.' + args.image_ext))
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.npz')
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        if im is None:
            logger.info('Unable to read image, skipping.')
            continue
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        boxes, segms, classes = convert(cls_boxes, cls_segms)
        classes = np.array(classes, dtype=np.uint16)
        resolution = segms[0]['size']
        segms = np.array([x['counts'] for x in segms]) # Run-length encoding
        
        valid = boxes[:, 4] >= args.thresh
        if args.filter_classes:
            valid &= np.isin(classes, all_classes)
            
        boxes = boxes[valid].copy()
        classes = classes[valid].copy()
        segms = segms[valid].copy()
        
        output_name = os.path.basename(im_name)
        np.savez(args.output_dir + '/' + output_name, boxes=boxes, segments=segms, classes=classes, resolution=resolution)
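
As a reference for consumers of these files, a short sketch of loading one of the saved .npz archives back in; the path is a placeholder, and the keys mirror the np.savez call above.

import numpy as np

data = np.load('output_dir/example_image.jpg.npz')  # placeholder path
boxes = data['boxes']        # (N, 5): x1, y1, x2, y2, score
classes = data['classes']    # (N,) uint16 class indices
segments = data['segments']  # (N,) run-length encoded masks (COCO 'counts')
height, width = data['resolution']
print(boxes.shape, classes.dtype, len(segments), (height, width))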
Example #27
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    test_ids = []
    rles = []

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        new_test_ids, new_rles = vis_utils.make_submission(im[:, :, ::-1],
                                                           im_name,
                                                           cls_boxes,
                                                           cls_segms,
                                                           cls_keyps,
                                                           thresh=0.7)

        test_ids.extend(new_test_ids)
        rles.extend(new_rles)

    import pandas as pd
    sub = pd.DataFrame()
    sub['ImageId'] = test_ids
    sub['EncodedPixels'] = pd.Series(rles).apply(
        lambda x: ' '.join(str(y) for y in x))
    sub.to_csv(args.output_dir +
               '/e2e_mask_rcnn_R-50-FPN_1x-lr3e-3-nuclei-6-new.csv',
               index=False)
Example #28
def main(args):
    logger = logging.getLogger(__name__)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    cfg_orig = yaml.load(yaml.dump(cfg))
    im = cv2.imread(args.im_file)

    if args.rpn_pkl is not None:
        proposal_boxes, _proposal_scores = get_rpn_box_proposals(im, args)
        workspace.ResetWorkspace()
    else:
        proposal_boxes = None

    cls_boxes, cls_segms, cls_keyps = None, None, None
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        cfg.immutable(False)
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            weights_file = pkl
        else:
            weights_file = cfg.TEST.WEIGHTS
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg()
        model = model_engine.initialize_model_from_cfg(weights_file)
        with c2_utils.NamedCudaScope(0):
            cls_boxes_, cls_segms_, cls_keyps_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        workspace.ResetWorkspace()

    out_name = os.path.join(
        args.output_dir, '{}'.format(os.path.basename(args.im_file) + '.pdf')
    )
    logger.info('Processing {} -> {}'.format(args.im_file, out_name))

    vis_utils.vis_one_image(
        im[:, :, ::-1],
        args.im_file,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_coco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2
    )
Example #29
def track(cfg_path, opts):

    # Setup tracker configuration
    merge_cfg_from_file(cfg_path)

    opts = opts.split(' ')
    if len(opts) > 0:
        merge_cfg_from_list(opts)

    device = torch.device('cuda:{}'.format(0))

    tracker = SiamTracker(device)

    def load_image(img_path, use_pil):
        if use_pil:
            pil_img = Image.open(img_path)
            if pil_img.mode == 'L':
                pil_img = pil_img.convert(
                    'RGB')  # convert to RGB 3 channels if necessary
            im_tensor = to_tensor(pil_img)
        else:
            im = cv2.imread(img_path, cv2.IMREAD_COLOR)  # HxWxC
            im_tensor = torch.from_numpy(np.transpose(im, (2, 0, 1))).float()
        im_tensor = im_tensor.unsqueeze(0).to(device)  # 1*C*H*W
        return im_tensor

    # start to track
    handle = vot.VOT("polygon")
    Polygon = handle.region()

    box_cxcywh = vot.get_axis_aligned_bbox(Polygon)
    # convert to xyxy
    box_xyxy = ubox.xcycwh_to_xyxy(box_cxcywh)

    image_file = handle.frame()

    if not image_file:
        sys.exit(0)

    im_tensor = load_image(image_file, tracker.use_pil)
    tracker.tracker.init_tracker(im_tensor, box_xyxy)

    while True:
        image_file = handle.frame()
        if not image_file:
            break
        im_tensor = load_image(image_file, tracker.use_pil)
        box_xyxy = tracker.tracker.predict_next_frame(im_tensor, box_xyxy)
        box_xywh = ubox.xyxy_to_xywh(box_xyxy)

        handle.report(
            Rectangle(box_xywh[0], box_xywh[1], box_xywh[2], box_xywh[3]))
Example #30
def get_rpn_box_proposals(im, args):
    merge_cfg_from_file(args.rpn_cfg)
    cfg.TEST.WEIGHTS = args.rpn_pkl
    cfg.NUM_GPUS = 1
    cfg.MODEL.RPN_ONLY = True
    cfg.TEST.RPN_PRE_NMS_TOP_N = 10000
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000
    assert_and_infer_cfg()

    model = model_engine.initialize_model_from_cfg()
    with c2_utils.NamedCudaScope(0):
        boxes, scores = rpn_engine.im_proposals(model, im)
    return boxes, scores
Example #31
    def __init__(self):
        c2_utils.import_detectron_ops()
        # OpenCL may be enabled by default in OpenCV3; disable it because it's not
        # thread safe and causes unwanted GPU memory allocations.
        cv2.ocl.setUseOpenCL(False)

        merge_cfg_from_file(
            '/home/king/Documents/measurement/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml'
        )
        cfg.NUM_GPUS = 1
        weights = '/home/king/Documents/measurement/model_final.pkl'
        assert_and_infer_cfg(cache_urls=False)
        self.model = infer_engine.initialize_model_from_cfg(weights)
Example #32
def get_rpn_box_proposals(im, args):
    merge_cfg_from_file(args.rpn_cfg)
    cfg.TEST.WEIGHTS = args.rpn_pkl
    cfg.NUM_GPUS = 1
    cfg.MODEL.RPN_ONLY = True
    cfg.TEST.RPN_PRE_NMS_TOP_N = 10000
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000
    assert_and_infer_cfg()

    model = model_engine.initialize_model_from_cfg()
    with c2_utils.NamedCudaScope(0):
        boxes, scores = rpn_engine.im_proposals(model, im)
    return boxes, scores
Example #33
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )
Example #34
def main(args):
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    cfg_orig = yaml.load(yaml.dump(cfg))
    im = cv2.imread(args.im_file)

    if args.rpn_pkl is not None:
        proposal_boxes, _proposal_scores = get_rpn_box_proposals(im, args)
        workspace.ResetWorkspace()
    else:
        proposal_boxes = None

    cls_boxes, cls_segms, cls_keyps = None, None, None
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            cfg.TEST.WEIGHTS = pkl
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg()
        model = model_engine.initialize_model_from_cfg()
        with c2_utils.NamedCudaScope(0):
            cls_boxes_, cls_segms_, cls_keyps_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        workspace.ResetWorkspace()

    vis_utils.vis_one_image(
        im[:, :, ::-1],
        args.im_file,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_coco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2
    )
Example #35
        logger.info('{:d}/{:d}: Average dequeue time: {:.3f}s  [{:d}/{:d}]'.
                    format(i + 1, opts.num_batches, total_time / (i + 1),
                           roi_data_loader._minibatch_queue.qsize(),
                           opts.minibatch_queue_size))
        # Sleep to simulate the time taken by running a little network
        time.sleep(opts.sleep_time)
        # To inspect:
        # blobs = workspace.FetchBlobs(all_blobs)
        # from IPython import embed; embed()
    logger.info('Shutting down data loader (EnqueueBlob errors are ok)...')
    roi_data_loader.shutdown()


if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    logger = utils.logging.setup_logging(__name__)
    logger.setLevel(logging.DEBUG)
    logging.getLogger('roi_data.loader').setLevel(logging.INFO)
    np.random.seed(cfg.RNG_SEED)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Running with config:')
    logger.info(pprint.pformat(cfg))
    main(args)