Example #1
    def __init__(self):
        self.cfg = rospy.get_param('~cfg', 'e2e_mask_rcnn_R-101-FPN_2x.yaml')
        self.wts = rospy.get_param('~wts', 'model_final.pkl')
        self.train_wts = rospy.get_param('~train_wts', 'R-101.pkl')
        self.confidence = rospy.get_param('~confidence', 0.9)
        self.sub_img_topic = rospy.get_param('~sub_img_topic',
                                             "image_rect_color")
        self.frame_rate = rospy.get_param('~frame_rate', 30)

        self.gpu_id = rospy.get_param('~gpu_id', 0)
        self.mask_on = rospy.get_param('~mask_on', True)

        merge_cfg_from_file(self.cfg)
        cfg.NUM_GPUS = 2
        cfg.MODEL.MASK_ON = True
        cfg.TRAIN.WEIGHTS = self.train_wts
        assert_and_infer_cfg()
        utils.logging.setup_logging(__name__)
        self.logger = logging.getLogger(__name__)
        self.model = infer_engine.initialize_model_from_cfg(self.wts,
                                                            gpu_id=self.gpu_id)
        self.model1 = infer_engine.initialize_model_from_cfg(self.wts,
                                                             gpu_id=1)
        self.dummy_coco_dataset = dummy_datasets.get_coco_dataset()

        self.pupil_subscriber = rospy.Subscriber('/scene/left/fit_point',
                                                 ImagePoint,
                                                 self.pupil_callback,
                                                 queue_size=1)
        self.image_subscriber = rospy.Subscriber(self.sub_img_topic,
                                                 Image,
                                                 self.callback,
                                                 queue_size=1)
        # self.compressed_image_subscriber = rospy.Subscriber(self.sub_img_topic+"/compressed", CompressedImage, self.compimgcallback, queue_size=1)

        self.pub_bboxes_topic = rospy.resolve_name(
            self.sub_img_topic) + '/bboxes'
        print("Mask RCNN Initialized")
        self.bboxes_publisher = rospy.Publisher(self.pub_bboxes_topic,
                                                BBoxDetArray,
                                                queue_size=1)
        self.pub_img_topic = self.sub_img_topic + "_detection"
        self.image_publisher = rospy.Publisher(self.pub_img_topic,
                                               Image,
                                               queue_size=20)
        self.bridge = CvBridge()
        self.last_detect = rospy.Time.now()
        self.fit_point = []
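
For completeness, a minimal sketch of what the image callback registered above could look like; the conversion, inference, and republishing steps are assumptions inferred from the attributes set in __init__, not the original callback.

    def callback(self, img_msg):
        # Hypothetical sketch: the real callback is not shown in this example.
        # Convert the ROS image, run Detectron inference, and republish the
        # visualization on the *_detection topic created in __init__.
        im = self.bridge.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
        with c2_utils.NamedCudaScope(self.gpu_id):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                self.model, im, None)
        vis = vis_utils.vis_one_image_opencv(im,
                                             cls_boxes,
                                             cls_segms,
                                             cls_keyps,
                                             thresh=self.confidence,
                                             show_box=True,
                                             show_class=True,
                                             dataset=self.dummy_coco_dataset)
        self.image_publisher.publish(self.bridge.cv2_to_imgmsg(vis, 'bgr8'))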
Example #2
def main(name_scope, gpu_dev, num_images, args):
    t = args.t
    model = initialize_model_from_cfg()
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    temp_frame_folder = osp.join(args.out_path, args.vid_name + '_frames/', str(t))
    imgs = glob.glob(temp_frame_folder + '/*.jpg')
    for i in range(len(imgs)):
        if i % 100 == 0:
            print('Processing Detection for Frame %d' % (i + 1))
        im_ = cv2.imread(imgs[i])
        assert im_ is not None
        im_ = np.expand_dims(im_, 0)
        with core.NameScope(name_scope):
            with core.DeviceScope(gpu_dev):
                cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                    model, im_, None)                                        #TODO: Parallelize detection

        extend_results(i, all_boxes, cls_boxes_i)
        if cls_segms_i is not None:
            extend_results(i, all_segms, cls_segms_i)
        if cls_keyps_i is not None:
            extend_results(i, all_keyps, cls_keyps_i)

    det_name = args.vid_name + '_' + str(args.t) + '_detections.pkl'
    det_file = osp.join(args.out_path, det_name)
    robust_pickle_dump(dict(all_keyps=all_keyps), det_file)
    shutil.rmtree(osp.join(args.out_path, args.vid_name + '_frames'))
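
A hedged sketch of reading the detections file written above (a standard pickle; the path is a placeholder, and the indexing comment reflects Detectron's usual per-class layout):

    import pickle
    # Hypothetical path following the det_name pattern above.
    with open('myvideo_0_detections.pkl', 'rb') as f:
        dets = pickle.load(f)
    all_keyps = dets['all_keyps']  # typically indexed as all_keyps[class][image]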
Example #3
def main():
    args = parser.parse_args()
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    '''
    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]
    '''
    #for i, im_name in enumerate(im_list):
    #out_name = os.path.join(
    #    args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
    #)
    #logger.info('Processing {} -> {}'.format(im_name, out_name))
    #im = cv2.imread(im_name)
    # Read the hint image posted as a base64 data URL, strip the header,
    # decode it to bytes, and round-trip it through a temporary PNG on disk.
    hintDataURL = bottle.request.forms.get("hint")
    hintDataURL = re.sub('^data:image/.+;base64,', '', hintDataURL)
    hintDataURL = base64.urlsafe_b64decode(hintDataURL.encode("ascii"))
    hintDataURL = np.fromstring(hintDataURL, dtype=np.uint8)
    hintDataURL = cv2.imdecode(hintDataURL, -1)
    hstr = str(np.random.randint(100, 999))
    cv2.imwrite('record/' + hstr + '.hint.png', hintDataURL)
    im = cv2.imread('record/' + hstr + '.hint.png')
    timers = defaultdict(Timer)
    t = time.time()
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
            model, im, None, timers=timers)
    return 'ok'
def main(args):

    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    # get the weight path
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    #dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    det_dir = os.path.join(args.input_dir, 'det')
    if not os.path.exists(det_dir):
        os.makedirs(det_dir)

    txt_file = os.path.join(det_dir, "det.txt")
    fid = open(txt_file, 'w')

    img_dir = os.path.join(args.input_dir, "img1")
    img_list = os.listdir(img_dir)
    img_list = sorted(img_list)

    for i in range(len(img_list)):
        print("processing: %d/%d" % (i + 1, len(img_list)))
        img_name = img_list[i][:-4]
        img_idx = int(img_name)
        img_path = os.path.join(img_dir, img_list[i])
        frame = cv2.imread(img_path)

        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, frame, None, None)

        vis_utils.write_txt(fid, img_idx, cls_boxes)

    fid.close()
Example #5
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_dataset = dummy_datasets.get_cifar100_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]
    for i, im_name in enumerate(im_list):
        logger.info('Processing {}'.format(im_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_scores, _, _ = infer_engine.im_detect_all(model,
                                                          im,
                                                          None,
                                                          timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        cl = np.argmax(cls_scores)
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        logger.info(' | Class is: {}'.format(dummy_dataset.classes[cl]))
        logger.info(' | Class confidence is: {:.2f}%'.format(
            cls_scores[cl] * 100))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')
Example #6
def main(args):
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, frame, None, timers=timers)
        image = vis_utils.vis_one_image_opencv(np.array(frame),
                                               cls_boxes,
                                               cls_segms,
                                               cls_keyps,
                                               thresh=0.7,
                                               kp_thresh=2,
                                               show_box=True,
                                               show_class=True)
        cv2.imshow('camera', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        print("Time:", time.time() - t)
    cap.release()
    cv2.destroyAllWindows()
def get_rpn_box_proposals(im, args):
    cfg.immutable(False)
    
    """Load a yaml config file and merge it into the global config."""
    merge_cfg_from_file(args.rpn_cfg)
    
    '''Number of GPUs to use (applies to both training and testing)'''
    cfg.NUM_GPUS = 1
    
    '''Indicates the model's computation terminates with the production of RPN proposals (i.e., it outputs proposals ONLY, no actual object detections)'''
    cfg.MODEL.RPN_ONLY = True
    
    '''Number of top scoring RPN proposals to keep before applying NMS. When FPN is used, this is *per FPN level* (not total)'''
    cfg.TEST.RPN_PRE_NMS_TOP_N = 10000
    
    '''Number of top scoring RPN proposals to keep after applying NMS. This is the total number of RPN proposals produced (for both FPN and non-FPN cases)'''
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000
    
    '''Call this function in your script after you have finished setting all cfg values that are necessary (e.g., merging a config from a file, merging
    command line config options, etc.)'''
    assert_and_infer_cfg()

    """Initialize a model from the global cfg. Loads test-time weights and creates the networks in the Caffe2 workspace. """
    model = model_engine.initialize_model_from_cfg(args.rpn_pkl)
    
    with c2_utils.NamedCudaScope(0):
        """Generate RPN proposals on a single image."""
        boxes, scores = rpn_engine.im_proposals(model, im)
    return boxes, scores
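
A hedged usage sketch of the helper above, assuming args carries rpn_cfg and rpn_pkl as in the function body and that the image path and score threshold are placeholders:

    # Hypothetical usage: args.rpn_cfg -> RPN-only yaml, args.rpn_pkl -> RPN weights.
    im = cv2.imread('demo.jpg')
    boxes, scores = get_rpn_box_proposals(im, args)
    keep = scores > 0.5  # illustrative threshold
    print('kept %d of %d proposals' % (keep.sum(), len(scores)))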
Example #8
    def __init__(self, cfg_path, weights_path):

        # nms_same_class 0.3 ---- set in the *.yaml via TEST: NMS: 0.3 (default 0.5)

        self.gpu_id = 1  #gpu_id default 0

        self.score_thresh = 0.4  #score > score_thresh  default 0.3

        self.per_class_thresh = False  #score > class_score_thresh
        self.autotruck_score_thresh = 0.6
        self.forklift_score_thresh = 0.65
        self.digger_score_thresh = 0.65
        self.car_score_thresh = 0.45
        self.bus_score_thresh = 0.0
        self.tanker_score_thresh = 0.55
        self.person_score_thresh = 0.35
        self.minitruck_score_thresh = 0.0
        self.minibus_score_thresh = 0.59

        self.class_nms_thresh = 0.85  #nms_between_classes  IOU > class_nms_thresh    default 0.9
        merge_cfg_from_file(cfg_path)
        self.model = infer_engine.initialize_model_from_cfg(
            weights_path, self.gpu_id)
        self.dummy_coco_dataset = dummy_datasets.get_steal_oil_class14_dataset()
        print("model is ok")
Example #9
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    #jasonj
    #dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            #jasonj
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, "infer_res.jpg", None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')
Example #10
    def build_graph(self):
        c2_utils.import_detectron_ops()
        # OpenCL may be enabled by default in OpenCV3; disable it because it's not
        # thread safe and causes unwanted GPU memory allocations.
        cv2.ocl.setUseOpenCL(False)

        merge_cfg_from_file(self.config.args['config_path'])

        # If this is a CPU kernel, tell Caffe2 that it should not use
        # any GPUs for its graph operations
        cpu_only = True
        for handle in self.config.devices:
            if handle.type == DeviceType.GPU.value:
                cpu_only = False

        if cpu_only:
            cfg.NUM_GPUS = 0
        else:
            cfg.NUM_GPUS = 1
        # TODO: wrap this in "with device"
        weights_path = cache_url(self.config.args['weights_path'],
                                 cfg.DOWNLOAD_CACHE)
        assert_and_infer_cfg(cache_urls=False)
        model = infer_engine.initialize_model_from_cfg(weights_path)
        return model
Example #11
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    
    video_frame_length = video_frame(args)  #Yunhan Yang pass in a list to save all the frames
    print ("The video has length " + str(video_frame_length) + " frames")
    
    
    f_image_path = args.im_or_folder + "frame%d.jpg" % 0
    fr = cv2.imread(f_image_path, 0)
    origin_height, origin_width = fr.shape[:2]  # shape is (rows, cols)
    
    for x in range(0,video_frame_length):
        im_list = [args.im_or_folder+"frame" + str(x) + ".jpg"] #Yunhan Yang have to save frame in real folder and then read in

        #maybe need need double for loop for list of frames
        for i, im_name in enumerate(im_list):
            out_name = os.path.join(
                                    args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
                                    )
            logger.info('Processing {} -> {}'.format(im_name, out_name))
            im = cv2.imread(im_name)
            timers = defaultdict(Timer)
            t = time.time()
            with c2_utils.NamedCudaScope(0):
                cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                                                                             model, im, None, timers=timers
                                                                             )
            logger.info('Inference time: {:.3f}s'.format(time.time() - t))
            for k, v in timers.items():
                logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
            if i == 0:
                logger.info(
                    ' \ Note: inference on the first image will be slower than the '
                    'rest (caches and auto-tuning need to warm up)')

            #Yunhan Yang edit
            if not os.path.exists(args.im_or_folder+ "/video"):
                os.makedirs(args.im_or_folder+ "/video")
            #Yunhan Yang edit Detectron/lib/utils/vis.py add make result as jpg than pdf

            vis_utils.vis_one_image(
                                    im[:, :, ::-1],  # BGR -> RGB for visualization
                                    im_name,
                                    args.output_dir,
                                    cls_boxes,
                                    cls_segms,
                                    cls_keyps,
                                    dataset=dummy_coco_dataset,
                                    box_alpha=0.3,
                                    show_class=True,
                                    thresh=0.7,
                                    kp_thresh=2
                                    )
Example #12
    def _test_std(self):
        root_dir = osp.join('/private', 'home', 'xinleic', 'pyramid')
        cfg_file = osp.join(root_dir, 'configs', 'visual_genome', 'e2e_faster_rcnn_R-50-FPN_1x.yaml')
        merge_cfg_from_file(cfg_file)
        cfg.NUM_GPUS = 1
        cfg.TEST.RPN_PRE_NMS_TOP_N = 100
        cfg.TEST.RPN_POST_NMS_TOP_N = 20
        assert_and_infer_cfg()
        test_weight = osp.join(root_dir, 'outputs', 'train', 'visual_genome_train', 
                            'e2e_faster_rcnn_R-50-FPN_1x', 'RNG_SEED#3', 'model_final.pkl')
        model = test_engine.initialize_model_from_cfg(test_weight, gpu_id=0)
        dataset = JsonDataset('visual_genome_val')
        roidb = dataset.get_roidb()
        num_images = len(roidb)
        num_classes = cfg.MODEL.NUM_CLASSES
        entry = roidb[1]
        im = cv2.imread(entry['image'])
        max_level = cfg.FPN.RPN_MAX_LEVEL
        min_level = cfg.FPN.RPN_MIN_LEVEL
        # input: rpn_cls_probs_fpn2, rpn_bbox_pred_fpn2
        # output: rpn_rois_fpn2, rpn_roi_probs_fpn2
        with utils.c2.NamedCudaScope(0):
            # let's manually do the testing here
            inputs, im_scale = _get_blobs(im, None, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)

            for k, v in inputs.items():
                workspace.FeedBlob(core.ScopedName(k), v)

            workspace.RunNet(model.net.Proto().name)
            cls_probs = [core.ScopedName('rpn_cls_probs_fpn%d' % i) for i in range(min_level, max_level+1)]
            box_preds = [core.ScopedName('rpn_bbox_pred_fpn%d' % i) for i in range(min_level, max_level+1)]
            rpn_rois = [core.ScopedName('rpn_rois_fpn%d' % i) for i in range(min_level, max_level+1)]
            rpn_roi_probs = [core.ScopedName('rpn_roi_probs_fpn%d' % i) for i in range(min_level, max_level+1)]

            cls_probs = workspace.FetchBlobs(cls_probs)
            box_preds = workspace.FetchBlobs(box_preds)
            rpn_rois = workspace.FetchBlobs(rpn_rois)
            rpn_roi_probs = workspace.FetchBlobs(rpn_roi_probs)

        rpn_rois = np.vstack(rpn_rois)
        rpn_roi_probs = np.vstack(rpn_roi_probs)
        # remove the image dimension
        rpn_rois = rpn_rois[:, 1:]
        boxes = np.hstack([rpn_rois, rpn_roi_probs])
        im_name = osp.splitext(osp.basename(entry['image']))[0]
        utils.vis.vis_one_image(im[:, :, ::-1],
                                '{:s}-std-output'.format(im_name),
                                osp.join(root_dir, 'tests'),
                                boxes,
                                segms=None,
                                keypoints=None,
                                thresh=0.,
                                box_alpha=0.8,
                                dataset=dataset,
                                show_class=False) 
        workspace.ResetWorkspace()
        im_info = inputs['im_info'].astype(np.float32)

        return cls_probs, box_preds, im_info, im, im_name, root_dir, dataset
Example #13
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    cfg.TRAIN.IMS_PER_BATCH = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps, \
            cls_refined_segms, cls_refined_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            os.path.join(args.output_dir, 'vis_local'),
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            os.path.join(args.output_dir, 'vis_refined'),
            cls_boxes,
            cls_refined_segms,
            cls_refined_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
Example #14
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')
        t1 = time.time()
        im = vis_utils.vis_one_image_opencv(im,
                                            cls_boxes,
                                            segms=cls_segms,
                                            keypoints=cls_keyps,
                                            thresh=0.7,
                                            kp_thresh=2,
                                            show_box=True,
                                            dataset=dummy_coco_dataset,
                                            show_class=True)
        # vis_utils.vis_one_image(
        #     im[:, :, ::-1],  # BGR -> RGB for visualization
        #     im_name,
        #     args.output_dir,
        #     cls_boxes,
        #     cls_segms,
        #     cls_keyps,
        #     dataset=dummy_coco_dataset,
        #     box_alpha=0.3,
        #     show_class=True,
        #     thresh=0.7,
        #     kp_thresh=2
        # )
        t2 = time.time() - t1
        print("vis time %f ms" % (t2 * 1000))
        cv2.imwrite(
            args.output_dir + '/' + im_name.split('/')[-1].split('.')[0] +
            '.jpg', im)
Example #15
def get_model(cfg_file, weights_file):
    merge_cfg_from_file(cfg_file)
    cfg.TRAIN.WEIGHTS = ''  # NOTE: do not download pretrained model weights
    cfg.TEST.WEIGHTS = weights_file
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    return model
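
A minimal, hypothetical usage of the get_model helper above; the config and weights paths are placeholders, not files referenced by the original code.

    # Hypothetical paths; any Detectron yaml/weights pair would do.
    model = get_model('e2e_mask_rcnn_R-50-FPN_1x.yaml', 'model_final.pkl')
    with c2_utils.NamedCudaScope(0):
        cls_boxes, _, _ = infer_engine.im_detect_all(model, cv2.imread('demo.jpg'), None)
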
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    # for i, im_name in enumerate(im_list):
        # out_name = os.path.join(
        #     args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        # )
        # logger.info('Processing {} -> {}'.format(im_name, out_name))

    #set webcam
    cam = cv2.VideoCapture(0)
    while True:
        #Fetch image from camera
        ret_val, im = cam.read()
        #uncomment to resize image
        #im = cv2.resize(im, (1200,1024))

        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            "dummy_name",
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )

    cv2.destroyAllWindows()
Example #17
    def __init__(self, config, **kwargs):
        kwargs['handlers'] = [(r'/analyse', AnalyseHandler)]
        super(Application, self).__init__(**kwargs)
        merge_cfg_from_file(config.get('DETECTRON', 'CONFIG'))
        cfg.TEST.WEIGHTS = config.get('DETECTRON', 'WEIGHTS')
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg()
        self.model = infer_engine.initialize_model_from_cfg()
        self.dummy_coco_dataset = dummy_datasets.get_coco_dataset()
Example #18
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()

    predict_dataset(args.project, visualize=False, randomize=False)
Example #19
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    start = timeit.default_timer()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    ##extract bboxes from bottom-up attention model
    image_bboxes = {}
    if args.bbox_file is not None:
        image_bboxes = extract_bboxes(args.bbox_file)

    count = 0
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    for i, im_name in enumerate(im_list):
        im_base_name = os.path.basename(im_name)
        image_id = int(im_base_name.split(".")[0].split("_")[-1])  ##for COCO
        if image_id % args.total_group == args.group_id:
            bbox = image_bboxes[image_id] if image_id in image_bboxes else None
            im = cv2.imread(im_name)
            if im is not None:
                outfile = os.path.join(args.output_dir,
                                       im_base_name.replace('jpg', 'npy'))
                lock_folder = outfile.replace('npy', 'lock')
                if not os.path.exists(lock_folder) and os.path.exists(outfile):
                    continue
                if not os.path.exists(lock_folder):
                    os.makedirs(lock_folder)

                result = get_detections_from_im(cfg,
                                                model,
                                                im,
                                                image_id,
                                                args.feat_name,
                                                args.min_bboxes,
                                                args.max_bboxes,
                                                bboxes=bbox)

                np.save(outfile, result)
                os.rmdir(lock_folder)

            count += 1

            if count % 100 == 0:
                end = timeit.default_timer()
                epoch_time = end - start
                print('process {:d} images after {:.1f} s'.format(
                    count, epoch_time))
Example #20
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    logger.info('about to assert')
    assert_and_infer_cfg()
    logger.info('About to initialise model')
    model = infer_engine.initialize_model_from_cfg()
    logger.info("Done initialising")
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.png'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        segmented_images, classes, scores, segmented_binary_masks = vis_utils.segmented_images_in_original_image_size(
            im,
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
        found = False
        for index, value in enumerate(segmented_images):
            if classes[index] == args.class_label and not found:
                logger.info('Writing output file to: {}'.format(str(i)))
                bin_mask = vis_utils.vis_binary_mask(
                    im, segmented_binary_masks[index])
                cv2.imwrite(out_name, value)
                # str.rstrip strips characters, not the '.png' suffix
                cv2.imwrite(out_name[:-len(".png")] + "bin.png", bin_mask)
                found = True
Example #21
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]
    """
    Add support for webcam
    """
    # Set and get camera from OpenCV
    cam = cv2.VideoCapture(0)

    im_name = 'tmp_im'

    while True:
        # Fetch image from camera
        _, im = cam.read()

        timers = defaultdict(Timer)
        t = time.time()

        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2,
            ext='jpg'  # default is PDF, but we want JPG.
        )

    cv2.destroyAllWindows()
Example #22
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        # im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
        im_list = search(args.im_or_folder, args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
    #for i, im_name in im_list:
        #out_name = os.path.join(
         #   args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        #)
        out_name = im_name.replace(args.im_or_folder, args.output_dir)
        par_path = os.path.dirname(out_name)
        if not os.path.exists(par_path):
            os.makedirs(par_path)
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            par_path,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2,
            ext='png'
        )
Example #23
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = (
        dummy_datasets.get_vg3k_dataset()
        if args.use_vg3k else dummy_datasets.get_coco_dataset())

    if os.path.isdir(args.im_or_folder):
        im_list = sorted(glob.iglob(args.im_or_folder + '/*.' + args.image_ext))
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.npz')
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        if im is None:
            logger.info('Unable to read image, skipping.')
            continue
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        boxes, segms, classes = convert(cls_boxes, cls_segms)
        classes = np.array(classes, dtype=np.uint16)
        resolution = segms[0]['size']
        segms = np.array([x['counts'] for x in segms]) # Run-length encoding
        
        valid = boxes[:, 4] >= args.thresh
        if args.filter_classes:
            valid &= np.isin(classes, all_classes)
            
        boxes = boxes[valid].copy()
        classes = classes[valid].copy()
        segms = segms[valid].copy()
        
        output_name = os.path.basename(im_name)
        np.savez(args.output_dir + '/' + output_name, boxes=boxes, segments=segms, classes=classes, resolution=resolution)
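
A short sketch of loading one of the .npz files written above; the file name is a placeholder, and the keys follow the np.savez call in the loop.

    # Hypothetical output file produced by the loop above.
    det = np.load('example.jpg.npz', allow_pickle=True)
    boxes = det['boxes']        # [x1, y1, x2, y2, score] rows kept above args.thresh
    classes = det['classes']    # uint16 class ids
    segms = det['segments']     # run-length encoded masks ('counts' strings)
    height, width = det['resolution']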
Example #24
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    test_ids = []
    rles = []

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        new_test_ids, new_rles = vis_utils.make_submission(im[:, :, ::-1],
                                                           im_name,
                                                           cls_boxes,
                                                           cls_segms,
                                                           cls_keyps,
                                                           thresh=0.7)

        test_ids.extend(new_test_ids)
        rles.extend(new_rles)

    import pandas as pd
    sub = pd.DataFrame()
    sub['ImageId'] = test_ids
    sub['EncodedPixels'] = pd.Series(rles).apply(
        lambda x: ' '.join(str(y) for y in x))
    sub.to_csv(args.output_dir +
               '/e2e_mask_rcnn_R-50-FPN_1x-lr3e-3-nuclei-6-new.csv',
               index=False)
Example #25
def main(args):
    logger = logging.getLogger(__name__)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    cfg_orig = yaml.load(yaml.dump(cfg))
    im = cv2.imread(args.im_file)

    if args.rpn_pkl is not None:
        proposal_boxes, _proposal_scores = get_rpn_box_proposals(im, args)
        workspace.ResetWorkspace()
    else:
        proposal_boxes = None

    cls_boxes, cls_segms, cls_keyps = None, None, None
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        cfg.immutable(False)
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            weights_file = pkl
        else:
            weights_file = cfg.TEST.WEIGHTS
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg()
        model = model_engine.initialize_model_from_cfg(weights_file)
        with c2_utils.NamedCudaScope(0):
            cls_boxes_, cls_segms_, cls_keyps_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        workspace.ResetWorkspace()

    out_name = os.path.join(
        args.output_dir, '{}'.format(os.path.basename(args.im_file) + '.pdf')
    )
    logger.info('Processing {} -> {}'.format(args.im_file, out_name))

    vis_utils.vis_one_image(
        im[:, :, ::-1],
        args.im_file,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_coco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2
    )
Example #26
def get_rpn_box_proposals(im, args):
    merge_cfg_from_file(args.rpn_cfg)
    cfg.TEST.WEIGHTS = args.rpn_pkl
    cfg.NUM_GPUS = 1
    cfg.MODEL.RPN_ONLY = True
    cfg.TEST.RPN_PRE_NMS_TOP_N = 10000
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000
    assert_and_infer_cfg()

    model = model_engine.initialize_model_from_cfg()
    with c2_utils.NamedCudaScope(0):
        boxes, scores = rpn_engine.im_proposals(model, im)
    return boxes, scores
Example #28
    def __init__(self):
        c2_utils.import_detectron_ops()
        # OpenCL may be enabled by default in OpenCV3; disable it because it's not
        # thread safe and causes unwanted GPU memory allocations.
        cv2.ocl.setUseOpenCL(False)

        merge_cfg_from_file(
            '/home/king/Documents/measurement/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml'
        )
        cfg.NUM_GPUS = 1
        weights = '/home/king/Documents/measurement/model_final.pkl'
        assert_and_infer_cfg(cache_urls=False)
        self.model = infer_engine.initialize_model_from_cfg(weights)
Example #29
def main(args):
    logger = logging.getLogger(__name__)

    args.cfg = '/detectron/configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml'
    args.weights = '/data/workspace/fbdet/models/mask_rcnn_R_101_FPN_2x/model_final.pkl'

    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]
    im_list = []
    im_list.append('/data/workspace/fbdet/test_pic/11.jpg')
    video = cv2.VideoCapture('/data/pic/valid_video_00.avi')
    frame = 0
    while (True):
        if frame > 0:
            break
        # ret, im = video.read()
        # if im is None or ret is None:
        #     print("video.read() fail || video.read() is end!")
        #     break
        im = cv2.imread(im_list[0])
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))

        print('-----------------------------', frame)
        boxs_list = vis_utils.get_boxes_image(cls_boxes,
                                              cls_segms,
                                              cls_keyps,
                                              thresh=0.7,
                                              dataset=dummy_coco_dataset)
        print(boxs_list)
        print('-----------------------------')
        for i in range(len(boxs_list)):
            box = boxs_list[i]
            drawBoxOnImg(im, box[1], box[2], box[3], box[4], 0, 0, frame)
        frame += 1
Example #30
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    for root_dir_path_1, sub_dir_path_list_1, sub_file_path_list_1 in os.walk(
            args.im_or_folder):
        sub_dir_path_list_1 = sorted(sub_dir_path_list_1)
        for i, sub_dir_path_1 in enumerate(sub_dir_path_list_1):
            for root_dir_path_2, sub_dir_path_list_2, sub_file_path_list_2 in os.walk(
                    os.path.join(root_dir_path_1, sub_dir_path_1)):
                sub_file_path_list_2 = sorted(sub_file_path_list_2)
                out_file = open(
                    os.path.join(args.output_dir,
                                 sub_dir_path_1 + "_Det_ffasta.txt"), "wb")
                for img_idx, sub_file_path_2 in enumerate(
                        sub_file_path_list_2):
                    im = cv2.imread(
                        os.path.join(root_dir_path_2, sub_file_path_2))
                    timers = defaultdict(Timer)
                    t = time.time()
                    if (img_idx + 1) % 1000 == 0:
                        sys.stdout.write(
                            "\rFinish {} images\n".format(img_idx + 1))
                        sys.stdout.flush()
                    with c2_utils.NamedCudaScope(0):
                        cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                            model, im, None, timers=timers)
                        if isinstance(cls_boxes, list):
                            cls_boxes, cls_segms, cls_keyps, classes = vis_utils.convert_from_cls_format(
                                cls_boxes, cls_segms, cls_keyps)
                        if cls_boxes is None or cls_boxes.shape[0] == 0:
                            continue
                        obj_idx = 0
                        for cls_box, cls in zip(cls_boxes, classes):
                            if int(cls) != 3 and int(cls) != 6:
                                continue
                            out_file.write("{},{},{},{},{},{},{}\n".format(
                                img_idx + 1, obj_idx + 1, cls_box[0],
                                cls_box[1], cls_box[2] - cls_box[0],
                                cls_box[3] - cls_box[1], cls_box[4]))
                            obj_idx += 1
                out_file.close()
            print("Finish {} / {} of video sequences".format(
                i + 1, len(sub_dir_path_list_1)))
        break
Example #31
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.TEST.WEIGHTS = args.weights
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg()
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )
Example #32
def main():
    logger = logging.getLogger(__name__)
    merge_cfg_from_file('/detectron/e2e_mask_rcnn_R-101-FPN_2x.yaml')
    cfg.NUM_GPUS = 4
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg('/detectron/models/model_final.pkl')
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    # cam = cv2.VideoCapture("rtsp://192.168.128.12:554/mpeg4cif")
    # cam = cv2.VideoCapture("rtsp://192.168.128.11:554/av0_1")
    # cam = cv2.VideoCapture("http://192.168.128.14/video.cgi")
    n = 0
    tmp_file_name = '/tmp/tmp.jpg'
    im0 = 0
    im1 = 0
    while True:
        # ret_val, im = cam.read()
        # cv2.imwrite(tmp_file_name, im)
        im = cv2.imread('/detectron/k2m30/img/' + str(n) + '.jpg')
        timers = defaultdict(Timer)
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        data = vis_utils.vis_one_image_opencv(
            im,  # vis_one_image_opencv draws on the BGR frame directly
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            # box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2,
            # ext='png'
        )
        # time.sleep(0.1)
        # if data == None:
        #     logger.info(cls_boxes)
        #
        # else:
        n += 1
        n = n % 1000

        file_name = '/tmp/' + str(n) + '.jpg'
        cv2.imwrite(file_name, data)
        logger.info(str(n) + ' saved')
        logger.info(os.path.getsize(file_name))
Example #33
def main(args):
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    cfg_orig = yaml.load(yaml.dump(cfg))
    im = cv2.imread(args.im_file)

    if args.rpn_pkl is not None:
        proposal_boxes, _proposal_scores = get_rpn_box_proposals(im, args)
        workspace.ResetWorkspace()
    else:
        proposal_boxes = None

    cls_boxes, cls_segms, cls_keyps = None, None, None
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            cfg.TEST.WEIGHTS = pkl
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg()
        model = model_engine.initialize_model_from_cfg()
        with c2_utils.NamedCudaScope(0):
            cls_boxes_, cls_segms_, cls_keyps_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        workspace.ResetWorkspace()

    vis_utils.vis_one_image(
        im[:, :, ::-1],
        args.im_file,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_coco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2
    )
Example #34
def load_model(args):
    model = test_engine.initialize_model_from_cfg()
    blobs = mutils.get_ws_blobs()

    return model, blobs