def check_args(args):
    # rpn_pkl and rpn_cfg must be given together: both set, or both None
    assert (
        (args.rpn_pkl is not None and args.rpn_cfg is not None) or
        (args.rpn_pkl is None and args.rpn_cfg is None)
    )

    if args.rpn_pkl is not None:
        # Download the file specified by the URL to the cache dir and return
        # the path to the cached file; if the argument is not a URL, it is
        # returned as is.
        args.rpn_pkl = cache_url(args.rpn_pkl, cfg.DOWNLOAD_CACHE)
        assert os.path.exists(args.rpn_pkl)
        assert os.path.exists(args.rpn_cfg)

    if args.models_to_run is not None:
        # models_to_run holds alternating model/config entries, so its
        # length must be even
        assert len(args.models_to_run) % 2 == 0

        for i, model_file in enumerate(args.models_to_run):
            if len(model_file) > 0:
                if i % 2 == 0:
                    # cache the model files (the even-indexed entries)
                    model_file = cache_url(model_file, cfg.DOWNLOAD_CACHE)
                    args.models_to_run[i] = model_file
                assert os.path.exists(model_file), '\'{}\' does not exist'.format(model_file)
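The comment above summarizes `cache_url`'s contract. A minimal, illustrative sketch of using it (the local path and URL are made up, and any restriction of Detectron's `cache_url` to its own download domain is assumed, not verified):

# Illustrative sketch only: cache_url passes non-URL arguments through
# unchanged and downloads URLs into the cache dir, returning the local path.
local = cache_url('/tmp/model_final.pkl', cfg.DOWNLOAD_CACHE)
assert local == '/tmp/model_final.pkl'  # not a URL, returned as-is

# hypothetical model URL; the real call may require a URL from Detectron's
# own download domain
remote = 'https://dl.fbaipublicfiles.com/detectron/model_final.pkl'
cached = cache_url(remote, cfg.DOWNLOAD_CACHE)
assert os.path.exists(cached)  # now a file under cfg.DOWNLOAD_CACHE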
Example #2
def cache_cfg_urls():
    """Download URLs in the config, cache them locally, and rewrite cfg to make
    use of the locally cached file.
    """
    __C.TRAIN.WEIGHTS = cache_url(__C.TRAIN.WEIGHTS, __C.DOWNLOAD_CACHE)
    __C.TEST.WEIGHTS = cache_url(__C.TEST.WEIGHTS, __C.DOWNLOAD_CACHE)
    __C.TRAIN.PROPOSAL_FILES = tuple(
        [cache_url(f, __C.DOWNLOAD_CACHE) for f in __C.TRAIN.PROPOSAL_FILES]
    )
    __C.TEST.PROPOSAL_FILES = tuple(
        [cache_url(f, __C.DOWNLOAD_CACHE) for f in __C.TEST.PROPOSAL_FILES]
    )
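For context (hedged): in Detectron, `assert_and_infer_cfg` invokes `cache_cfg_urls` unless called with `cache_urls=False`, which is why the demo scripts below, having already cached `args.weights` themselves, disable it. A sketch of that assumed relationship:

# Hedged sketch of the assumed call relationship (illustrative config path):
merge_cfg_from_file('configs/e2e_mask_rcnn_R-50-FPN_1x.yaml')
assert_and_infer_cfg(cache_urls=True)   # also runs cache_cfg_urls() internally
# ...whereas the inference demos below cache args.weights manually and call
# assert_and_infer_cfg(cache_urls=False) to skip the config-wide caching.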
Example #3
def main(args):
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            # camera read failed; stop instead of passing None to the model
            break
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, frame, None, timers=timers)
        image = vis_utils.vis_one_image_opencv(np.array(frame),
                                               cls_boxes,
                                               cls_segms,
                                               cls_keyps,
                                               thresh=0.7,
                                               kp_thresh=2,
                                               show_box=True,
                                               show_class=True)
        cv2.imshow('camera', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        print("Time:", time.time() - t)
    cap.release()
    cv2.destroyAllWindows()
Example #4
    def build_graph(self):
        c2_utils.import_detectron_ops()
        # OpenCL may be enabled by default in OpenCV3; disable it because it's not
        # thread safe and causes unwanted GPU memory allocations.
        cv2.ocl.setUseOpenCL(False)

        merge_cfg_from_file(self.config.args['config_path'])

        # If this is a CPU kernel, tell Caffe2 that it should not use
        # any GPUs for its graph operations
        cpu_only = True
        for handle in self.config.devices:
            if handle.type == DeviceType.GPU.value:
                cpu_only = False

        if cpu_only:
            cfg.NUM_GPUS = 0
        else:
            cfg.NUM_GPUS = 1
        # TODO: wrap this in "with device"
        weights_path = cache_url(self.config.args['weights_path'],
                                 cfg.DOWNLOAD_CACHE)
        assert_and_infer_cfg(cache_urls=False)
        model = infer_engine.initialize_model_from_cfg(weights_path)
        return model
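The device scan in `build_graph` only asks whether any configured handle is a GPU; under the same assumptions (`self.config.devices` and the `DeviceType` enum), it could be written as a one-liner:

# Equivalent to the cpu_only loop above, same assumptions about DeviceType:
cpu_only = not any(
    handle.type == DeviceType.GPU.value for handle in self.config.devices)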
Example #5
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    start = timeit.default_timer()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + "/*." + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    # extract bboxes from bottom-up attention model
    image_bboxes = {}
    if args.bbox_file is not None:
        image_bboxes = extract_bboxes(args.bbox_file)

    count = 0
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    for i, im_name in enumerate(im_list):
        im_base_name = os.path.basename(im_name)
        image_id = int(im_base_name.split(".")[0].split("_")[-1])  # for COCO
        if image_id % args.total_group == args.group_id:
            bbox = image_bboxes[image_id] if image_id in image_bboxes else None
            im = cv2.imread(im_name)
            if im is not None:
                outfile = os.path.join(
                    args.output_dir, im_base_name.replace("jpg", "npy")
                )
                lock_folder = outfile.replace("npy", "lock")
                if not os.path.exists(lock_folder) and os.path.exists(outfile):
                    continue
                if not os.path.exists(lock_folder):
                    os.makedirs(lock_folder)

                result = get_detections_from_im(
                    cfg,
                    model,
                    im,
                    image_id,
                    args.feat_name,
                    args.min_bboxes,
                    args.max_bboxes,
                    background=args.background,
                    bboxes=bbox,
                )

                np.save(outfile, result)
                os.rmdir(lock_folder)

            count += 1

            if count % 100 == 0:
                end = timeit.default_timer()
                epoch_time = end - start
                print("process {:d} images after {:.1f} s".format(count, epoch_time))
Example #6
def main(args):

    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    # get the weight path
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    #dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    det_dir = os.path.join(args.input_dir, 'det')
    if not os.path.exists(det_dir):
        os.makedirs(det_dir)

    txt_file = os.path.join(det_dir, "det.txt")
    fid = open(txt_file, 'w')

    img_dir = os.path.join(args.input_dir, "img1")
    img_list = os.listdir(img_dir)
    img_list = sorted(img_list)

    for i in range(len(img_list)):
        print("processing: %d/%d" % (i + 1, len(img_list)))
        img_name = img_list[i][:-4]
        img_idx = int(img_name)
        img_path = os.path.join(img_dir, img_list[i])
        frame = cv2.imread(img_path)

        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, frame, None, None)

        vis_utils.write_txt(fid, img_idx, cls_boxes)

    fid.close()
Example #7
def check_args(args):
    assert ((args.rpn_pkl is not None and args.rpn_cfg is not None)
            or (args.rpn_pkl is None and args.rpn_cfg is None))
    if args.rpn_pkl is not None:
        args.rpn_pkl = cache_url(args.rpn_pkl, cfg.DOWNLOAD_CACHE)
        assert os.path.exists(args.rpn_pkl)
        assert os.path.exists(args.rpn_cfg)
    if args.models_to_run is not None:
        assert len(args.models_to_run) % 2 == 0
        for i, model_file in enumerate(args.models_to_run):
            if len(model_file) > 0:
                if i % 2 == 0:
                    model_file = cache_url(model_file, cfg.DOWNLOAD_CACHE)
                    args.models_to_run[i] = model_file
                assert os.path.exists(model_file), \
                    '\'{}\' does not exist'.format(model_file)
Example #8
def check_args(args):
    assert (
        (args.rpn_pkl is not None and args.rpn_cfg is not None) or
        (args.rpn_pkl is None and args.rpn_cfg is None)
    )
    if args.rpn_pkl is not None:
        args.rpn_pkl = cache_url(args.rpn_pkl, cfg.DOWNLOAD_CACHE)
        assert os.path.exists(args.rpn_pkl)
        assert os.path.exists(args.rpn_cfg)
    if args.models_to_run is not None:
        assert len(args.models_to_run) % 2 == 0
        for i, model_file in enumerate(args.models_to_run):
            if len(model_file) > 0:
                if i % 2 == 0:
                    model_file = cache_url(model_file, cfg.DOWNLOAD_CACHE)
                    args.models_to_run[i] = model_file
                assert os.path.exists(model_file), \
                    '\'{}\' does not exist'.format(model_file)
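All three `check_args` variants assume `models_to_run` is a flat list of alternating model/config entries, hence the even-length assert and the caching of even indices only. A hypothetical invocation (the paths are illustrative):

# Hypothetical layout assumed by check_args: even indices are model weight
# files (cached via cache_url), odd indices are the matching config files.
args.models_to_run = [
    'https://example.com/model_a.pkl', 'configs/model_a.yaml',
    'https://example.com/model_b.pkl', 'configs/model_b.yaml',
]
check_args(args)  # caches the model URLs and asserts every path exists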
Example #9
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        # im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
        im_list = search(args.im_or_folder, args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
    #for i, im_name in im_list:
        #out_name = os.path.join(
         #   args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        #)
        out_name = im_name.replace(args.im_or_folder, args.output_dir)
        par_path = os.path.dirname(out_name)
        if not os.path.exists(par_path):
            os.makedirs(par_path)
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            par_path,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2,
            ext='png'
        )
Example #10
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = (
        dummy_datasets.get_vg3k_dataset()
        if args.use_vg3k else dummy_datasets.get_coco_dataset())

    if os.path.isdir(args.im_or_folder):
        im_list = sorted(glob.iglob(args.im_or_folder + '/*.' + args.image_ext))
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.npz')
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        if im is None:
            logger.info('Unable to read image, skipping.')
            continue
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        boxes, segms, classes = convert(cls_boxes, cls_segms)
        classes = np.array(classes, dtype=np.uint16)
        resolution = segms[0]['size']
        segms = np.array([x['counts'] for x in segms]) # Run-length encoding
        
        valid = boxes[:, 4] >= args.thresh
        if args.filter_classes:
            valid &= np.isin(classes, all_classes)
            
        boxes = boxes[valid].copy()
        classes = classes[valid].copy()
        segms = segms[valid].copy()
        
        output_name = os.path.basename(im_name)
        np.savez(args.output_dir + '/' + output_name, boxes=boxes, segments=segms, classes=classes, resolution=resolution)
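Example #10 stores per-image boxes, classes, and run-length-encoded masks in an `.npz` file. A hedged sketch of reading one back and decoding the masks with pycocotools (file name and array layout assumed from the code above):

# Sketch only: assumes the .npz layout written above and that pycocotools
# is installed; 'example.jpg.npz' is a made-up file name.
import numpy as np
from pycocotools import mask as mask_util

data = np.load('example.jpg.npz', allow_pickle=True)
h, w = data['resolution']
rles = [{'counts': c, 'size': [int(h), int(w)]} for c in data['segments']]
masks = mask_util.decode(rles)  # (h, w, num_detections) binary masks
print(data['boxes'].shape, data['classes'].shape, masks.shape)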
Example #11
def main(args):
    logger = logging.getLogger(__name__)

    args.cfg = '/detectron/configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml'
    args.weights = '/data/workspace/fbdet/models/mask_rcnn_R_101_FPN_2x/model_final.pkl'

    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]
    im_list = []
    im_list.append('/data/workspace/fbdet/test_pic/11.jpg')
    video = cv2.VideoCapture('/data/pic/valid_video_00.avi')
    frame = 0
    while True:
        if frame > 0:
            break
        # ret, im = video.read()
        # if im is None or ret is None:
        #     print("video.read() fail || video.read() is end!")
        #     break
        im = cv2.imread(im_list[0])
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))

        print('-----------------------------', frame)
        boxs_list = vis_utils.get_boxes_image(cls_boxes,
                                              cls_segms,
                                              cls_keyps,
                                              thresh=0.7,
                                              dataset=dummy_coco_dataset)
        print(boxs_list)
        print('-----------------------------')
        for i in range(len(boxs_list)):
            box = boxs_list[i]
            drawBoxOnImg(im, box[1], box[2], box[3], box[4], 0, 0, frame)
        frame += 1
Example #12
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    for root_dir_path_1, sub_dir_path_list_1, sub_file_path_list_1 in os.walk(
            args.im_or_folder):
        sub_dir_path_list_1 = sorted(sub_dir_path_list_1)
        for i, sub_dir_path_1 in enumerate(sub_dir_path_list_1):
            for root_dir_path_2, sub_dir_path_list_2, sub_file_path_list_2 in os.walk(
                    os.path.join(root_dir_path_1, sub_dir_path_1)):
                sub_file_path_list_2 = sorted(sub_file_path_list_2)
                out_file = open(
                    os.path.join(args.output_dir,
                                 sub_dir_path_1 + "_Det_ffasta.txt"), "wb")
                for img_idx, sub_file_path_2 in enumerate(
                        sub_file_path_list_2):
                    im = cv2.imread(
                        os.path.join(root_dir_path_2, sub_file_path_2))
                    timers = defaultdict(Timer)
                    t = time.time()
                    if (img_idx + 1) % 1000 == 0:
                        sys.stdout.write(
                            "\rFinish {} images\n".format(img_idx + 1))
                        sys.stdout.flush()
                    with c2_utils.NamedCudaScope(0):
                        cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                            model, im, None, timers=timers)
                        if isinstance(cls_boxes, list):
                            cls_boxes, cls_segms, cls_keyps, classes = vis_utils.convert_from_cls_format(
                                cls_boxes, cls_segms, cls_keyps)
                        if cls_boxes is None or cls_boxes.shape[0] == 0:
                            continue
                        obj_idx = 0
                        for cls_box, cls in zip(cls_boxes, classes):
                            if int(cls) != 3 and int(cls) != 6:
                                continue
                            out_file.write("{},{},{},{},{},{},{}\n".format(
                                img_idx + 1, obj_idx + 1, cls_box[0],
                                cls_box[1], cls_box[2] - cls_box[0],
                                cls_box[3] - cls_box[1], cls_box[4]))
                            obj_idx += 1
                out_file.close()
            print("Finish {} / {} of video sequences".format(
                i + 1, len(sub_dir_path_list_1)))
        break
Example #13
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg()
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
        )
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)'
            )

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )
Example #14
def predict(cfg_path, weights_path):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(cfg_path)
    cfg.NUM_GPUS = 1
    gpu_id = 0  # specify which GPU to use
    weights_path = cache_url(weights_path, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(weights_path, gpu_id)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    #dummy_coco_dataset = dummy_datasets.get_illbuild_class11_dataset()

    print("model is ok")
    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.jpg'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(gpu_id):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
Example #15
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = (dummy_datasets.get_vg3k_dataset() if args.use_vg3k
                          else dummy_datasets.get_coco_dataset())

    if args.im_or_folder == 'train0':
        #im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
        im_list = cPickle.load(open('train_list.pkl'))
        pre_folder = '/mnt/storage/jialinwu/detectron/detectron/datasets/data/coco/coco_train2014'
        im_list = im_list[:40000]
    elif args.im_or_folder == 'train1':
        im_list = cPickle.load(open('train_list.pkl'))
        pre_folder = '/mnt/storage/jialinwu/detectron/detectron/datasets/data/coco/coco_train2014'
        im_list = im_list[40000:]
    elif args.im_or_folder == 'val':
        im_list = cPickle.load(open('val_list.pkl'))
        pre_folder = '/mnt/storage/jialinwu/detectron/detectron/datasets/data/coco/coco_val2014'
    elif args.im_or_folder == 'test0':
        im_list = cPickle.load(open('test_list.pkl'))
        im_list = im_list[:40000]
        pre_folder = '../ARNet/image_captioning/data/images/test2015'
    elif args.im_or_folder == 'test1':
        im_list = cPickle.load(open('test_list.pkl'))
        im_list = im_list[40000:]
        pre_folder = '../ARNet/image_captioning/data/images/test2015'
    for i in tqdm(range(len(im_list))):
        im_name = pre_folder + '/' + im_list[i]
        if im_name[-4:] != '.jpg':
            continue
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        imgid = int(im_list[i][:-4].split('_')[-1])
        if args.im_or_folder == 'val':
            save_name = '/mnt/storage/jialinwu/seg_every_thing/npz_features/coco_val2014/%d.npz' % imgid
        else:
            save_name = '/mnt/storage/jialinwu/seg_every_thing/npz_features/coco_train2014/%d.npz' % imgid
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, save_name, timers=timers)
Example #16
def main(args):
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)

    if args.info is None and args.scene_dir is None and args.video_file is not None:  # video only
        _main(args, model)
    elif args.info is None and args.scene_dir is not None and args.video_file is None:  # scene dir only
        derived_dir = os.path.join(args.scene_dir, 'derived')
        for cam_dir in glob.glob(os.path.join(derived_dir, 'cam_*')):
            video_file = glob.glob(os.path.join(cam_dir, '*.mp4'))[0]
            args.video_file = video_file
            _main(args, model)
    elif args.info is not None and args.scene_dir is None and args.video_file is None:  # info only
        _main(args, model)
    else:
        raise ValueError(
            'Must specify exactly one of info, scene_dir, or video_file.')
Example #17
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
utils.logging.setup_logging(__name__)
logger = logging.getLogger(__name__)
#args = parse_args()
# args.cfg = '/detectron/configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml'
# args.weights = '/data/workspace/fbdet/models/mask_rcnn_R_101_FPN_2x/model_final.pkl'
cfg_path = '/detectron/configs/12_2017_baselines/e2e_faster_rcnn_X-101-64x4d-FPN_1x.yaml'
weights_path = '/data/workspace/fbdet/models/X_101_64x4d_FPN_faster/model_final.pkl'
merge_cfg_from_file(cfg_path)
cfg.NUM_GPUS = 1
weights1 = cache_url(weights_path, cfg.DOWNLOAD_CACHE)
assert_and_infer_cfg(cache_urls=False)
model = infer_engine.initialize_model_from_cfg(weights1)
dummy_coco_dataset = dummy_datasets.get_coco_dataset()


def pipeline_mask(im):

    if im is None:
        print("im is null!")

    timers = defaultdict(Timer)
    #t = time.time()
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
            model, im, None, timers=timers)
Example #18
def main(args):

    #just show box and mask
    cfg_file = r'/home/twang/Documents/detectron/configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml'
    weights_file = r'/home/twang/Documents/detectron/model-weights/mask_rcnn_R-101-FPN_2x_model_final.pkl'

    #==============================================================================
    #     #show keypoint
    #     cfg_file = r'/home/twang/Documents/detectron/model-weights/e2e_KeyPoint_Mask_RCNN/e2e_keypoint_rcnn_X-101-64x4d-FPN_1x.yaml'
    #     weights_file = r'/home/twang/Documents/detectron/model-weights/e2e_KeyPoint_Mask_RCNN/X-101-64x4d-FPN_1x.pkl'
    #
    #==============================================================================
    merge_cfg_from_file(cfg_file)
    cfg.NUM_GPUS = 2
    weights = cache_url(weights_file, cfg.DOWNLOAD_CACHE)

    assert_and_infer_cfg()

    model = infer_engine.initialize_model_from_cfg(weights)

    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    Person_Gender_CKPT = '/home/twang/Documents/tensorflow-classfication2/train-slim/trained-models/airport_gender3/training_all_layers/vgg_16_750000'
    Person_Gender_LABELS = os.path.join(
        '/home/twang/Documents/tensorflow-classfication2/gender-airport-entrance',
        'labels.txt')
    Person_Gender_category_index = load_labels(Person_Gender_LABELS)

    Person_Gender_gpu_option = tf.GPUOptions(
        per_process_gpu_memory_fraction=0.2)

    Gender = Person_Gender(Person_Gender_CKPT, Person_Gender_category_index,
                           Person_Gender_gpu_option)

    video_dir = args.video_dir

    cap = cv2.VideoCapture(video_dir)

    video_box_mask = cv2.VideoWriter(
        'out_box_mask.mp4', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 5,
        (1280, 720))

    while cap.isOpened():

        t1 = time.time()

        ret, frame = cap.read()

        if not ret:
            break

        frame = cv2.resize(frame, dsize=(1280, 720))

        timers = defaultdict(Timer)

        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, frame, None, timers=timers)

        thresh = 0.7

        show_box = True
        show_class = True
        crop_person = True
        filter_big_box = True

        dataset = dummy_coco_dataset

        frame_for_person_crop = frame.copy()
        frame_for_box = frame.copy()
        frame_for_mask = frame.copy()
        frame_for_both = frame.copy()
        """Constructs a numpy array with the detections visualized."""
        if isinstance(cls_boxes, list):
            boxes, segms, keypoints, classes = convert_from_cls_format(
                cls_boxes, cls_segms, cls_keyps)

        if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
            # nothing above threshold in this frame; skip it
            continue

        if segms is not None and len(segms) > 0:
            masks = mask_util.decode(segms)
            color_list = colormap()
            color_list_selected = color_list[0:5]

        else:
            color_list = colormap()
            color_list_selected = color_list[0:5]

        # Display in largest to smallest order to reduce occlusion
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        sorted_inds = np.argsort(-areas)

        for i in sorted_inds:
            bbox = boxes[i, :4]
            score = boxes[i, -1]
            if score < thresh:
                continue

            # show class (person, backpack, handbag, suitcase)
            class_default = ['person', 'backpack', 'handbag', 'suitcase']
            if show_class:
                class_str, class_text = get_class_string(
                    classes[i], score, dataset)

                if class_text in class_default:

                    frame = vis_class(frame, (bbox[0], bbox[1] - 2), class_str)

                    frame_for_both = vis_class(frame_for_both,
                                               (bbox[0], bbox[1] - 2),
                                               class_str)

                    #filter big box
                    if filter_big_box:

                        aspect_ratio = (bbox[2] - bbox[0]) / (bbox[3] -
                                                              bbox[1])

                        if aspect_ratio < 1.5:

                            #show bounding box
                            if show_box:
                                frame = vis_bbox(frame,
                                                 (bbox[0], bbox[1], bbox[2] -
                                                  bbox[0], bbox[3] - bbox[1]))

                                frame_for_box = vis_bbox(
                                    frame_for_box,
                                    (bbox[0], bbox[1], bbox[2] - bbox[0],
                                     bbox[3] - bbox[1]))
                                frame_for_both = vis_bbox(
                                    frame_for_both,
                                    (bbox[0], bbox[1], bbox[2] - bbox[0],
                                     bbox[3] - bbox[1]))

                            # crop each person box, recognize gender
                            if crop_person and class_text == 'person':

                                (x1, y1, w, h) = (int(bbox[0]), int(bbox[1]),
                                                  int(bbox[2] - bbox[0]),
                                                  int(bbox[3] - bbox[1]))
                                x2 = x1 + w
                                y2 = y1 + h

                                cropped = frame_for_person_crop[y1:y2, x1:x2]

                                cv2.imwrite(
                                    '/home/twang/Documents/detectron/temp/Hk-demo/genderwrite.jpg',
                                    cropped)

                                gender_frame_saved = '/home/twang/Documents/detectron/temp/Hk-demo/genderwrite.jpg'
                                prob = Gender.predict(gender_frame_saved)

                                if prob[0] > prob[1]:

                                    #show class name and probability
                                    frame_for_both = vis_class(
                                        frame_for_both, (x1, y1 + 10),
                                        "Female:%.1f" % (prob[0]))

                                    #show female mask
                                    color_mask_female = color_list_selected[
                                        0, 0:3]
                                    frame_for_both = vis_mask(
                                        frame_for_both, masks[..., i],
                                        color_mask_female)

                                    frame_for_box = vis_class(
                                        frame_for_box, (x1, y1 + 10),
                                        "Female:%.1f" % (prob[0]))
                                    frame_for_box = vis_mask(
                                        frame_for_box, masks[..., i],
                                        color_mask_female)

                                else:

                                    #show class name and probability
                                    frame_for_both = vis_class(
                                        frame_for_both, (x1, y1 + 10),
                                        "Male:%.1f" % (prob[1]))

                                    #show male mask
                                    color_mask_male = color_list_selected[1,
                                                                          0:3]
                                    frame_for_both = vis_mask(
                                        frame_for_both, masks[..., i],
                                        color_mask_male)

                                    frame_for_box = vis_class(
                                        frame_for_box, (x1, y1 + 10),
                                        "Male:%.1f" % (prob[1]))
                                    frame_for_box = vis_mask(
                                        frame_for_box, masks[..., i],
                                        color_mask_male)

                            #show mask; each of the other categories gets a different color
                            if segms is not None and len(segms) > i:
                                if class_text == 'backpack':
                                    color_mask = color_list_selected[2, 0:3]
                                    frame_for_both = vis_mask(
                                        frame_for_both, masks[..., i],
                                        color_mask)
                                elif class_text == 'handbag':
                                    color_mask = color_list_selected[3, 0:3]
                                    frame_for_both = vis_mask(
                                        frame_for_both, masks[..., i],
                                        color_mask)
                                elif class_text == 'suitcase':
                                    color_mask = color_list_selected[4, 0:3]
                                    frame_for_both = vis_mask(
                                        frame_for_both, masks[..., i],
                                        color_mask)

                    else:
                        #show bounding box
                        if show_box:
                            frame = vis_bbox(frame,
                                             (bbox[0], bbox[1], bbox[2] -
                                              bbox[0], bbox[3] - bbox[1]))

                        # crop each box
                        if crop_person and class_text == 'person':

                            (x1, y1, w, h) = (int(bbox[0]), int(bbox[1]),
                                              int(bbox[2] - bbox[0]),
                                              int(bbox[3] - bbox[1]))
                            x2 = x1 + w
                            y2 = y1 + h

                            cropped = frame_for_person_crop[y1:y2, x1:x2]

                            cv2.imwrite(
                                '/home/twang/Documents/detectron/temp/Hk-demo/genderwrite.jpg',
                                cropped)

                            gender_frame_saved = '/home/twang/Documents/detectron/temp/Hk-demo/genderwrite.jpg'
                            prob = Gender.predict(gender_frame_saved)

                            if prob[0] > prob[1]:

                                cv2.putText(frame,
                                            "Female:%.1f" % (prob[0] * 100),
                                            (x1, y1 + 10),
                                            cv2.FONT_HERSHEY_SIMPLEX,
                                            0.35,
                                            _GRAY,
                                            lineType=cv2.LINE_AA)

                            else:
                                cv2.putText(frame,
                                            "Male:%.1f" % (prob[1] * 100),
                                            (x1, y1 + 10),
                                            cv2.FONT_HERSHEY_SIMPLEX,
                                            0.35,
                                            _GRAY,
                                            lineType=cv2.LINE_AA)

        t2 = time.time()
        durr = float(t2 - t1)
        fps = 1.0 / durr
        #cv2.putText(frame,"fps:%.3f"%fps,(20,20),4, 0.5, (0, 255, 0), 1, cv2.LINE_AA)

        cv2.imshow('Detection using and box mask', frame_for_both)

        video_box_mask.write(frame_for_both)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    video_box_mask.release()
    cv2.destroyAllWindows()
Example #19
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    start = timeit.default_timer()

    # extract bboxes from bottom-up attention model
    image_bboxes = {}

    count = 0
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    results = {}

    if os.path.isdir(args.im_or_folder):
        # (legacy) the order of list_of_ids has to be consistent with dic_anet.json generated
        # from the GVD prepro script for correct data loading
        # target_ids = set(os.listdir(args.im_or_folder)) # temp, when a target split is given
        # list_of_folder = [i['id'] for i in json.load(open(args.list_of_ids))['videos'] \
        #     if i['id'] in target_ids]
        list_of_folder = [
            i['id'] for i in json.load(open(args.list_of_ids))['videos']
        ]
    else:
        list_of_folder = []

    N = len(list_of_folder)
    print('Number of segments to generate proposals for: ', N)
    fpv = 10
    dets_labels = np.zeros((N, fpv, 100, 7))
    dets_num = np.zeros((N, fpv))
    nms_num = np.zeros((N, fpv))
    hw = np.zeros((N, 2))

    for i, folder_name in enumerate(list_of_folder):
        dets_feat = []
        for j in range(fpv):
            im_name = os.path.join(args.im_or_folder, folder_name,
                                   str(j + 1).zfill(2) + args.image_ext)

            im = cv2.imread(im_name)
            try:
                result = get_detections_from_im(cfg, model, im, '', '',
                                                args.feat_name,
                                                args.min_bboxes,
                                                args.max_bboxes)
            except Exception:
                print('missing frame: ', im_name)
                num_frm = j
                break

            height, width, _ = im.shape
            hw[i, 0] = height
            hw[i, 1] = width

            # store results
            num_proposal = result['boxes'].shape[0]
            proposals = np.concatenate(
                (result['boxes'], np.ones((num_proposal, 1)) * j,
                 np.expand_dims(result['object'], axis=1),
                 np.expand_dims(result['obj_prob'], axis=1)),
                axis=1)

            dets_feat.append(result['region_feat'].squeeze())

            dets_labels[i, j, :num_proposal] = proposals
            dets_num[i, j] = num_proposal
            nms_num[i, j] = num_proposal  # for now, treat them the same

        # save features to individual npy files
        feat_output_file = os.path.join(args.output_dir, folder_name + '.npy')
        if len(dets_feat) > 0:
            dets_feat = np.stack(dets_feat)
            print('Processed clip {}, feature shape {}'.format(
                folder_name, dets_feat.shape))
            np.save(feat_output_file, dets_feat)
        else:
            print('Empty feature file! Skipping {}...'.format(folder_name))

        count += 1

        if count % 10 == 0:
            end = timeit.default_timer()
            epoch_time = end - start
            print('process {:d} videos after {:.1f} s'.format(
                count, epoch_time))

    f = h5py.File(args.det_output_file, "w")
    # dets_labels is a numpy array, so reshape it (torch-style .view would fail)
    f.create_dataset("dets_labels", data=dets_labels.reshape(N, -1, 7))
    f.create_dataset("dets_num", data=dets_num.sum(axis=-1))
    f.create_dataset("nms_num", data=nms_num.sum(axis=-1))
    f.create_dataset("hw", data=hw)
    f.close()
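A short, hedged sketch of reading the detection file written above (dataset names taken from the `create_dataset` calls; the file name stands in for `args.det_output_file`):

# Sketch only: reads back the HDF5 file produced above.
import h5py

with h5py.File('dets.h5', 'r') as f:  # 'dets.h5' stands in for args.det_output_file
    dets_labels = f['dets_labels'][...]  # (N, fpv * 100, 7) proposal rows
    dets_num = f['dets_num'][...]        # proposals per segment
    hw = f['hw'][...]                    # per-segment frame height and width
print(dets_labels.shape, dets_num.shape, hw.shape)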
Example #20
def main():

    cfg_file = r'/home/twang/Documents/detectron/configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml'
    weights_file = r'/home/twang/Documents/detectron/model-weights/mask_rcnn_R-101-FPN_2x_model_final.pkl'

    merge_cfg_from_file(cfg_file)
    cfg.NUM_GPUS = 1
    weights = cache_url(weights_file, cfg.DOWNLOAD_CACHE)

    assert_and_infer_cfg()

    model = infer_engine.initialize_model_from_cfg(weights)

    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    video_dir = '/media/network_shared_disk/WangTao/test_video/KLA_airport/Entrance_Peak_Hour.avi'

    cap = cv2.VideoCapture(video_dir)

    print(*'MJPG')

    fourcc = cv2.VideoWriter_fourcc(*'MJPG')

    video_output = cv2.VideoWriter("out.mp4", fourcc, 5, (1280, 720))

    while cap.isOpened():

        t1 = time.time()

        ret, frame = cap.read()

        if not ret:
            break

        frame = cv2.resize(frame, dsize=(1280, 720))

        timers = defaultdict(Timer)

        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, frame, None, timers=timers)

        thresh = 0.7

        show_box = True
        show_class = True
        crop_person = True

        dataset = dummy_coco_dataset

        frame_for_person_crop = frame.copy()
        frame_for_mask = frame.copy()
        """Constructs a numpy array with the detections visualized."""
        if isinstance(cls_boxes, list):
            boxes, segms, keypoints, classes = convert_from_cls_format(
                cls_boxes, cls_segms, cls_keyps)

        if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
            # nothing above threshold in this frame; skip it
            continue

        if segms is not None and len(segms) > 0:
            masks = mask_util.decode(segms)
            color_list = colormap()
            mask_color_id = 0

        # Display in largest to smallest order to reduce occlusion
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        sorted_inds = np.argsort(-areas)

        for i in sorted_inds:
            bbox = boxes[i, :4]
            score = boxes[i, -1]
            if score < thresh:
                continue

            # show class (person, backpack, handbag, suitcase)
            class_default = ['person', 'backpack', 'handbag', 'suitcase']
            if show_class:
                class_str, class_text = get_class_string(
                    classes[i], score, dataset)

                if class_text in class_default:

                    frame = vis_class(frame, (bbox[0], bbox[1] - 2), class_str)

                    #show bounding box
                    if show_box:
                        frame = vis_bbox(frame,
                                         (bbox[0], bbox[1], bbox[2] - bbox[0],
                                          bbox[3] - bbox[1]))

                    # show mask
                    if segms is not None and len(segms) > i:
                        color_mask = color_list[mask_color_id %
                                                len(color_list), 0:3]
                        mask_color_id += 1
                        frame_for_mask = vis_mask(frame_for_mask,
                                                  masks[..., i], color_mask)

        t2 = time.time()
        durr = float(t2 - t1)
        fps = 1.0 / durr
        #cv2.putText(frame,"fps:%.3f"%fps,(20,20),4, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
        cv2.imshow('Detection using box', frame)
        cv2.imshow('Detection using mask', frame_for_mask)

        video_output.write(frame)
        #video_mask.write(frame_for_mask)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    video_output.release()
    cv2.destroyAllWindows()
Example #21
def main(args):

    logger = logging.getLogger(__name__)

    merge_cfg_from_file(args.cfg)

    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)

    assert_and_infer_cfg()

    model = infer_engine.initialize_model_from_cfg(args.weights)

    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.exists(args.video_name):
        print("video_name", args.video_name)
    else:
        print("video does not exist")

    cap = cv2.VideoCapture(args.video_name)

    while cap.isOpened():

        ret, frame = cap.read()

        if not ret:
            break

        frame = cv2.resize(frame, dsize=(1280, 720))

        timers = defaultdict(Timer)
        t1 = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, frame, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t1))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))

        frame = vis_utils.vis_one_image_opencv(frame,
                                               cls_boxes,
                                               segms=cls_segms,
                                               keypoints=cls_keyps,
                                               thresh=0.8,
                                               kp_thresh=2,
                                               show_box=True,
                                               dataset=dummy_coco_dataset,
                                               show_class=True)
        t2 = time.time()
        durr = float(t2 - t1)
        fps = 1.0 / durr
        cv2.putText(frame, "fps:%.3f" % fps, (20, 20), 4, 0.5, (0, 255, 0), 1,
                    cv2.LINE_AA)
        cv2.imshow('Detection', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #22
def main(args):
    datasetName = 'fashion_seg_val'  #'furniture_val'
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 2
    vis = True  #False
    shuffleList = False  #True
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    if args.cls_thrsh_file is not None:
        class_thresholds = {
            l.split('\t')[0]: float(l.rstrip().split('\t')[1])
            for l in open(args.cls_thrsh_file, 'r').readlines()
        }
        print(class_thresholds)
    else:
        class_thresholds = None
    #dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    dataset = JsonDataset(datasetName)

    if args.im_list is None:
        im_list = glob.glob(args.im_or_folder + '/*.' + args.image_ext)
        im_list = [osp.basename(n) for n in im_list]
    else:
        im_list = [
            l.rstrip() + '.jpg' for l in open(args.im_list, 'r').readlines()
        ]

    if shuffleList:
        from random import shuffle
        shuffle(im_list)
    checkMkdir(args.output_dir)
    #outTable = osp.join(args.output_dir, 'HF_CT_Measurement_Detected_Boxes.tsv')
    #with open(outTable,'wb') as fout:
    for i, im_name in enumerate(im_list):
        output_name = os.path.basename(im_name) + '.png'
        outFileName = os.path.join(args.output_dir, output_name)
        if osp.exists(outFileName):
            print("{} exists! continue".format(outFileName))
            continue
        imgFileName = osp.join(args.im_or_folder, im_name)
        print(imgFileName)
        im = cv2.imread(imgFileName)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        #for k, v in timers.items():
        #    logger.info(' | {}: {:.3f}s'.format(k, v.average_time))

        if vis:
            vis_utils.vis_one_image(
                im[:, :, ::-1],  # BGR -> RGB for visualization
                im_name,
                args.output_dir,
                cls_boxes,
                cls_segms,
                cls_keyps,
                dataset=dataset,  #dummy_coco_dataset,
                box_alpha=0.3,
                show_class=True,
                thresh=0.7,
                kp_thresh=2)
Example #23
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        os.makedirs(args.output_dir, exist_ok=True)
        base_name = os.path.splitext(os.path.basename(im_name))[0]
        out_image = os.path.join(args.output_dir,
                                 '{}'.format(base_name + '.png'))
        out_data = os.path.join(args.output_dir,
                                '{}'.format(base_name + '.pickle'))

        if os.path.isfile(out_image) and os.path.isfile(out_data):
            # logger.info('Already processed {}, skipping'.format(im_name))
            continue
        else:
            logger.info('Processing {} -> {}'.format(im_name, out_image))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        if not os.path.isfile(out_data):
            with open(out_data, 'wb') as f:
                pickle.dump(
                    {
                        'boxes': cls_boxes,
                        'segmentations': cls_segms,
                        'keypoints': cls_keyps
                    }, f)

        if not os.path.isfile(out_image):
            logging.info('Visualizing %s', out_image)
            vis_utils.vis_one_image(
                im[:, :, ::-1],  # BGR -> RGB for visualization
                base_name,
                args.output_dir,
                cls_boxes,
                cls_segms,
                cls_keyps,
                dataset=dummy_coco_dataset,
                box_alpha=0.3,
                show_class=True,
                thresh=0.7,
                kp_thresh=2,
                dpi=300,
                ext='png')
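Example #23 persists the raw detections with `pickle`; a hedged sketch of loading one of those files back (key names from the `pickle.dump` call above, file name illustrative):

# Sketch only: reads back a detection pickle written above.
import pickle

with open('example.pickle', 'rb') as f:  # made-up file name
    dets = pickle.load(f)
cls_boxes = dets['boxes']          # per-class list of [x1, y1, x2, y2, score] rows
cls_segms = dets['segmentations']  # per-class segmentation (RLE) lists, or None
cls_keyps = dets['keypoints']      # per-class keypoint arrays, or None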
Example #24
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    # get the weight path
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        # glob.iglob returns an iterator over the same paths glob.glob would list
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))
        im = cv2.imread(im_name)
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')

        # the commented-out vis_one_image call below would save a pdf via matplotlib

        proc_im = vis_utils.vis_one_image_opencv(im,
                                                 cls_boxes,
                                                 dataset=dummy_coco_dataset,
                                                 thresh=0.7,
                                                 show_box=True,
                                                 show_class=True)

        cv2.imshow('img', proc_im)
        cv2.waitKey(0)
        """
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )
        """
    cv2.destroyWindow('img')
Example #25
def main(args):

    cfg_file = r'/home/twang/Documents/detectron/configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml'
    weights_file = r'/home/twang/Documents/detectron/model-weights/mask_rcnn_R-101-FPN_2x_model_final.pkl'

    video_dir = args.video_dir
    print("video_dir", video_dir)

    video_name = os.path.basename(video_dir)
    video_name = os.path.splitext(video_name)[0]
    print("video_name", video_name)

    directory_box = os.path.join(
        os.path.join(r"/home/twang/Documents/HK-person", video_name), 'box')
    print("directory_box", directory_box)
    os.makedirs(directory_box)

    directory_mask = os.path.join(
        os.path.join(r"/home/twang/Documents/HK-person", video_name), 'mask')
    print("directory_mask", directory_mask)
    os.makedirs(directory_mask)

    merge_cfg_from_file(cfg_file)
    cfg.NUM_GPUS = 1
    weights = cache_url(weights_file, cfg.DOWNLOAD_CACHE)

    assert_and_infer_cfg()

    model = infer_engine.initialize_model_from_cfg(weights)

    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    cap = cv2.VideoCapture(video_dir)

    count = 0

    while cap.isOpened():

        ret, frame = cap.read()

        if not ret:
            break

        frame = cv2.resize(frame, dsize=(1280, 720))

        total_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)

        current_frame = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
        Frame_step = 5

        if current_frame + Frame_step < total_frame:

            cap.set(cv2.CAP_PROP_POS_FRAMES, current_frame + Frame_step)

            timers = defaultdict(Timer)

            with c2_utils.NamedCudaScope(0):
                cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                    model, frame, None, timers=timers)

            thresh = 0.9

            crop_box = True
            dataset = dummy_coco_dataset

            frame_for_box_crop = frame.copy()
            frame_for_mask = frame.copy()
            """Constructs a numpy array with the detections visualized."""
            if isinstance(cls_boxes, list):
                boxes, segms, keypoints, classes = convert_from_cls_format(
                    cls_boxes, cls_segms, cls_keyps)

            if boxes is None or boxes.shape[0] == 0 or max(boxes[:,
                                                                 4]) < thresh:
                return frame

            if segms is not None and len(segms) > 0:
                masks = mask_util.decode(segms)
                color_list = colormap()
                mask_color_id = 0

            # Display in largest to smallest order to reduce occlusion
            areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
            sorted_inds = np.argsort(-areas)

            for i in sorted_inds:
                bbox = boxes[i, :4]
                score = boxes[i, -1]
                if score < thresh:
                    continue

                # integer crop coordinates, shared by the box and mask crops
                (x1, y1, w, h) = (int(bbox[0]), int(bbox[1]),
                                  int(bbox[2] - bbox[0]),
                                  int(bbox[3] - bbox[1]))
                x2 = x1 + w
                y2 = y1 + h

                # crop each box
                if crop_box:
                    #frame = vis_bbox(frame, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]))

                    cropped = frame_for_box_crop[y1:y2, x1:x2]

                    cv2.imwrite(
                        "%s/person_Frame%i_%i.png" %
                        (directory_box, current_frame, i), cropped)

                # crop each mask
                if segms is not None and len(segms) > i:
                    color_mask = color_list[mask_color_id % len(color_list),
                                            0:3]
                    mask_color_id += 1
                    #frame = vis_mask(frame, masks[..., i], color_mask)

                    cropped_mask = masks[..., i][y1:y2, x1:x2]
                    cropped_img = frame_for_mask[y1:y2, x1:x2]

                    cropped_img = vis_mask(cropped_img, cropped_mask,
                                           color_mask)

                    cv2.imwrite(
                        "%s/person_Mask_Frame%i_%i.png" %
                        (directory_mask, current_frame, i), cropped_img)

            count += 1

            print("done:%i" % count)

        else:
            # within frame_step of the end of the video: read out the
            # remaining frames without processing them
            pass

    cap.release()
    cv2.destroyAllWindows()
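
# Note (a sketch under assumptions, not the example's method): re-seeking with
# cap.set(cv2.CAP_PROP_POS_FRAMES, ...) on every iteration can be slow or
# imprecise for long-GOP codecs such as H.264. An alternative is to decode
# every frame_step-th frame and merely grab() the ones in between, which skips
# the expensive decode. The generator name is hypothetical.
def read_every_nth(cap, frame_step):
    """Yield every frame_step-th frame from an opened cv2.VideoCapture."""
    while True:
        ret, frame = cap.read()
        if not ret:
            return
        yield frame
        for _ in range(frame_step - 1):
            if not cap.grab():  # advance without decoding
                return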
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
    else:
        im_list = [args.im_or_folder]

    for i, im_name in enumerate(im_list):
        out_name = os.path.join(
            args.output_dir, '{}'.format(os.path.basename(im_name) + '.pdf'))
        logger.info('Processing {} -> {}'.format(im_name, out_name))

        im = cv2.imread(im_name)
        if im is None:
            continue

        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        if i == 0:
            logger.info(
                ' \\ Note: inference on the first image will be slower than the '
                'rest (caches and auto-tuning need to warm up)')
        lists = vis_utils.vis_one_image2(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)

        # Label the image for the CSV: '1' if the detected classes are all
        # distinct and the first is 'apple', '0' otherwise, and '2' if there
        # were no detections at all.
        class_name = []
        class_prob = []
        if lists is not None:
            for x in lists:
                class_name.append(x.split(' ')[0])
                class_prob.append(x.split(' ')[1])

            if len(class_name) == len(set(class_name)) and class_name[0] == 'apple':
                lists.insert(0, '1')
            else:
                lists.insert(0, '0')
        else:
            lists = ['2']
        lists.insert(0, os.path.splitext(os.path.basename(im_name))[0])
        with open(os.path.join(args.output_dir, 'output.csv'),
                  'a') as resultFile:
            wr = csv.writer(resultFile, dialect='excel')
            wr.writerow(lists)

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
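
# A minimal sketch of consuming output.csv as written above. Each row is
# [image basename, label, "class prob" strings...], where label '1' marks
# images whose detected classes are all distinct with 'apple' first, '0'
# otherwise, and '2' marks images with no detections. The helper name is
# hypothetical.
import csv

def load_results(csv_path):
    results = {}
    with open(csv_path) as f:
        for row in csv.reader(f):
            name, label, detections = row[0], row[1], row[2:]
            results[name] = (label, detections)
    return results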
Example #27
def main(args):
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    start = timeit.default_timer()

    # extract region bboxes and features from pre-trained models
    count = 0
    if not os.path.exists(args.box_output_dir):
        os.makedirs(args.box_output_dir)
    if not os.path.exists(args.featcls_output_dir):
        os.makedirs(args.featcls_output_dir)

    results = {}

    with h5py.File(os.path.join(args.box_output_dir, args.output_file_prefix+'_bbox'+args.proc_split+'.h5'), "w") as f, \
        h5py.File(os.path.join(args.featcls_output_dir, args.output_file_prefix+'_feat'+args.proc_split+'.h5'), "w") as f_feat, \
        h5py.File(os.path.join(args.featcls_output_dir, args.output_file_prefix+'_cls'+args.proc_split+'.h5'), "w") as f_cls:

        if args.dataset in ('COCO', 'Flickr30k'):
            assert os.path.isdir(args.im_or_folder), \
                '{} expects a directory of images'.format(args.dataset)
            im_list = glob.iglob(args.im_or_folder + '/*.' +
                                 args.image_ext)
        elif args.dataset == 'CC':
            valid_ids = json.load(
                open('/mnt/dat/CC/annotations/cc_valid_jpgs.json')
            )  # some images are broken. hard coded for now
            im_list = valid_ids.keys()
            print('number of valid CC images {}'.format(len(im_list)))
        elif args.dataset == 'SBU':
            valid_ids = json.load(
                open('/z/dat/VLP/dat/SBU/annotations/sbu_valid_jpgs.json')
            )  # some images are broken. hard coded for now
            im_list = valid_ids.keys()
            print('number of valid SBU images {}'.format(len(im_list)))

        for i, im_name in enumerate(im_list):
            im_base_name = os.path.basename(im_name)
            image_id = im_base_name
            if image_id[-4 - len(args.proc_split):-4] == args.proc_split:
                im_name = os.path.join(args.im_or_folder, image_id)

                print(im_name)
                im = cv2.imread(im_name)
                result = get_detections_from_im(cfg, model, im, image_id, '',
                                                args.feat_name,
                                                args.min_bboxes,
                                                args.max_bboxes)
                # store results: proposal boxes plus the object-class and
                # object-probability columns
                proposals = np.concatenate(
                    (result['boxes'],
                     np.expand_dims(result['object'], axis=1).astype(np.float32),
                     np.expand_dims(result['obj_prob'], axis=1)),
                    axis=1)

                f.create_dataset(image_id[:-4],
                                 data=proposals.astype(args.data_type))
                f_feat.create_dataset(
                    image_id[:-4],
                    data=result['region_feat'].squeeze().astype(
                        args.data_type))
                f_cls.create_dataset(image_id[:-4],
                                     data=result['cls_prob'].astype(
                                         args.data_type))

                count += 1
                if count % 10 == 0:
                    end = timeit.default_timer()
                    epoch_time = end - start
                    print('processed {:d} images in {:.1f} s'.format(
                        count, epoch_time))
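
# A minimal sketch of reading the HDF5 outputs back, assuming the naming
# scheme above: each dataset is keyed by the image id (file name without its
# extension), and the bbox file holds the proposal boxes plus the object-class
# and object-probability columns. The helper name is hypothetical.
import h5py

def inspect_h5(path, limit=5):
    """Print the shape and dtype of the first few datasets in an output file."""
    with h5py.File(path, 'r') as f:
        for image_id in list(f.keys())[:limit]:
            dset = f[image_id]
            print(image_id, dset.shape, dset.dtype)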