Example #1
def imgs_detection(imgs, model, thre, step=1):
    """Run detection over a list of images.

    Args:
        imgs (list): List of images to process.
        model: Initialized detector model.
        thre (float): Score threshold; boxes scoring below it are dropped.
        step (int, optional): Sampling stride over the image list. Defaults to 1.

    Returns:
        tuple: (frames_index, hat_bboxs, person_bboxs), where each bbox entry
            holds the (x1, y1, x2, y2) boxes kept for one sampled frame.
    """
    frames_index = list(range(0, len(imgs), step))
    hat_bboxs, person_bboxs = [], []
    # A plain Python list cannot be fancy-indexed, so sample it explicitly
    for img in (imgs[i] for i in frames_index):
        result = inference_detector(model, img)
        hat_bbox, person_bbox = result[0], result[1]
        # Drop bboxes whose score is below the threshold
        hat_bbox, person_bbox = hat_bbox[hat_bbox[:, 4] > thre], person_bbox[
            person_bbox[:, 4] > thre]
        # Strip the score column, keeping only the coordinates
        hat_bbox = hat_bbox[:, 0:4]
        hat_bboxs.append(hat_bbox)
        person_bbox = person_bbox[:, 0:4]
        person_bboxs.append(person_bbox)

    # Detection counts vary per frame, so store the ragged lists as object arrays
    hat_bboxs = np.array(hat_bboxs, dtype=object)
    person_bboxs = np.array(person_bboxs, dtype=object)
    return frames_index, hat_bboxs, person_bboxs
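A minimal usage sketch for imgs_detection; the config/checkpoint paths, image names and the 0.5 threshold are assumptions for illustration, not part of the original example:

# Hypothetical usage: paths and threshold are placeholders.
model = init_detector('hat_cfg.py', 'hat_ckpt.pth', device='cuda:0')
imgs = [mmcv.imread(p) for p in ('frame_000.jpg', 'frame_001.jpg')]
frames_index, hat_bboxs, person_bboxs = imgs_detection(imgs, model, thre=0.5, step=1)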
Example #2
def process_video(model,
                  input_path,
                  output_path,
                  require_fps,
                  hat_color,
                  person_color,
                  fourcc='mp4v'):
    """处理视频并输出到指定目录
    
    Arguments:
        model {torch.nn.Sequ} -- [使用的模型]
        input_path {[str]} -- [视频文件路径]
        require_fps {[int]} -- [输出的视频fps]
        fourcc {[str]} -- [opencv写文件编码格式]
        hat_color {[str]} -- [安全帽框颜色]
        person_color {[str]} -- [人头框颜色]
        process_step {[int]} -- [以step分钟的间隔处理整个视频,内存越大step可以越大]
    """
    video = mmcv.VideoReader(input_path)
    # Initialize the head tracker
    psn_tracker = Tracker()
    resolution = (video.width, video.height)
    video_fps = video.fps
    #ds = DetectionSifter(int(video_fps),osp.basename(args.input_path).split('.')[0],1,3,resolution,get_collection())
    if require_fps is None or require_fps > video_fps:
        require_fps = video_fps
    vwriter = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*fourcc),
                              require_fps, resolution)
    for frame in tqdm(video):
        # bbox:(hat_bbox,person_bbox)
        st = time.time()
        bboxs = inference_detector(model, frame)
        et = time.time()
        Loger.info('Detection took {0}s'.format(et - st))
        frame_result = get_result(frame,
                                  bboxs,
                                  class_names=model.CLASSES,
                                  auto_thickness=True,
                                  color_dist={
                                      'hat': 'green',
                                      'person': 'red'
                                  })
        # person_bboxs:(N,5)
        person_bboxs = bboxs[1]
        # Keep detections with score above 0.5 for tracking
        person_bboxs = person_bboxs[person_bboxs[:, 4] > 0.5]
        person_bboxs = np.expand_dims(person_bboxs, 0)
        person_bboxs_tracks = track(person_bboxs, psn_tracker)[0]
        #ds.add_object(person_bboxs_tracks,frame)
        vwriter.write(frame_result)
    #ds.clear()
    print('process finished')
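A hedged usage sketch for process_video; the file paths and the model setup are assumptions, and require_fps=None keeps the source fps per the logic above:

# Hypothetical usage: paths and model setup are placeholders.
model = init_detector('hat_cfg.py', 'hat_ckpt.pth', device='cuda:0')
process_video(model, 'input.mp4', 'output.mp4',
              require_fps=None, hat_color='green', person_color='red')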
Example #3
def img_detection(img, model, thre):
    """Run detection on a single image and return (bboxes, scores) pairs
    for the hat and person classes, filtered by the score threshold."""
    result = inference_detector(model, img)
    hat_bbox, person_bbox = result[0], result[1]
    # Drop bboxes whose score is below the threshold
    hat_bbox, person_bbox = hat_bbox[hat_bbox[:, 4] > thre], person_bbox[
        person_bbox[:, 4] > thre]
    # Split out the score column before stripping it from the boxes
    hat_bbox_pro = hat_bbox[:, 4]
    hat_bbox = hat_bbox[:, 0:4]
    person_bbox_pro = person_bbox[:, 4]
    person_bbox = person_bbox[:, 0:4]
    return (hat_bbox, hat_bbox_pro), (person_bbox, person_bbox_pro)
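A usage sketch for img_detection; the image path and threshold are assumptions for illustration:

# Hypothetical usage: image path and threshold are placeholders.
img = mmcv.imread('demo.jpg')
(hat_xyxy, hat_scores), (person_xyxy, person_scores) = img_detection(img, model, thre=0.5)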
Example #4
def main():
    args = parse_args()

    model = init_detector(args.config,
                          args.checkpoint,
                          device=torch.device('cuda', args.device))

    camera = cv2.VideoCapture(args.camera_id)

    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        ret_val, img = camera.read()
        if not ret_val:
            break
        result = inference_detector(model, img)

        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break

        show_result(img,
                    result,
                    model.CLASSES,
                    score_thr=args.score_thr,
                    wait_time=1)
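Assuming parse_args defines the config, checkpoint, device, camera_id and score_thr arguments used above, the demo would be launched along these lines (script name and paths are placeholders):

    python webcam_demo.py configs/some_config.py checkpoints/some_ckpt.pth --camera-id 0 --score-thr 0.5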
Example #5
async def main():
    """

    Benchmark between async and synchronous inference interfaces.

    Sample runs for 20 demo images on K80 GPU, model - mask_rcnn_r50_fpn_1x:

    async        sync
    7981.79 ms   9660.82 ms
    8074.52 ms   9660.94 ms
    7976.44 ms   9406.83 ms

    The async variant takes about 0.83-0.85 of the time of the synchronous
    interface.

    """
    project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

    config_file = os.path.join(project_dir, 'configs/mask_rcnn_r50_fpn_1x.py')
    checkpoint_file = os.path.join(
        project_dir, 'checkpoints/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth')

    if not os.path.exists(checkpoint_file):
        url = ('https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection'
               '/models/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth')
        print('Downloading {} ...'.format(url))
        local_filename, _ = urllib.request.urlretrieve(url)
        os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True)
        shutil.move(local_filename, checkpoint_file)
        print('Saved as {}'.format(checkpoint_file))
    else:
        print('Using existing checkpoint {}'.format(checkpoint_file))

    device = 'cuda:0'
    model = init_detector(config_file,
                          checkpoint=checkpoint_file,
                          device=device)

    # queue is used for concurrent inference of multiple images
    streamqueue = asyncio.Queue()
    # queue size defines concurrency level
    streamqueue_size = 4

    for _ in range(streamqueue_size):
        streamqueue.put_nowait(torch.cuda.Stream(device=device))

    # test a single image and show the results
    img = mmcv.imread(os.path.join(project_dir, 'demo/demo.jpg'))

    # warmup
    await async_inference_detector(model, img)

    async def detect(img):
        async with concurrent(streamqueue):
            return await async_inference_detector(model, img)

    num_of_images = 20
    with profile_time('benchmark', 'async'):
        tasks = [
            asyncio.create_task(detect(img)) for _ in range(num_of_images)
        ]
        async_results = await asyncio.gather(*tasks)

    with torch.cuda.stream(torch.cuda.default_stream()):
        with profile_time('benchmark', 'sync'):
            sync_results = [
                inference_detector(model, img) for _ in range(num_of_images)
            ]

    result_dir = os.path.join(project_dir, 'demo')
    show_result(img,
                async_results[0],
                model.CLASSES,
                score_thr=0.5,
                show=False,
                out_file=os.path.join(result_dir, 'result_async.jpg'))
    show_result(img,
                sync_results[0],
                model.CLASSES,
                score_thr=0.5,
                show=False,
                out_file=os.path.join(result_dir, 'result_sync.jpg'))
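The snippet does not show its entry point; a standard way to drive the coroutine would be the following (an assumption, not part of the original example):

if __name__ == '__main__':
    asyncio.run(main())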