Example #1
async def update_track(branch_id: int,
                       bboxes: str = Body(..., embed=True),
                       files: List[UploadFile] = File(...)):
    global tracker
    # Lazily create one tracker per branch id.
    if branch_id not in tracker:
        tracker[branch_id] = MultiCameraTracker(number_of_cameras, reid,
                                                config)
    d_bboxes = json.loads(bboxes)
    # Decode each uploaded image and feed the batch to the tracker
    # (np.frombuffer replaces the deprecated np.fromstring).
    tracker[branch_id].process([[
        cv2.imdecode(np.frombuffer(im.file.read(), np.uint8), cv2.IMREAD_COLOR)
        for im in files
    ]], [d_bboxes])
    return {"status": "success"}
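A minimal client sketch for the endpoint above, assuming the route is mounted as POST /update_track/{branch_id} (the snippet does not show the decorator) and that bboxes arrives as a multipart form field next to the images (declaring it with Form(...) instead of Body(...) may be needed for this to work):

import json

import requests  # any HTTP client works; requests is just convenient

# Hypothetical URL: the snippet does not show how the route is registered.
url = "http://localhost:8000/update_track/1"
bboxes = [[10, 20, 110, 220], [30, 40, 130, 240]]  # illustrative boxes

with open("cam0.jpg", "rb") as f0, open("cam1.jpg", "rb") as f1:
    resp = requests.post(
        url,
        data={"bboxes": json.dumps(bboxes)},   # parsed server-side by json.loads
        files=[("files", f0), ("files", f1)],  # one multipart part per camera image
    )
print(resp.json())  # expected: {"status": "success"}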
Example #2
def run(params, capture, detector, reid):
    win_name = 'Multi camera tracking'
    config = {}
    if len(params.config):
        config = read_py_config(params.config)

    tracker = MultiCameraTracker(capture.get_num_sources(), reid, **config)

    thread_body = FramesThreadBody(capture,
                                   max_queue_length=len(capture.captures) * 2)
    frames_thread = Thread(target=thread_body)
    frames_thread.start()

    if len(params.output_video):
        video_output_size = (1920 // capture.get_num_sources(), 1080)
        fourcc = cv.VideoWriter_fourcc(*'XVID')
        output_video = cv.VideoWriter(params.output_video, fourcc, 24.0,
                                      video_output_size)
    else:
        output_video = None

    while thread_body.process:
        start = time.time()
        try:
            frames = thread_body.frames_queue.get_nowait()
        except queue.Empty:
            frames = None

        if frames is None:
            continue

        all_detections = detector.get_detections(frames)
        all_masks = [[] for _ in range(len(all_detections))]
        for i, detections in enumerate(all_detections):
            all_detections[i] = [det[0] for det in detections]
            all_masks[i] = [det[2] for det in detections if len(det) == 3]

        tracker.process(frames, all_detections, all_masks)
        tracked_objects = tracker.get_tracked_objects()

        fps = round(1 / (time.time() - start), 1)
        vis = visualize_multicam_detections(frames, tracked_objects, fps)
        if not params.no_show:
            cv.imshow(win_name, vis)
            if cv.waitKey(1) == 27:
                break
        if output_video:
            output_video.write(cv.resize(vis, video_output_size))

    thread_body.process = False
    frames_thread.join()

    if len(params.history_file):
        history = tracker.get_all_tracks_history()
        with open(params.history_file, 'w') as outfile:
            json.dump(history, outfile)
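Every example hands a FramesThreadBody instance straight to Thread(target=...), which only works if the object is callable. A minimal sketch of that pattern, assuming the capture object exposes a get_frames() method returning (has_frames, frames); the real class used by these examples may differ in details:

import queue
import time


class FramesThreadBody:
    """Callable thread body that keeps a bounded queue of frame batches."""

    def __init__(self, capture, max_queue_length=2):
        self.process = True              # the main loop flips this off to stop us
        self.frames_queue = queue.Queue()
        self.capture = capture
        self.max_queue_length = max_queue_length

    def __call__(self):
        while self.process:
            if self.frames_queue.qsize() > self.max_queue_length:
                time.sleep(0.1)          # back off while the consumer catches up
                continue
            has_frames, frames = self.capture.get_frames()
            if not has_frames and self.frames_queue.empty():
                self.process = False     # source exhausted: stop both loops
                break
            if has_frames:
                self.frames_queue.put(frames)

Because the instance defines __call__, Thread(target=thread_body) runs this loop on a worker thread while the main loop drains frames_queue with get_nowait().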
Example #3
def run(params, config, capture, detector, reid):
    win_name = 'Multi camera tracking'
    frame_number = 0
    avg_latency = AverageEstimator()
    output_detections = [[] for _ in range(capture.get_num_sources())]
    key = -1

    if config['normalizer_config']['enabled']:
        capture.add_transform(
            NormalizerCLAHE(
                config['normalizer_config']['clip_limit'],
                config['normalizer_config']['tile_size'],
            ))

    tracker = MultiCameraTracker(capture.get_num_sources(),
                                 reid,
                                 config['sct_config'],
                                 **config['mct_config'],
                                 visual_analyze=config['analyzer'])

    thread_body = FramesThreadBody(capture,
                                   max_queue_length=len(capture.captures) * 2)
    frames_thread = Thread(target=thread_body)
    frames_thread.start()

    if len(params.output_video):
        frame_size, fps = capture.get_source_parameters()
        target_width, target_height = get_target_size(
            frame_size, None, **config['visualization_config'])
        video_output_size = (target_width, target_height)
        fourcc = cv.VideoWriter_fourcc(*'XVID')
        output_video = cv.VideoWriter(params.output_video, fourcc, min(fps),
                                      video_output_size)
    else:
        output_video = None

    prev_frames = thread_body.frames_queue.get()
    detector.run_async(prev_frames, frame_number)
    presenter = monitors.Presenter(params.utilization_monitors, 0)

    while thread_body.process:
        if not params.no_show:
            key = check_pressed_keys(key)
            if key == 27:
                break
            presenter.handleKey(key)
        start = time.perf_counter()
        try:
            frames = thread_body.frames_queue.get_nowait()
        except queue.Empty:
            frames = None

        if frames is None:
            continue

        all_detections = detector.wait_and_grab()
        if params.save_detections:
            update_detections(output_detections, all_detections, frame_number)
        frame_number += 1
        detector.run_async(frames, frame_number)

        all_masks = [[] for _ in range(len(all_detections))]
        for i, detections in enumerate(all_detections):
            all_detections[i] = [det[0] for det in detections]
            all_masks[i] = [det[2] for det in detections if len(det) == 3]

        tracker.process(prev_frames, all_detections, all_masks)
        tracked_objects = tracker.get_tracked_objects()

        latency = max(time.perf_counter() - start, sys.float_info.epsilon)
        avg_latency.update(latency)
        fps = round(1. / latency, 1)

        vis = visualize_multicam_detections(prev_frames, tracked_objects, fps,
                                            **config['visualization_config'])
        presenter.drawGraphs(vis)
        if not params.no_show:
            cv.imshow(win_name, vis)

        if output_video:
            output_video.write(cv.resize(vis, video_output_size))

        print('\rProcessing frame: {}, fps = {} (avg_fps = {:.3})'.format(
            frame_number, fps, 1. / avg_latency.get()),
              end="")
        prev_frames, frames = frames, prev_frames
    print(presenter.reportMeans())
    print('')

    thread_body.process = False
    frames_thread.join()

    if len(params.history_file):
        save_json_file(params.history_file,
                       tracker.get_all_tracks_history(),
                       description='History file')
    if len(params.save_detections):
        save_json_file(params.save_detections,
                       output_detections,
                       description='Detections')

    if len(config['embeddings']['save_path']):
        save_embeddings(tracker.scts, **config['embeddings'])
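The dict-based run variant above pulls several nested sections out of config. The keys it accesses imply a shape roughly like the following; all values are illustrative guesses, not taken from any shipped configuration file:

config = {
    'normalizer_config': {
        'enabled': False,        # apply CLAHE contrast normalization to inputs
        'clip_limit': 0.5,
        'tile_size': 8,
    },
    'sct_config': {},            # per-camera (single-camera tracker) settings
    'mct_config': {},            # cross-camera matching settings
    'analyzer': {'enabled': False},
    'visualization_config': {'max_window_size': (1920, 1080)},
    'embeddings': {'save_path': ''},  # empty string disables saving
}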
Example #4
def run(params, config, capture, detector, reid):
    win_name = 'Multi camera tracking'
    frame_number = 0
    output_detections = [[] for _ in range(capture.get_num_sources())]
    key = -1

    if config.normalizer_config.enabled:
        capture.add_transform(
            NormalizerCLAHE(
                config.normalizer_config.clip_limit,
                config.normalizer_config.tile_size,
            ))

    tracker = MultiCameraTracker(capture.get_num_sources(),
                                 reid,
                                 config.sct_config,
                                 **vars(config.mct_config),
                                 visual_analyze=config.analyzer)

    thread_body = FramesThreadBody(capture,
                                   max_queue_length=len(capture.captures) * 2)
    frames_thread = Thread(target=thread_body)
    frames_thread.start()

    frames_read = False
    set_output_params = False

    prev_frames = thread_body.frames_queue.get()
    detector.run_async(prev_frames, frame_number)
    metrics = PerformanceMetrics()
    presenter = monitors.Presenter(params.utilization_monitors, 0)

    while thread_body.process:
        if not params.no_show:
            key = check_pressed_keys(key)
            if key == 27:
                break
            presenter.handleKey(key)
        start_time = time.perf_counter()
        try:
            frames = thread_body.frames_queue.get_nowait()
            frames_read = True
        except queue.Empty:
            frames = None

        if frames is None:
            continue

        all_detections = detector.wait_and_grab()
        if params.save_detections:
            update_detections(output_detections, all_detections, frame_number)
        frame_number += 1
        detector.run_async(frames, frame_number)

        all_masks = [[] for _ in range(len(all_detections))]
        for i, detections in enumerate(all_detections):
            all_detections[i] = [det[0] for det in detections]
            all_masks[i] = [det[2] for det in detections if len(det) == 3]

        tracker.process(prev_frames, all_detections, all_masks)
        tracked_objects = tracker.get_tracked_objects()

        vis = visualize_multicam_detections(
            prev_frames, tracked_objects, **vars(config.visualization_config))
        metrics.update(start_time, vis)
        presenter.drawGraphs(vis)
        if not params.no_show:
            cv.imshow(win_name, vis)

        if frames_read and not set_output_params:
            set_output_params = True
            if len(params.output_video):
                frame_size = [frame.shape[::-1] for frame in frames]
                fps = capture.get_fps()
                target_width, target_height = get_target_size(
                    frame_size, None, **vars(config.visualization_config))
                video_output_size = (target_width, target_height)
                fourcc = cv.VideoWriter_fourcc(*'XVID')
                output_video = cv.VideoWriter(params.output_video, fourcc,
                                              min(fps), video_output_size)
            else:
                output_video = None
        if set_output_params and output_video:
            output_video.write(cv.resize(vis, video_output_size))

        prev_frames, frames = frames, prev_frames

    metrics.log_total()
    for rep in presenter.reportMeans():
        log.info(rep)

    thread_body.process = False
    frames_thread.join()

    if len(params.history_file):
        save_json_file(params.history_file,
                       tracker.get_all_tracks_history(),
                       description='History file')
    if len(params.save_detections):
        save_json_file(params.save_detections,
                       output_detections,
                       description='Detections')

    if len(config.embeddings.save_path):
        save_embeddings(tracker.scts, **vars(config.embeddings))
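This variant reads the same settings through attribute access (config.sct_config, vars(config.embeddings)), so a plain dict has to be wrapped first. A minimal sketch using types.SimpleNamespace; the real project may use its own config class:

from types import SimpleNamespace


def to_namespace(obj):
    """Recursively convert dicts into attribute-accessible namespaces."""
    if isinstance(obj, dict):
        return SimpleNamespace(**{k: to_namespace(v) for k, v in obj.items()})
    return obj


cfg = to_namespace({'mct_config': {'time_window': 20},
                    'embeddings': {'save_path': ''}})
print(cfg.mct_config.time_window)   # 20
print(vars(cfg.embeddings))         # {'save_path': ''}, what **vars(...) unpacks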
Example #5
def run(params, capture, detector, reid, jot):  # params: the parsed CLI args
    win_name = 'TEAM_KOTLIN'
    config = {}

    if len(params.config):
        config = read_py_config(params.config)

    tracker = MultiCameraTracker(capture.get_num_sources(), reid, **config)

    thread_body = FramesThreadBody(capture,
                                   max_queue_length=len(capture.captures) * 2)
    frames_thread = Thread(target=thread_body)
    frames_thread.start()

    if len(params.output_video):
        video_output_size = (1920 // capture.get_num_sources(), 1080)
        fourcc = cv.VideoWriter_fourcc(*'XVID')
        output_video = cv.VideoWriter(params.output_video, fourcc, 24.0,
                                      video_output_size)
    else:
        output_video = None

    print("##################################################################")

    while thread_body.process:

        start = time.time()
        try:
            frames = thread_body.frames_queue.get_nowait()
        except queue.Empty:
            frames = None

        if frames is None:
            continue

        all_detections = detector.get_detections(frames)
        # all_detections is a list of ((left, right, top, bot), confidence)
        # tuples grouped per source: detections from video 1 come first,
        # then video 2's, i.e.
        # [[boxes/confidences of video 1], [boxes/confidences of video 2]].
        all_masks = [[] for _ in range(len(all_detections))]  # one empty mask list per source
        for i, detections in enumerate(all_detections):
            all_detections[i] = [det[0] for det in detections]
            all_masks[i] = [det[2] for det in detections if len(det) == 3]

        feature_data = tracker.process(frames, all_detections, all_masks)
        tracked_objects = tracker.get_tracked_objects()

        # Added 2020-05-11: strip heavy per-track fields before passing on.
        track_data = copy.deepcopy(feature_data)
        for track in track_data:
            del track['features']
            del track['boxes']
            del track['timestamps']
            del track['cam_id']

        # Call the function that hands the data over to the jot table.
        jot.check_jot(tracked_objects, frames, track_data)

        fps = round(1 / (time.time() - start), 1)

        # Time handling: attach the current timestamp to the visualization.
        dates = datetime.now()
        vis = visualize_multicam_detections(frames, tracked_objects, fps,
                                            dates)
        if not params.no_show:
            cv.imshow(win_name, vis)
            if cv.waitKey(1) == 27:
                break
        if output_video:
            output_video.write(cv.resize(vis, video_output_size))

    thread_body.process = False
    frames_thread.join()

    if len(params.history_file):
        history = tracker.get_all_tracks_history()
        with open(params.history_file, 'w') as outfile:
            json.dump(history, outfile)

    print("##################################################################")
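The deepcopy-plus-del block above copies the heavy features arrays only to discard them. A lighter equivalent, assuming feature_data is a list of flat dicts as the loop implies, builds shallow copies that keep only the wanted keys (fine as long as the remaining values are not mutated downstream):

DROP_KEYS = {'features', 'boxes', 'timestamps', 'cam_id'}

# Per-track copies that never materialize the dropped values.
track_data = [{k: v for k, v in track.items() if k not in DROP_KEYS}
              for track in feature_data]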
Example #6
def app(video_link, video_name, show, record, flip_hor, flip_ver):
    # initialize Face Detection net
    config = read_py_config('config.py')
    object_detector = ObjectDetector()
    reid = PersonEmbedder()

    # initialize Video Capturer
    cap = MulticamCapture(video_link)
    #cap = WebcamVideoStream(src=video_link).start()
    # (W, H), FPS = imgproc.cameraCalibrate(cap)
    # LOG.info('Camera Info: ({}, {}) - {:.3f}'.format(W, H, FPS))
    tracker = MultiCameraTracker(cap.get_num_sources(), reid, **config)
    thread_body = FramesThreadBody(cap, max_queue_length=len(cap.captures) * 2)
    frames_thread = Thread(target=thread_body)
    frames_thread.start()

    if record:
        time_str = time.strftime(cfg.TIME_FM)
        writer = cv.VideoWriter(video_name + time_str + '.avi',
                                cv.VideoWriter_fourcc(*'XVID'), 20,
                                (1280, 720))

    cnt_frm = 0
    counter = 0
    while thread_body.process:
        try:
            frm = thread_body.frames_queue.get_nowait()
        except queue.Empty:
            frm = None
        if frm is None:
            continue
        cnt_frm += 1

        # if flip_ver: frm = cv.flip(frm, 0)
        # if flip_hor: frm = cv.flip(frm, 1)
        _start_t = time.time()
        all_detections = []
        for f in frm:
            f = imgproc.resizeByHeight(f, 640)
            _, bboxes = object_detector.getObjects(f, def_score=0.5)
            all_detections.append(bboxes)

        tracker.process(frm, all_detections, [[]])
        tracked_objects = tracker.get_tracked_objects()
        _prx_t = time.time() - _start_t
        fps = round(1 / _prx_t, 1)
        if len(bboxes):  # note: bboxes holds only the last source's detections here
            frm = visualize_multicam_detections(frm, tracked_objects, fps)
        frm = vis.plotInfo(frm,
                           'Raspberry Pi - FPS: {:.3f}'.format(1 / _prx_t))
        frm = cv.cvtColor(np.asarray(frm), cv.COLOR_BGR2RGB)

        if record:
            writer.write(frm)

        if show:
            cv.imshow(video_name, frm)
            key = cv.waitKey(1)
            if key in [27, ord('q')]:
                LOG.info('Interrupted by user')
                break
        else:
            if counter % 10 == 0:
                print(
                    f"IN : {SingleCameraTracker.COUNT_IN}, OUT: {SingleCameraTracker.COUNT_OUT}"
                )
        counter += 1

    thread_body.process = False
    frames_thread.join()
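imgproc.resizeByHeight is not shown in the example. A plausible stand-in that scales a frame to a fixed height while preserving aspect ratio (an assumption about the helper's behavior):

import cv2 as cv


def resize_by_height(image, target_height=640):
    """Scale image so its height equals target_height, keeping aspect ratio."""
    h, w = image.shape[:2]
    target_width = int(round(w * target_height / h))
    return cv.resize(image, (target_width, target_height))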
Example #7
def run(params, config, capture, detector, reid):
    ix, iy = -1, -1

    # Ground-plane calibration: four image points mapped to world coordinates.
    pts_src = np.array([[561, 1022], [990, 698], [486, 273], [95, 504]],
                       dtype='float32')
    pts_dest = np.array([[0, 0], [0, 400], [400, 700], [0, 700]],
                        dtype='float32')
    # Calculate the homography matrix H.
    h, status = cv.findHomography(pts_src, pts_dest)

    win_name = 'Multi camera tracking'
    frame_number = 0
    avg_latency = AverageEstimator()
    output_detections = [[] for _ in range(capture.get_num_sources())]
    key = -1
    refObj = []
    
    if config['normalizer_config']['enabled']:
        capture.add_transform(
            NormalizerCLAHE(
                config['normalizer_config']['clip_limit'],
                config['normalizer_config']['tile_size'],
            )
        )

    tracker = MultiCameraTracker(capture.get_num_sources(),
                                 reid,
                                 config['sct_config'],
                                 **config['mct_config'],
                                 visual_analyze=config['analyzer'])

    thread_body = FramesThreadBody(capture,
                                   max_queue_length=len(capture.captures) * 2)
    frames_thread = Thread(target=thread_body)
    frames_thread.start()

  
    if len(params.output_video):
        frame_size, fps = capture.get_source_parameters()
        target_width, target_height = get_target_size(
            frame_size, None, **config['visualization_config'])
        video_output_size = (target_width, target_height)
        fourcc = cv.VideoWriter_fourcc(*'MJPG')  # author switched from XVID to MJPG
        output_video = cv.VideoWriter(params.output_video, fourcc, min(fps),
                                      video_output_size)
    else:
        output_video = None

    prev_frames = thread_body.frames_queue.get()
    detector.run_async(prev_frames, frame_number)

    while thread_body.process:
        if not params.no_show:
            key = check_pressed_keys(key)
            if key == 27:
                break
        start = time.time()
        try:
            frames = thread_body.frames_queue.get_nowait()
        except queue.Empty:
            frames = None

        if frames is None:
            continue

        all_detections = detector.wait_and_grab()
        
        # Debug output: dump each detection's box and confidence.
        for det in all_detections:
            for obj in det:
                print("Boxes:", obj[0])
                print("Confidence:", obj[1])
                
        if params.save_detections:
            update_detections(output_detections, all_detections, frame_number)
        frame_number += 1
        detector.run_async(frames, frame_number)

        all_masks = [[] for _ in range(len(all_detections))]
        for i, detections in enumerate(all_detections):
            all_detections[i] = [det[0] for det in detections]
            all_masks[i] = [det[2] for det in detections if len(det) == 3]

        tracker.process(prev_frames, all_detections, all_masks)
        tracked_objects = tracker.get_tracked_objects()

        latency = time.time() - start
        avg_latency.update(latency)
        fps = round(1. / latency, 1)

        vis = visualize_multicam_detections(prev_frames, tracked_objects, fps,
                                            **config['visualization_config'],
                                            h=h)

        if not params.no_show:
            # Collect two clicked points (set by getMousePointer) and draw a
            # reference rectangle between them.
            cv.setMouseCallback(win_name, getMousePointer)
            if ix != -1 and iy != -1:
                refObj.append((ix, iy))
                ix = -1
                iy = -1
                print(len(refObj))

            if len(refObj) == 2:
                print("Reference rectangle drawn.")
                vis = cv.rectangle(vis, refObj[0], refObj[1], (255, 0, 0), 2)
                refObj.clear()

            cv.imshow(win_name, vis)
            # cv.imwrite("refPicture.png", vis)

        if output_video:
            output_video.write(cv.resize(vis, video_output_size))

        # print('\rProcessing frame: {}, fps = {} (avg_fps = {:.3})'.format(
        #     frame_number, fps, 1. / avg_latency.get()), end="")
        prev_frames, frames = frames, prev_frames
    # print('')

    thread_body.process = False
    frames_thread.join()

    if len(params.history_file):
        save_json_file(params.history_file, tracker.get_all_tracks_history(), description='History file')
    if len(params.save_detections):
        save_json_file(params.save_detections, output_detections, description='Detections')

    if len(config['embeddings']['save_path']):
        save_embeddings(tracker.scts, **config['embeddings'])
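The homography h computed at the top of this example is forwarded into visualize_multicam_detections, presumably to project detections onto the ground plane. A short sketch of that projection for a box's bottom-center point, using standard OpenCV calls (the box values are illustrative):

import cv2 as cv
import numpy as np

pts_src = np.array([[561, 1022], [990, 698], [486, 273], [95, 504]], dtype='float32')
pts_dest = np.array([[0, 0], [0, 400], [400, 700], [0, 700]], dtype='float32')
h, _ = cv.findHomography(pts_src, pts_dest)

# Bottom-center of a detection box given as (left, top, right, bottom).
left, top, right, bottom = 500, 300, 600, 900
foot = np.array([[[(left + right) / 2, bottom]]], dtype='float32')  # shape (1, 1, 2)

ground = cv.perspectiveTransform(foot, h)
print(ground[0, 0])  # ground-plane coordinates of the person's feet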
Example #8
def app(video_link, video_name, show, record, flip_hor, flip_ver):
    # initialize Face Detection net
    config = read_py_config('config.py')
    object_detector = ObjectDetector()
    reid = PersonEmbedder()

    # initialize Video Capturer
    #cap = MulticamCapture(video_link)
    cap = WebcamVideoStream(src=video_link).start()
    (W, H), FPS = imgproc.cameraCalibrate(cap)
    LOG.info('Camera Info: ({}, {}) - {:.3f}'.format(W, H, FPS))
    tracker = MultiCameraTracker(1, reid, **config)

    if record:
        time_str = time.strftime(cfg.TIME_FM)
        writer = cv.VideoWriter(video_name + time_str + '.avi',
                                cv.VideoWriter_fourcc(*'XVID'), 20,
                                (1280, 720))

    cnt_frm = 0
    counter = 0
    while True:
        frm = cap.read()
        if frm is None:
            continue
        cnt_frm += 1

        # if flip_ver: frm = cv.flip(frm, 0)
        # if flip_hor: frm = cv.flip(frm, 1)
        _start_t = time.time()
        all_detections = []

        frm = imgproc.resizeByHeight(frm, 640)
        _, bboxes = object_detector.getObjects(frm, def_score=0.5)
        all_detections.append(bboxes)

        tracker.process([frm], all_detections, [[]])
        tracked_objects = tracker.get_tracked_objects()
        _prx_t = time.time() - _start_t
        fps = round(1 / _prx_t, 1)
        if len(bboxes):
            frm = visualize_multicam_detections([frm], tracked_objects, fps)
        frm = vis.plotInfo(frm,
                           'Raspberry Pi - FPS: {:.3f}'.format(1 / _prx_t))
        frm = cv.cvtColor(np.asarray(frm), cv.COLOR_BGR2RGB)

        if record:
            writer.write(frm)

        if show:
            cv.imshow(video_name, frm)
            key = cv.waitKey(1)
            if key in [27, ord('q')]:
                LOG.info('Interrupted by user')
                break
        else:
            if counter % 10 == 0:
                print(
                    f"IN : {SingleCameraTracker.COUNT_IN}, OUT: {SingleCameraTracker.COUNT_OUT}"
                )
        counter += 1

    if record:
        writer.release()
    cap.release()
    cv.destroyAllWindows()
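For completeness, a minimal sketch of how app might be wired up from the command line; the flag names are assumptions, since none of the examples show their argument parsing:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Single-camera tracking demo')
    parser.add_argument('--video', default=0, help='camera index or video path')
    parser.add_argument('--name', default='tracking', help='window/recording name')
    parser.add_argument('--show', action='store_true')
    parser.add_argument('--record', action='store_true')
    parser.add_argument('--flip-hor', action='store_true')
    parser.add_argument('--flip-ver', action='store_true')
    args = parser.parse_args()

    app(args.video, args.name, args.show, args.record,
        args.flip_hor, args.flip_ver)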