# Example 1
def detection_pipeline(log, args=None):
    """
    Run the main parking-spot detection loop.

    Sets up configuration, cameras, the DNN model and one Motion detector per
    camera, then round-robins over the cameras reading frames and feeding
    every read_step-th frame through detection_flow until capture stops.

    :param log: project logger wrapper (exposes ``log.logger``)
    :param args: argv-style list; ``args[1]`` selects the execution mode, one
        of 'use_onboard_camera', 'use_local_rtsp' or 'use_captured_video'
    :return: None
    """

    def _ltrb(rect):
        """Convert an (x, y, w, h) rect to (left, top, right, bottom)."""
        return [rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3]]

    config = Config(log)

    # Make sure the image archive directory exists before anything is saved.
    img_archive_dir = config.img_archive_dir
    if not os.path.exists(img_archive_dir):
        os.makedirs(img_archive_dir)

    father_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

    # Starting time reference for the video session.
    config.st = datetime.datetime.now()
    if config.socket_enable:
        config.start_status_report_timer()
        if config.reupload_enbale:  # NOTE(review): attribute typo is in Config, keep as-is
            config.re_upload_cache()
        config.start_log_upload_timer()

    # Robustness: tolerate a missing/short args list instead of raising an
    # opaque TypeError/IndexError; the unknown-mode branch below reports it.
    mode = args[1] if args is not None and len(args) > 1 else None
    # Both live sources (onboard camera, local RTSP) share the same handling.
    live_mode = mode in ('use_onboard_camera', 'use_local_rtsp')

    # Initialize for each camera
    if live_mode:
        camera = Camera(log, mode, local=False, cfg_path=father_dir + '/camera_info.txt')
    elif mode == 'use_captured_video':
        camera = Camera(log, mode, local=True, cfg_path=father_dir + '/camera_info.txt')
    else:
        log.logger.error('Unrecognized execution mode argument {}'.format(mode))
        exit()

    dnn_model = DnnModel(log, config)

    # Per-camera distance thresholds; unknown camera ids fall back to the
    # default below (same values the old if/elif ladder produced).
    distance_thres_by_cam = {1: [-0.2, 0.9], 2: [-0.3, 0.9], 3: [-0.3, 0.9]}

    for cam_id in camera.info:
        cam_info = camera.info[cam_id]
        # convert to (left, top, right, bottom), may change in yaml file later.
        motion_roi = _ltrb(cam_info['coord']['bounding_rect'])
        cov_roi, spot_roi1, spot_roi2 = [_ltrb(r) for r in cam_info['coord']['roi_rects']]
        parking_ids = cam_info['parking_ids']
        transformation_matrix = cam_info['transformation_matrix']

        # trace_roi is used for backtrace: a rect of the same width placed
        # immediately to the left of cov_roi.
        trace_roi = [cov_roi[0] * 2 - cov_roi[2], cov_roi[1], cov_roi[0], cov_roi[3]]

        # Copy so each camera owns its threshold list, as in the original code.
        distance_thres = list(distance_thres_by_cam.get(int(cam_id), [-0.3, 0.9]))

        parking_spot = ParkingSpot(log, config, cov_roi, spot_roi1, spot_roi2, parking_ids,
                                   trace_roi, distance_thres, cam_info['initial_state'])
        logic = B_logic(log, 8)
        motion = Motion(log, motion_roi, dnn_model, parking_spot, logic, transformation_matrix)
        cam_info['ParkingSpot'] = motion

    if live_mode:
        camera.capture_pic_multithread()

    # `is True` kept deliberately: cap_on may be toggled by other threads and
    # the original code required the exact boolean True to keep looping.
    while config.cap_on is True:
        # camera round-robin
        for cam_id in camera.info:
            motion = camera.info[cam_id]['ParkingSpot']
            parking_draw = camera.info[cam_id]['parking_draw']
            # Read frame-by-frame
            if live_mode:
                ret, video_cur_frame, video_cur_time, video_cur_cnt = \
                    camera.get_frame_from_queue(cam_id)
            else:
                cap = camera.info[cam_id]['video_cap']
                if cap is None:
                    continue
                # Current position of the video file in seconds, indicate current time
                video_cur_time = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
                # Index of the frame to be decoded/captured next
                video_cur_cnt = cap.get(cv2.CAP_PROP_POS_FRAMES)
                ret, video_cur_frame = cap.read()

            if ret is False:
                # End of stream/file: stop the whole pipeline.
                log.logger.info('Video Analysis Finished!')
                config.cap_on = False
                break
            elif ret is None:
                # No frame available yet (e.g. live queue empty) — next camera.
                continue

            # Run this camera's detector on every read_step-th frame.
            frame_info = {'cur_frame': video_cur_frame, 'cur_time': video_cur_time,
                          'cur_cnt': video_cur_cnt}
            if video_cur_cnt % motion.read_step == 0:
                # frame_for_display is returned for debug, need to change later
                frame_for_display = detection_flow(log, motion, dnn_model, frame_info)

                # the rest is added only for debug
                show_camera(motion, frame_for_display, parking_draw)

    camera.release_video_cap()
    config.stop_status_report_timer()
    config.ftp_upload_enable = False