Example #1
def face_detect_and_identify_use_pool(cameraID, camera_address, camera_rotate, recognition_result_queue):
    import cv2
    import time
    import multiprocessing as mp
    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.face_identity_use_baidu import face_detect_and_identify

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)

    pool = mp.Pool(processes=20)
    manager = mp.Manager()  # one Manager process shared by the value and its lock
    LAST_ADD_TIME = manager.Value('d', 0)
    LAST_ADD_TIME_LOCK = manager.Lock()
    # Window setup
    if SHOW_FLAG:
        cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:
            print("get frame timeout")
            continue
        print("In frame_i", frame_i)
        print("读图片时间: ", time.time() - start)
        pool.apply_async(face_detect_and_identify, args=(cameraID, frame_i, frame, recognition_result_queue, LAST_ADD_TIME, LAST_ADD_TIME_LOCK))

        cv2.putText(frame, str(frame_i), (int(50), int(50)), 0, 5e-3 * 400, (0, 255, 0), 3)
        if SHOW_FLAG:
            cv2.imshow('test', frame)
            # Press Q to stop!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    pool.close()
    pool.join()
    cv2.destroyAllWindows()
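
A minimal launch sketch for the worker above, assuming a plain multiprocessing.Queue for the results; the camera ID, address, and consumer loop are illustrative, not part of the original:

import multiprocessing as mp

if __name__ == '__main__':
    recognition_result_queue = mp.Queue()
    # Hypothetical camera parameters; replace with a real camera ID / RTSP URL.
    worker = mp.Process(target=face_detect_and_identify_use_pool,
                        args=(0, 'rtsp://example/stream', 0, recognition_result_queue))
    worker.start()
    while True:
        result = recognition_result_queue.get()  # blocks until a result arrives
        print(result)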
Example #2
def frame_data_get_and_distribute(camera_dict, track_frame_data_queue):
    """
    从frame_data_queue获取图像帧并进行数据分发, 分发给track进程和identify进程
    :param camera_dict: 摄像头信息和数据
    :param track_frame_data_queue: 用于track的队列列表, 每个GPU对应一个对列
    :return:
    """
    # 摄像头数目,gpu数目
    camera_num = len(camera_dict)
    print("Camera num: ", camera_num)
    camera_id_list = list(camera_dict.keys())
    video_cap_dict = {
        camera_id: VideoCapture(camera_id, camera_dict[camera_id].url,
                                camera_dict[camera_id].rotate)
        for camera_id in camera_id_list
    }
    identify_result_queue_dict = {
        camera_id: camera_dict[camera_id].all_result_queue
        for camera_id in camera_id_list
    }

    pool = mp.pool.ThreadPool(20)

    while True:
        # fetch frame_data, dispatching work to the thread pool
        start = time.time()
        # one frame_data_for_body_queue per GPU
        for camera_id in camera_id_list:
            frame_data = video_cap_dict[camera_id].read()  # read camera data
            if isinstance(frame_data, list) and len(
                    frame_data) == 3 and frame_data[2] is not None:
                camera_id, frame_i, frame = frame_data
                # 1. identify: run face detection/recognition in the thread pool, push results to the queue
                identify_result_queue = identify_result_queue_dict[camera_id]
                # pool.apply_async(face_detect_and_identify, args=(camera_id, frame_i, frame, identify_result_queue, LAST_ADD_TIME, LAST_ADD_TIME_LOCK))
                # pool2.submit(face_detect_and_identify, args=(camera_id, frame_i, frame, identify_result_queue, LAST_ADD_TIME, LAST_ADD_TIME_LOCK))
                pool.apply_async(face_detect_and_identify,
                                 args=(camera_id, frame_i, frame,
                                       identify_result_queue))
                # 2. track: distribute data for tracking
                # resize
                frame = cv2.resize(frame, (IMG_FOR_GPU_W, IMG_FOR_GPU_H))
                # push the tracking frame data onto the queue
                frame_data = [camera_id, frame_i, frame]
                put_data_to_queue(track_frame_data_queue, frame_data,
                                  "All frame_data_for_body_queue")

        distribute_time = time.time() - start
        if LOG_FLAG:
            log.logger.info("Distribute time: " + str(distribute_time))
        if distribute_time < 0.25:
            time.sleep(0.25 - distribute_time)  # throttle to roughly 4 iterations per second
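
put_data_to_queue is called above but not defined in this example. A plausible non-blocking sketch, assuming a standard bounded queue and a drop-on-full policy (both assumptions, not the original implementation):

import queue

def put_data_to_queue(q, data, queue_name=""):
    # Drop the frame rather than block the distributor when the consumer
    # falls behind; queue.Full is what Queue.put_nowait raises when full.
    try:
        q.put_nowait(data)
    except queue.Full:
        print(queue_name, "is full, dropping data")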
Example #3
def face_detect(cameraID, camera_address, camera_rotate):
    import cv2
    import time

    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.face_identity_use_baidu import baidu_face_detect
    # Window setup
    if SHOW_FLAG:
        cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:
            print("get frame timeout")
            continue
        print("Frame_i", frame_i)
        print("读图片时间: ", time.time() - start)

        # Face detection
        start = time.time()
        face_detect_data_list = baidu_face_detect(frame)
        for face in face_detect_data_list:
            # print(face.keys()) # dict_keys(['angle', 'face_quality', 'face_token', 'face_box', 'age', 'gender', 'face_img'])
            face_box = face['face_box']
            age = face['age']
            gender = face['gender']
            print("年龄: ", age, ", 性别: ", gender)
            cv2.rectangle(frame, (int(face_box[0]), int(face_box[1])),
                          (int(face_box[2]), int(face_box[3])), (255, 0, 255),
                          2)
        print("人脸检测时间: ", time.time() - start)
        if SHOW_FLAG:
            cv2.imshow('test', frame)
            # Press Q to stop!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    cv2.destroyAllWindows()
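
For reference, the commented-out key list above implies baidu_face_detect returns a list of dicts per frame. A sketch of one element's shape with made-up values; only the keys come from the comment:

example_face = {
    'angle': {'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0},  # head pose; structure assumed
    'face_quality': 0.9,                # quality score; scalar assumed
    'face_token': 'abc123',             # Baidu face token string
    'face_box': [100, 120, 220, 260],   # (x1, y1, x2, y2), inferred from the cv2.rectangle call
    'age': 30,
    'gender': 'male',
    'face_img': None,                   # cropped face image; type assumed
}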
Example #4
def track_and_show(cameraID, camera_address, camera_rotate):
    import cv2
    import time

    from third_party.tf_pose_estimation.tf_pose.estimator import TfPoseEstimator
    from third_party.tf_pose_estimation.tf_pose.networks import get_graph_path, model_wh

    from third_party.deep_sort_yolov3.deep_sort_model import DeepSortPreprocess, DeepSort

    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.utils.others import resize_boxes

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)

    model = 'cmu'  # cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small
    resolution = '656x368'  # recommended: 432x368, 656x368, or 1312x736
    model_w, model_h = model_wh(resolution)
    e = TfPoseEstimator(get_graph_path(model), target_size=(model_w, model_h))

    deepsort_preprocess = DeepSortPreprocess()
    deepsort = DeepSort()

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:
            print("get frame timeout")
            continue

        print(frame_i)
        print("读图片时间: ", time.time() - start)
        frame_for_gpu = cv2.resize(frame, (model_w, model_h))

        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h

        # OpenPose body-box detection ==================================================================================
        start = time.time()
        humans = e.inference(frame_for_gpu,
                             resize_to_default=(model_w > 0 and model_h > 0),
                             upsample_size=4.0)
        frame = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)
        body_box_list = []
        other_data_list = []
        for human in humans:
            result = human.get_useful_data(frame_for_gpu_w, frame_for_gpu_h,
                                           frame_original_w, frame_original_h)
            if result:
                body_box = result['body_box']
                other_data = result['other_data']
                body_box_list.append(body_box)
                other_data_list.append(other_data)
        boxes = body_box_list
        print("openpose人体框检测时间: ", time.time() - start)

        # DeepSort GPU stage ===========================================================================================
        start = time.time()
        features = deepsort_preprocess.get_features(frame_for_gpu, boxes)
        print("deepsort_gpu计算时间: ", time.time() - start)

        # resize GPU-space body boxes back to the original image =======================================================
        boxes = resize_boxes(boxes, w_ratio, h_ratio)

        # DeepSort CPU stage ===========================================================================================
        start = time.time()
        track_new_id_list, track_delete_id_list, not_confirmed_detected_track_list, detected_track_list = \
            deepsort.update(boxes, features, other_data_list)
        print("deepsort_cpu计算时间: ", time.time() - start)

        # Show tracking results ========================================================================================
        for track_data in not_confirmed_detected_track_list:
            track_id = track_data['trackID']
            track_bbox = track_data['body_box']
            print(track_bbox)
            cv2.rectangle(frame, (int(track_bbox[0]), int(track_bbox[1])),
                          (int(track_bbox[2]), int(track_bbox[3])),
                          (255, 255, 255), 2)
            cv2.putText(frame, str(track_id),
                        (int(track_bbox[0]), int(track_bbox[1])), 0,
                        5e-3 * 200, (255, 255, 255), 2)

        for track_data in detected_track_list:
            track_id = track_data['trackID']
            track_bbox = track_data['body_box']
            track_other_data = track_data['other_data']

            print(track_other_data)
            cv2.rectangle(frame, (int(track_bbox[0]), int(track_bbox[1])),
                          (int(track_bbox[2]), int(track_bbox[3])),
                          (0, 255, 0), 3)
            cv2.putText(frame, str(track_id),
                        (int(track_bbox[0]), int(track_bbox[1])), 0,
                        5e-3 * 200, (0, 255, 0), 3)

        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
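
resize_boxes (imported above from persons_track.utils.others) maps boxes detected on the resized GPU frame back to original-image coordinates. A minimal sketch consistent with how it is called here; the real implementation may differ:

def resize_boxes(boxes, w_ratio, h_ratio):
    # Scale x-like coordinates by w_ratio and y-like ones by h_ratio.
    # Works for both (x1, y1, x2, y2) and (x, y, w, h) box layouts,
    # since even indices are horizontal and odd indices are vertical.
    return [[box[0] * w_ratio, box[1] * h_ratio,
             box[2] * w_ratio, box[3] * h_ratio] for box in boxes]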
Example #5
def area_judge_and_show(cameraID, camera_address, camera_rotate, camera_size,
                        area_info_list):
    import cv2
    import time
    from PIL import Image

    from third_party.deep_sort_yolov3.yolo import YOLO

    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.utils.others import resize_boxes, box_tlwh_to_tlbr
    from persons_track.AreaJudge import AreaJudge

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)
    model_w, model_h = 608, 608
    yolo = YOLO()
    area_judge = AreaJudge(img_shape=camera_size,
                           area_info_list=area_info_list)

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
    points = []

    def mouse(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            points.append((x, y))

    cv2.setMouseCallback('test', mouse)

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:
            print("get frame timeout")
            continue
        print("Frame_i", frame_i)
        print("读图片时间: ", time.time() - start)

        frame_for_gpu = cv2.resize(frame, (model_w, model_h))
        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h

        # YOLO body-box detection ======================================================================================
        start = time.time()
        image_for_yolo = Image.fromarray(
            frame_for_gpu[..., ::-1])  # bgr to rgb
        box_list = yolo.detect_image(image_for_yolo)
        print("yolo_v3人体框检测时间: ", time.time() - start)
        # gpu计算的人体框resize到原始图像中=================================================================================
        box_list = resize_boxes(box_list, w_ratio, h_ratio)

        # ==============================================================================================================
        frame = frame.copy()  # OpenCV wrapper bug; see https://stackoverflow.com/questions/30249053/python-opencv-drawing-errors-after-manipulating-array-with-numpy
        # Show area-localization results
        area_judge.draw(frame)
        for box_idx, body_data in enumerate(box_list):
            body_box = box_tlwh_to_tlbr(body_data)
            cv2.rectangle(frame, (body_box[0], body_box[1]),
                          (body_box[2], body_box[3]), (0, 255, 0), 2)

            body_data = {'body_box': body_box}
            # determine the area
            area = area_judge.judge(body_data)
            cv2.putText(frame, str(area), (body_box[0], body_box[1]), 0,
                        5e-3 * 200, (0, 255, 0), 2)

        # ==============================================================================================================
        # Show mouse-click marks, used for annotation
        for point in points:
            x, y = point
            xy = "%d,%d" % (x, y)
            cv2.circle(frame, (x, y), 1, (255, 0, 0), thickness=-1)
            cv2.putText(frame,
                        xy, (x, y),
                        cv2.FONT_HERSHEY_PLAIN,
                        3.0, (255, 0, 0),
                        thickness=2)
        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
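
box_tlwh_to_tlbr, used above to prepare boxes for drawing, presumably converts a (top-left x, top-left y, width, height) box into corner form. A minimal sketch under that assumption:

def box_tlwh_to_tlbr(box):
    # (x, y, w, h) -> (x1, y1, x2, y2); int cast so cv2 drawing accepts it.
    x, y, w, h = box
    return [int(x), int(y), int(x + w), int(y + h)]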
Example #6
def person_manage_and_show(cameraID, camera_address, camera_rotate,
                           camera_size, area_info_list, areaID_list):
    import cv2
    import time
    from PIL import Image

    from third_party.deep_sort_yolov3.yolo import YOLO
    from third_party.deep_sort_yolov3.deep_sort_model import DeepSortPreprocess, DeepSort

    from persons_track.utils.others import resize_boxes
    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.AreaJudge import AreaJudge
    from persons_track.PersonManage import PersonsManage
    from persons_track.DataFusion import AreaDataFusion

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)
    model_w, model_h = 608, 608
    yolo = YOLO()
    deepsort_preprocess = DeepSortPreprocess()
    deepsort = DeepSort()
    area_judge = AreaJudge(img_shape=camera_size,
                           area_info_list=area_info_list,
                           mode=1)
    person_manage = PersonsManage(cameraID)
    area_data_fusion = AreaDataFusion(areaID_list)

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:
            print("get frame timeout")
            continue
        print("Frame_i", frame_i)
        print("Frame read time: ", time.time() - start)

        frame_for_gpu = cv2.resize(frame, (model_w, model_h))
        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h

        # YOLO body-box detection ======================================================================================
        start = time.time()
        image_for_yolo = Image.fromarray(
            frame_for_gpu[..., ::-1])  # bgr to rgb
        boxes = yolo.detect_image(image_for_yolo)
        print("yolo_v3人体框检测时间: ", time.time() - start)

        # DeepSort GPU stage ===========================================================================================
        start = time.time()
        features = deepsort_preprocess.get_features(frame_for_gpu, boxes)
        print("deepsort_gpu计算时间: ", time.time() - start)

        # resize GPU-space body boxes back to the original image =======================================================
        boxes = resize_boxes(boxes, w_ratio, h_ratio)

        # DeepSort CPU stage ===========================================================================================
        start = time.time()
        track_new_id_list, track_delete_id_list, not_confirmed_detected_track_list, detected_track_list = deepsort.update(
            boxes, features)
        print("deepsort_cpu计算时间: ", time.time() - start)

        # Process track data ===========================================================================================
        frame = frame.copy()  # OpenCV wrapper bug; see https://stackoverflow.com/questions/30249053/python-opencv-drawing-errors-after-manipulating-array-with-numpy
        for track_id in track_new_id_list:  # newly appeared tracks
            person_manage.add_person_use_trackID(track_id)  # register the person
        for track_id in track_delete_id_list:  # tracks to delete
            person_manage.delete_person_use_trackID(track_id)  # remove the person
        for track_data in not_confirmed_detected_track_list:  # unconfirmed tracks
            # unpack track_data
            track_id = track_data['trackID']
            track_body_box = track_data['body_box']
            # update body_box in person_manage
            person_manage.update_person_body_box(track_id, track_body_box)
            # fetch the output data from person_manage
            send_data_flag, data = person_manage.get_person_output_data(
                track_id)
            if send_data_flag:
                area_data_fusion.update(data)
                print(data)
                for area_data in area_data_fusion.get_data():
                    print(area_data)

            # visualize body_box
            cv2.putText(
                frame,
                str(person_manage.get_person_still_status_flag(track_id)),
                (int(track_body_box[0]), int(track_body_box[1]) - 40), 0,
                5e-3 * 200, (255, 255, 255), 2)

            cv2.rectangle(frame,
                          (int(track_body_box[0]), int(track_body_box[1])),
                          (int(track_body_box[2]), int(track_body_box[3])),
                          (255, 255, 255), 2)
            cv2.putText(frame, str(track_id),
                        (int(track_body_box[0]), int(track_body_box[1])), 0,
                        5e-3 * 200, (255, 255, 255), 2)

        for track_data in detected_track_list:  # confirmed tracks
            # unpack track_data
            track_id = track_data['trackID']
            track_body_box = track_data['body_box']
            # determine the area
            body_data_for_area_judge = {'body_box': track_body_box}
            area_id = area_judge.judge(body_data_for_area_judge)
            # update body_box and areaID in person_manage
            person_manage.set_person_areaID(track_id, area_id)
            person_manage.update_person_body_box(track_id, track_body_box)
            # fetch the output data from person_manage
            send_data_flag, data = person_manage.get_person_output_data(
                track_id)
            if send_data_flag:
                area_data_fusion.update(data)
                print(data)
                for area_data in area_data_fusion.get_data():
                    print(area_data)

            # visualize body_box
            cv2.putText(
                frame,
                str(person_manage.get_person_still_status_flag(track_id)),
                (int(track_body_box[0]), int(track_body_box[1]) - 40), 0,
                5e-3 * 200, (0, 255, 0), 2)

            cv2.rectangle(frame,
                          (int(track_body_box[0]), int(track_body_box[1])),
                          (int(track_body_box[2]), int(track_body_box[3])),
                          (0, 255, 0), 2)
            cv2.putText(frame, str(track_id),
                        (int(track_body_box[0]), int(track_body_box[1])), 0,
                        5e-3 * 200, (0, 255, 0), 2)

        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
Example #7
def person_manage_and_show(cameraID, camera_address, camera_rotate,
                           camera_size, area_info_list, areaID_list):
    import cv2
    import time

    from third_party.tf_pose_estimation.tf_pose.estimator import TfPoseEstimator
    from third_party.tf_pose_estimation.tf_pose.networks import get_graph_path, model_wh

    from third_party.deep_sort_yolov3.deep_sort_model import DeepSortPreprocess, DeepSort

    from persons_track.utils.others import resize_boxes
    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.AreaJudge import AreaJudge
    from persons_track.PersonManage import PersonsManage
    from persons_track.DataFusion import AreaDataFusion

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)
    model = 'cmu'  # cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small
    resolution = '656x368'  # recommended: 432x368, 656x368, or 1312x736
    model_w, model_h = model_wh(resolution)
    e = TfPoseEstimator(get_graph_path(model), target_size=(model_w, model_h))

    deepsort_preprocess = DeepSortPreprocess()
    deepsort = DeepSort()
    area_judge = AreaJudge(img_shape=camera_size,
                           area_info_list=area_info_list,
                           mode=2)
    person_manage = PersonsManage(cameraID)
    area_data_fusion = AreaDataFusion(areaID_list)

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 10.0, (2560, 1440))

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:
            print("get frame timeout")
            continue
        print("Frame_i", frame_i)
        print("读图片时间: ", time.time() - start)

        frame_for_gpu = cv2.resize(frame, (model_w, model_h))
        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h

        # OpenPose body-box detection ==================================================================================
        start = time.time()
        humans = e.inference(frame_for_gpu,
                             resize_to_default=(model_w > 0 and model_h > 0),
                             upsample_size=4.0)
        # frame = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)
        body_box_list = []
        other_data_list = []
        for human in humans:
            result = human.get_useful_data(frame_for_gpu_w, frame_for_gpu_h,
                                           frame_original_w, frame_original_h)
            if result:
                body_box = result['body_box']
                other_data = result['other_data']
                body_box_list.append(body_box)
                other_data_list.append(other_data)
        boxes = body_box_list
        print("opepose人体框检测时间: ", time.time() - start)

        # DeepSort GPU stage ===========================================================================================
        start = time.time()
        features = deepsort_preprocess.get_features(frame_for_gpu, boxes)
        print("deepsort_gpu计算时间: ", time.time() - start)

        # resize GPU-space body boxes back to the original image =======================================================
        boxes = resize_boxes(boxes, w_ratio, h_ratio)

        # DeepSort CPU stage ===========================================================================================
        start = time.time()
        track_new_id_list, track_delete_id_list, not_confirmed_detected_track_list, detected_track_list = deepsort.update(
            boxes, features, other_data_list)
        print("deepsort_cpu计算时间: ", time.time() - start)

        # Process track data ===========================================================================================
        area_judge.draw(frame)
        for track_id in track_new_id_list:  # newly appeared tracks
            person_manage.add_person_use_trackID(track_id)  # register the person
        for track_id in track_delete_id_list:  # tracks to delete
            person_manage.delete_person_use_trackID(track_id)  # remove the person
        for track_data in not_confirmed_detected_track_list:  # unconfirmed tracks
            # unpack track_data
            track_id = track_data['trackID']
            track_body_box = track_data['body_box']
            # update body_box in person_manage
            person_manage.update_person_body_box(track_id, track_body_box)
            # fetch the output data from person_manage
            send_data_flag, data = person_manage.get_person_output_data(
                track_id)
            if send_data_flag:
                area_data_fusion.update(data)
                print(data)
                for area_data in area_data_fusion.get_data():
                    print(area_data)

            # visualize body_box
            cv2.putText(
                frame,
                str(person_manage.get_person_still_status_flag(track_id)),
                (int(track_body_box[0]) + 40, int(track_body_box[1])), 0,
                5e-3 * 200, (255, 255, 255), 2)

            cv2.rectangle(frame,
                          (int(track_body_box[0]), int(track_body_box[1])),
                          (int(track_body_box[2]), int(track_body_box[3])),
                          (255, 255, 255), 2)
            cv2.putText(frame, str(track_id),
                        (int(track_body_box[0]), int(track_body_box[1]) - 10),
                        0, 5e-3 * 200, (255, 255, 255), 2)

        for track_data in detected_track_list:  # confirmed tracks
            # unpack track_data
            track_id = track_data['trackID']
            track_body_box = track_data['body_box']
            track_other_data = track_data['other_data']
            # determine the area
            body_data_for_area_judge = {
                'body_box': track_body_box,
                'other_data': track_other_data
            }
            area_id = area_judge.judge(body_data_for_area_judge)
            # update body_box and areaID in person_manage
            person_manage.set_person_areaID(track_id, area_id)
            person_manage.update_person_body_box(track_id, track_body_box)
            # fetch the output data from person_manage
            send_data_flag, data = person_manage.get_person_output_data(
                track_id)
            if send_data_flag:
                area_data_fusion.update(data)
                print(data)
                for area_data in area_data_fusion.get_data():
                    print(area_data)

            # visualize body_box
            for part in [
                    'nose_point', 'rhip_point', 'lhip_point',
                    'rshoulder_point', 'lshoulder_point'
            ]:
                if track_other_data[part] is not None:
                    cv2.circle(frame,
                               tuple(track_other_data[part]),
                               8, (255, 0, 0),
                               thickness=-1)

            cv2.rectangle(frame,
                          (int(track_body_box[0]), int(track_body_box[1])),
                          (int(track_body_box[2]), int(track_body_box[3])),
                          (0, 255, 0), 2)
            cv2.putText(frame, str(track_id),
                        (int(track_body_box[0]), int(track_body_box[1]) - 10),
                        0, 5e-3 * 200, (0, 255, 0), 2)
            cv2.putText(frame, str(area_id),
                        (int(track_body_box[0]), int(track_body_box[1]) - 50),
                        0, 5e-3 * 200, (0, 255, 0), 2)
            cv2.putText(
                frame,
                str(person_manage.get_person_still_status_flag(track_id)),
                (int(track_body_box[0]) + 100, int(track_body_box[1]) - 50), 0,
                5e-3 * 200, (0, 255, 0), 2)
        out.write(frame)

        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    out.release()
    cv2.destroyAllWindows()
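
One caveat in the writer setup above: cv2.VideoWriter silently drops frames whose dimensions differ from the (2560, 1440) passed at construction. If the camera resolution is not known up front, a safer pattern is to size the writer from the first frame; a sketch (make_writer_for is a hypothetical helper):

import cv2

def make_writer_for(frame, path='output.avi', fps=10.0):
    # Take width/height from the actual frame so writes are not silently dropped.
    frame_h, frame_w = frame.shape[0:2]
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    return cv2.VideoWriter(path, fourcc, fps, (frame_w, frame_h))

Inside the loop, initialize out to None before the loop and create it lazily (if out is None: out = make_writer_for(frame)) before the first out.write(frame).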
Example #8
def area_judge_and_show(cameraID, camera_address, camera_rotate, camera_size,
                        area_info_list):
    import cv2
    import time

    from third_party.tf_pose_estimation.tf_pose.estimator import TfPoseEstimator
    from third_party.tf_pose_estimation.tf_pose.networks import get_graph_path, model_wh

    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.utils.others import resize_box, box_tlwh_to_tlbr
    from persons_track.AreaJudge import AreaJudge

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)
    model = 'cmu'  # cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small
    resolution = '656x368'  # recommended: 432x368, 656x368, or 1312x736
    model_w, model_h = model_wh(resolution)
    e = TfPoseEstimator(get_graph_path(model), target_size=(model_w, model_h))
    area_judge = AreaJudge(img_shape=camera_size,
                           area_info_list=area_info_list,
                           mode=2)

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
    points = []

    def mouse(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            points.append((x, y))

    cv2.setMouseCallback('test', mouse)

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:
            print("get frame timeout")
            continue
        print("Frame_i", frame_i)
        print("读图片时间: ", time.time() - start)

        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu = cv2.resize(frame, (model_w, model_h))
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h
        # Body-box detection ===========================================================================================
        start = time.time()
        humans = e.inference(frame_for_gpu,
                             resize_to_default=(model_w > 0 and model_h > 0),
                             upsample_size=4.0)
        # frame = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)

        body_data_for_area_judge_list = []
        for human in humans:
            result = human.get_useful_data(frame_for_gpu_w, frame_for_gpu_h,
                                           frame_original_w, frame_original_h)
            if result:
                body_box = result['body_box']
                other_data = result['other_data']

                body_box = box_tlwh_to_tlbr(body_box)
                body_box = resize_box(body_box, w_ratio, h_ratio)

                body_data_for_area_judge = {
                    'body_box': body_box,
                    'other_data': other_data
                }

                body_data_for_area_judge_list.append(body_data_for_area_judge)
        print("OpenPose body-box detection time: ", time.time() - start)

        # print(body_data_for_area_judge_list)
        # ============================================================================================
        # Show results
        area_judge.draw(frame)
        for body_data in body_data_for_area_judge_list:
            body_box = body_data['body_box']
            other_data = body_data['other_data']
            cv2.rectangle(frame, (body_box[0], body_box[1]),
                          (body_box[2], body_box[3]), (0, 255, 0), 2)

            for part in [
                    'nose_point', 'rhip_point', 'lhip_point',
                    'rshoulder_point', 'lshoulder_point'
            ]:
                if other_data[part] is not None:
                    cv2.circle(frame,
                               tuple(other_data[part]),
                               5, (255, 0, 0),
                               thickness=-1)

            # determine the area
            area = area_judge.judge(body_data)
            cv2.putText(frame, str(area), (body_box[0], body_box[1]), 0,
                        5e-3 * 200, (0, 255, 0), 2)

        # ===========================================================================================
        # Show mouse-click marks, used for annotation
        for point in points:
            x, y = point
            xy = "%d,%d" % (x, y)
            cv2.circle(frame, (x, y), 1, (255, 0, 0), thickness=-1)
            cv2.putText(frame,
                        xy, (x, y),
                        cv2.FONT_HERSHEY_PLAIN,
                        3.0, (255, 0, 0),
                        thickness=2)
        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
Example #9
def track_and_show(cameraID, camera_address, camera_rotate):
    import cv2
    import time
    from PIL import Image

    from third_party.deep_sort_yolov3.yolo import YOLO
    from third_party.deep_sort_yolov3.deep_sort_model import DeepSortPreprocess, DeepSort

    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.utils.others import resize_boxes

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)
    model_w, model_h = 608, 608
    yolo = YOLO()
    deepsort_preprocess = DeepSortPreprocess()
    deepsort = DeepSort()

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:
            print("get frame timeout")
            continue
        print("Frame_i", frame_i)
        print("读图片时间: ", time.time() - start)

        frame_for_gpu = cv2.resize(frame, (model_w, model_h))
        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h

        # YOLO body-box detection ======================================================================================
        start = time.time()
        image_for_yolo = Image.fromarray(frame_for_gpu[..., ::-1])  # bgr to rgb
        boxes = yolo.detect_image(image_for_yolo)
        print("yolo_v3人体框检测时间: ", time.time() - start)

        # DeepSort GPU stage ===========================================================================================
        start = time.time()
        features = deepsort_preprocess.get_features(frame_for_gpu, boxes)
        print("deepsort_gpu计算时间: ", time.time() - start)

        # resize GPU-space body boxes back to the original image =======================================================
        boxes = resize_boxes(boxes, w_ratio, h_ratio)
        # DeepSort CPU stage ===========================================================================================
        start = time.time()
        track_new_id_list, track_delete_id_list, not_confirmed_detected_track_list, detected_track_list = \
            deepsort.update(boxes, features)
        print("deepsort_cpu计算时间: ", time.time() - start)

        # Show tracking results ========================================================================================
        # frame = frame.copy()  # OpenCV wrapper bug; see https://stackoverflow.com/questions/30249053/python-opencv-drawing-errors-after-manipulating-array-with-numpy
        for track_data in not_confirmed_detected_track_list:
            track_id = track_data['trackID']
            track_bbox = track_data['body_box']
            cv2.rectangle(frame, (int(track_bbox[0]), int(track_bbox[1])), (int(track_bbox[2]), int(track_bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, str(track_id), (int(track_bbox[0]), int(track_bbox[1])), 0, 5e-3 * 200, (255, 255, 255), 2)

        for track_data in detected_track_list:
            track_id = track_data['trackID']
            track_bbox = track_data['body_box']
            cv2.rectangle(frame, (int(track_bbox[0]), int(track_bbox[1])), (int(track_bbox[2]), int(track_bbox[3])), (0, 255, 0), 2)
            cv2.putText(frame, str(track_id), (int(track_bbox[0]), int(track_bbox[1])), 0, 5e-3 * 200, (0, 255, 0), 2)

        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
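
The commented-out frame.copy() above is the workaround for the OpenCV drawing error linked in the other examples: after numpy manipulation (fancy indexing, rotation) the frame array can become non-contiguous, and cv2.rectangle/cv2.putText may then fail or draw nothing. An equivalent fix is to request a contiguous array explicitly:

import numpy as np

# Same effect as frame.copy() for the linked issue: guarantees a C-contiguous
# array for OpenCV's drawing calls; returns the frame unchanged if it is
# already contiguous.
frame = np.ascontiguousarray(frame)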