Exemplo n.º 1
0
    def _gpu_calculate(img_for_gpu):
        """GPU stage: OpenPose detection + Deep SORT feature extraction.

        Args:
            img_for_gpu: BGR frame already resized to the model input size
                (model_w x model_h).  All other inputs (estimator ``e``,
                image sizes, ratios, ``deepsort_preprocess``) come from the
                enclosing scope.

        Returns:
            Tuple ``(boxes, features, other_data_list)`` — body boxes mapped
            back to original-image coordinates, their Deep SORT appearance
            features, and per-person extra keypoint data.
        """
        # OpenPose body-box detection
        humans = e.inference(img_for_gpu,
                             resize_to_default=(model_w > 0 and model_h > 0),
                             upsample_size=4.0)
        body_box_list = []
        other_data_list = []
        for human in humans:
            result = human.get_useful_data(img_for_gpu_w, img_for_gpu_h,
                                           img_original_w, img_original_h)
            if result:  # skip humans without usable keypoint data
                body_box_list.append(result['body_box'])
                other_data_list.append(result['other_data'])
        boxes = body_box_list

        # Deep SORT feature extraction (GPU) on the resized frame
        features = deepsort_preprocess.get_features(img_for_gpu, boxes)

        # Map boxes from model-input resolution back to the original image
        boxes = resize_boxes(boxes, w_ratio, h_ratio)
        return boxes, features, other_data_list
Exemplo n.º 2
0
    def _gpu_calculate(img_for_gpu):
        """GPU stage: YOLOv3 detection + Deep SORT feature extraction.

        Args:
            img_for_gpu: BGR frame already resized to the model input size.
                ``yolo``, ``deepsort_preprocess`` and the resize ratios come
                from the enclosing scope.

        Returns:
            Tuple ``(boxes, features)`` — body boxes mapped back to
            original-image coordinates and their Deep SORT appearance
            features.
        """
        # YOLO body-box detection (the model expects RGB, so flip BGR channels)
        img_for_yolo = Image.fromarray(img_for_gpu[..., ::-1])  # bgr to rgb
        boxes = yolo.detect_image(img_for_yolo)

        # Deep SORT feature extraction (GPU) on the resized frame
        features = deepsort_preprocess.get_features(img_for_gpu, boxes)

        # Map boxes from model-input resolution back to the original image
        boxes = resize_boxes(boxes, w_ratio, h_ratio)
        return boxes, features
Exemplo n.º 3
0
def track_and_show(cameraID, camera_address, camera_rotate):
    """Detect people with OpenPose, track them with Deep SORT and display.

    Endless capture loop: each frame is resized to the model input,
    people are detected with OpenPose, tracked with Deep SORT, and both
    unconfirmed (white) and confirmed (green) tracks are drawn in an
    OpenCV window.  Press 'q' to stop.

    Args:
        cameraID: camera identifier, forwarded to VideoCapture.
        camera_address: camera stream address.
        camera_rotate: rotation setting, forwarded to VideoCapture.
    """
    import cv2
    import time

    from third_party.tf_pose_estimation.tf_pose.estimator import TfPoseEstimator
    from third_party.tf_pose_estimation.tf_pose.networks import get_graph_path, model_wh

    from third_party.deep_sort_yolov3.deep_sort_model import DeepSortPreprocess, DeepSort

    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.utils.others import resize_boxes

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)

    model = 'cmu'  # 'cmu' / 'mobilenet_thin' / 'mobilenet_v2_large' / 'mobilenet_v2_small'
    resolution = '656x368'  # Recommended: 432x368 or 656x368 or 1312x736
    model_w, model_h = model_wh(resolution)
    e = TfPoseEstimator(get_graph_path(model), target_size=(model_w, model_h))

    deepsort_preprocess = DeepSortPreprocess()
    deepsort = DeepSort()

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:  # read timed out; try again
            print("get frame timeout")
            continue

        print(frame_i)
        print("读图片时间: ", time.time() - start)
        frame_for_gpu = cv2.resize(frame, (model_w, model_h))

        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        # Scale factors mapping model-input coords back to the original frame
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h

        print(frame_i)
        # OpenPose body-box detection ===================================================================================
        start = time.time()
        humans = e.inference(frame_for_gpu,
                             resize_to_default=(model_w > 0 and model_h > 0),
                             upsample_size=4.0)
        frame = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)
        body_box_list = []
        other_data_list = []
        for human in humans:
            result = human.get_useful_data(frame_for_gpu_w, frame_for_gpu_h,
                                           frame_original_w, frame_original_h)
            if result:  # skip humans without usable keypoint data
                body_box = result['body_box']
                other_data = result['other_data']
                body_box_list.append(body_box)
                other_data_list.append(other_data)
        boxes = body_box_list
        print("openpose人体框检测时间: ", time.time() - start)

        # Deep SORT GPU feature extraction ==============================================================================
        start = time.time()
        features = deepsort_preprocess.get_features(frame_for_gpu, boxes)
        print("deepsort_gpu计算时间: ", time.time() - start)

        # Resize GPU-space body boxes back into the original image ======================================================
        boxes = resize_boxes(boxes, w_ratio, h_ratio)

        # Deep SORT CPU tracker update ==================================================================================
        start = time.time()
        track_new_id_list, track_delete_id_list, not_confirmed_detected_track_list, detected_track_list = \
            deepsort.update(boxes, features, other_data_list)
        print("deepsort_cpu计算时间: ", time.time() - start)

        # Draw tracking results =========================================================================================
        for track_data in not_confirmed_detected_track_list:  # white = unconfirmed tracks
            track_id = track_data['trackID']
            track_bbox = track_data['body_box']
            print(track_bbox)
            cv2.rectangle(frame, (int(track_bbox[0]), int(track_bbox[1])),
                          (int(track_bbox[2]), int(track_bbox[3])),
                          (255, 255, 255), 2)
            cv2.putText(frame, str(track_id),
                        (int(track_bbox[0]), int(track_bbox[1])), 0,
                        5e-3 * 200, (255, 255, 255), 2)

        for track_data in detected_track_list:  # green = confirmed tracks
            track_id = track_data['trackID']
            track_bbox = track_data['body_box']
            track_other_data = track_data['other_data']

            print(track_other_data)
            cv2.rectangle(frame, (int(track_bbox[0]), int(track_bbox[1])),
                          (int(track_bbox[2]), int(track_bbox[3])),
                          (0, 255, 0), 3)
            cv2.putText(frame, str(track_id),
                        (int(track_bbox[0]), int(track_bbox[1])), 0,
                        5e-3 * 200, (0, 255, 0), 3)

        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
Exemplo n.º 4
0
def area_judge_and_show(cameraID, camera_address, camera_rotate, camera_size,
                        area_info_list):
    """Detect people with YOLOv3, judge which area each one is in, display.

    Endless capture loop: detects people on each frame, maps boxes back
    to the original resolution, draws the configured areas and each
    person's judged area.  Left-clicking the window records points that
    are drawn with their coordinates (useful for annotating new areas).
    Press 'q' to stop.

    Args:
        cameraID: camera identifier, forwarded to VideoCapture.
        camera_address: camera stream address.
        camera_rotate: rotation setting, forwarded to VideoCapture.
        camera_size: image shape passed to AreaJudge.
        area_info_list: area definitions passed to AreaJudge.
    """
    import cv2
    import time
    from PIL import Image

    from third_party.deep_sort_yolov3.yolo import YOLO

    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.utils.others import resize_boxes, box_tlwh_to_tlbr
    from persons_track.AreaJudge import AreaJudge

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)
    model_w, model_h = 608, 608  # YOLO network input size
    yolo = YOLO()
    area_judge = AreaJudge(img_shape=camera_size,
                           area_info_list=area_info_list)

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
    points = []  # left-click positions collected by the mouse callback

    def mouse(event, x, y, flags, param):
        # Record left-button clicks for on-screen annotation
        if event == cv2.EVENT_LBUTTONDOWN:
            points.append((x, y))

    cv2.setMouseCallback('test', mouse)

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:  # read timed out; try again
            print("get frame timeout")
            continue
        print("Frame_i", frame_i)
        print("读图片时间: ", time.time() - start)

        frame_for_gpu = cv2.resize(frame, (model_w, model_h))
        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        # Scale factors mapping model-input coords back to the original frame
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h

        # YOLO body-box detection =======================================================================================
        start = time.time()
        image_for_yolo = Image.fromarray(
            frame_for_gpu[..., ::-1])  # bgr to rgb
        box_list = yolo.detect_image(image_for_yolo)
        print("yolo_v3人体框检测时间: ", time.time() - start)
        # Resize GPU-space body boxes back into the original image ======================================================
        box_list = resize_boxes(box_list, w_ratio, h_ratio)

        # ==============================================================================================================
        frame = frame.copy(
        )  # OpenCV wrapper quirk when drawing on numpy views, see https://stackoverflow.com/questions/30249053/python-opencv-drawing-errors-after-manipulating-array-with-numpy
        # Draw the configured areas
        area_judge.draw(frame)
        for box_idx, body_data in enumerate(box_list):
            body_box = box_tlwh_to_tlbr(body_data)
            cv2.rectangle(frame, (body_box[0], body_box[1]),
                          (body_box[2], body_box[3]), (0, 255, 0), 2)

            body_data = {'body_box': body_box}
            # Judge which area the person is in
            area = area_judge.judge(body_data)
            cv2.putText(frame, str(area), (body_box[0], body_box[1]), 0,
                        5e-3 * 200, (0, 255, 0), 2)

        # ==============================================================================================================
        # Draw mouse-clicked points with their coordinates (for annotation)
        for point in points:
            x, y = point
            xy = "%d,%d" % (x, y)
            cv2.circle(frame, (x, y), 1, (255, 0, 0), thickness=-1)
            cv2.putText(frame,
                        xy, (x, y),
                        cv2.FONT_HERSHEY_PLAIN,
                        3.0, (255, 0, 0),
                        thickness=2)
        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
Exemplo n.º 5
0
def person_manage_and_show(cameraID, camera_address, camera_rotate,
                           camera_size, area_info_list, areaID_list):
    """Track people with YOLOv3 + Deep SORT, manage per-person state, display.

    Each frame is resized to the YOLO input size, people are detected
    and tracked; per-track state is kept in PersonsManage, the person's
    area is judged with AreaJudge and per-area data is fused with
    AreaDataFusion.  Unconfirmed tracks are drawn in white, confirmed
    tracks in green.  Press 'q' to stop.

    Args:
        cameraID: camera identifier, forwarded to VideoCapture and PersonsManage.
        camera_address: camera stream address.
        camera_rotate: rotation setting, forwarded to VideoCapture.
        camera_size: image shape passed to AreaJudge.
        area_info_list: area definitions passed to AreaJudge.
        areaID_list: area IDs passed to AreaDataFusion.
    """
    import cv2
    import time
    from PIL import Image

    from third_party.deep_sort_yolov3.yolo import YOLO
    from third_party.deep_sort_yolov3.deep_sort_model import DeepSortPreprocess, DeepSort

    from persons_track.utils.others import resize_boxes
    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.AreaJudge import AreaJudge
    from persons_track.PersonManage import PersonsManage
    from persons_track.DataFusion import AreaDataFusion

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)
    model_w, model_h = 608, 608  # YOLO network input size
    yolo = YOLO()
    deepsort_preprocess = DeepSortPreprocess()
    deepsort = DeepSort()
    area_judge = AreaJudge(img_shape=camera_size,
                           area_info_list=area_info_list,
                           mode=1)
    person_manage = PersonsManage(cameraID)
    area_data_fusion = AreaDataFusion(areaID_list)

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        # Bug fix: guard against read timeouts (frame is None), consistent
        # with the other pipelines; cv2.resize would otherwise raise.
        if frame is None:
            print("get frame timeout")
            continue
        print("Frame_i", frame_i)
        print("读图片时间: ", time.time() - start)

        frame_for_gpu = cv2.resize(frame, (model_w, model_h))
        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        # Scale factors mapping model-input coords back to the original frame
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h

        # YOLO body-box detection =======================================================================================
        start = time.time()
        image_for_yolo = Image.fromarray(
            frame_for_gpu[..., ::-1])  # bgr to rgb
        boxes = yolo.detect_image(image_for_yolo)
        print("yolo_v3人体框检测时间: ", time.time() - start)

        # Deep SORT GPU feature extraction ==============================================================================
        start = time.time()
        features = deepsort_preprocess.get_features(frame_for_gpu, boxes)
        print("deepsort_gpu计算时间: ", time.time() - start)

        # Resize GPU-space body boxes back into the original image ======================================================
        boxes = resize_boxes(boxes, w_ratio, h_ratio)

        # Deep SORT CPU tracker update ==================================================================================
        start = time.time()
        track_new_id_list, track_delete_id_list, not_confirmed_detected_track_list, detected_track_list = deepsort.update(
            boxes, features)
        print("deepsort_cpu计算时间: ", time.time() - start)

        # Process track data ============================================================================================
        frame = frame.copy(
        )  # OpenCV wrapper quirk when drawing on numpy views, see https://stackoverflow.com/questions/30249053/python-opencv-drawing-errors-after-manipulating-array-with-numpy
        for track_id in track_new_id_list:  # newly appeared tracks
            person_manage.add_person_use_trackID(track_id)
        for track_id in track_delete_id_list:  # tracks to delete
            person_manage.delete_person_use_trackID(track_id)
        for track_data in not_confirmed_detected_track_list:  # unconfirmed tracks
            track_id = track_data['trackID']
            track_body_box = track_data['body_box']
            # Update body box in person_manage
            person_manage.update_person_body_box(track_id, track_body_box)
            # Fetch data to output, if any, and fuse it per area
            send_data_flag, data = person_manage.get_person_output_data(
                track_id)
            if send_data_flag:
                area_data_fusion.update(data)
                print(data)
                for area_data in area_data_fusion.get_data():
                    print(area_data)

            # Visualise body box (white = unconfirmed)
            cv2.putText(
                frame,
                str(person_manage.get_person_still_status_flag(track_id)),
                (int(track_body_box[0]), int(track_body_box[1]) - 40), 0,
                5e-3 * 200, (255, 255, 255), 2)

            cv2.rectangle(frame,
                          (int(track_body_box[0]), int(track_body_box[1])),
                          (int(track_body_box[2]), int(track_body_box[3])),
                          (255, 255, 255), 2)
            cv2.putText(frame, str(track_id),
                        (int(track_body_box[0]), int(track_body_box[1])), 0,
                        5e-3 * 200, (255, 255, 255), 2)

        for track_data in detected_track_list:  # confirmed tracks
            track_id = track_data['trackID']
            track_body_box = track_data['body_box']
            # Judge which area the person is in
            body_data_for_area_judge = {'body_box': track_body_box}
            area_id = area_judge.judge(body_data_for_area_judge)
            # Update areaID and body box in person_manage
            person_manage.set_person_areaID(track_id, area_id)
            person_manage.update_person_body_box(track_id, track_body_box)
            # Fetch data to output, if any, and fuse it per area
            send_data_flag, data = person_manage.get_person_output_data(
                track_id)
            if send_data_flag:
                area_data_fusion.update(data)
                print(data)
                for area_data in area_data_fusion.get_data():
                    print(area_data)

            # Visualise body box (green = confirmed)
            cv2.putText(
                frame,
                str(person_manage.get_person_still_status_flag(track_id)),
                (int(track_body_box[0]), int(track_body_box[1]) - 40), 0,
                5e-3 * 200, (0, 255, 0), 2)

            cv2.rectangle(frame,
                          (int(track_body_box[0]), int(track_body_box[1])),
                          (int(track_body_box[2]), int(track_body_box[3])),
                          (0, 255, 0), 2)
            cv2.putText(frame, str(track_id),
                        (int(track_body_box[0]), int(track_body_box[1])), 0,
                        5e-3 * 200, (0, 255, 0), 2)

        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
Exemplo n.º 6
0
def person_manage_and_show(cameraID, camera_address, camera_rotate,
                           camera_size, area_info_list, areaID_list):
    """Track people with OpenPose + Deep SORT, manage per-person state, display.

    Each frame is resized to the OpenPose input, people are detected and
    tracked; per-track state is kept in PersonsManage, the person's area
    is judged with AreaJudge (mode=2, using the extra keypoint data) and
    per-area data is fused with AreaDataFusion.  Annotated frames are
    shown in a window and also written to 'output.avi'.  Press 'q' to stop.

    Args:
        cameraID: camera identifier, forwarded to VideoCapture and PersonsManage.
        camera_address: camera stream address.
        camera_rotate: rotation setting, forwarded to VideoCapture.
        camera_size: image shape passed to AreaJudge.
        area_info_list: area definitions passed to AreaJudge.
        areaID_list: area IDs passed to AreaDataFusion.
    """
    import cv2
    import time

    from third_party.tf_pose_estimation.tf_pose.estimator import TfPoseEstimator
    from third_party.tf_pose_estimation.tf_pose.networks import get_graph_path, model_wh

    from third_party.deep_sort_yolov3.deep_sort_model import DeepSortPreprocess, DeepSort

    from persons_track.utils.others import resize_boxes
    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.AreaJudge import AreaJudge
    from persons_track.PersonManage import PersonsManage
    from persons_track.DataFusion import AreaDataFusion

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)
    model = 'cmu'  # 'cmu' / 'mobilenet_thin' / 'mobilenet_v2_large' / 'mobilenet_v2_small'
    resolution = '656x368'  # Recommended: 432x368 or 656x368 or 1312x736
    model_w, model_h = model_wh(resolution)
    e = TfPoseEstimator(get_graph_path(model), target_size=(model_w, model_h))

    deepsort_preprocess = DeepSortPreprocess()
    deepsort = DeepSort()
    area_judge = AreaJudge(img_shape=camera_size,
                           area_info_list=area_info_list,
                           mode=2)
    person_manage = PersonsManage(cameraID)
    area_data_fusion = AreaDataFusion(areaID_list)

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # NOTE(review): output size is hard-coded to 2560x1440; frames of a
    # different size would produce a broken file — confirm camera resolution.
    out = cv2.VideoWriter('output.avi', fourcc, 10.0, (2560, 1440))

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:  # read timed out; try again
            print("get frame timeout")
            continue
        print("Frame_i", frame_i)
        print("读图片时间: ", time.time() - start)

        frame_for_gpu = cv2.resize(frame, (model_w, model_h))
        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        # Scale factors mapping model-input coords back to the original frame
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h

        # OpenPose body-box detection ===================================================================================
        start = time.time()
        humans = e.inference(frame_for_gpu,
                             resize_to_default=(model_w > 0 and model_h > 0),
                             upsample_size=4.0)
        # frame = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)
        body_box_list = []
        other_data_list = []
        for human in humans:
            result = human.get_useful_data(frame_for_gpu_w, frame_for_gpu_h,
                                           frame_original_w, frame_original_h)
            if result:  # skip humans without usable keypoint data
                body_box = result['body_box']
                other_data = result['other_data']
                body_box_list.append(body_box)
                other_data_list.append(other_data)
        boxes = body_box_list
        # Bug fix: log message previously said "opepose"
        print("openpose人体框检测时间: ", time.time() - start)

        # Deep SORT GPU feature extraction ==============================================================================
        start = time.time()
        features = deepsort_preprocess.get_features(frame_for_gpu, boxes)
        print("deepsort_gpu计算时间: ", time.time() - start)

        # Resize GPU-space body boxes back into the original image ======================================================
        boxes = resize_boxes(boxes, w_ratio, h_ratio)

        # Deep SORT CPU tracker update ==================================================================================
        start = time.time()
        track_new_id_list, track_delete_id_list, not_confirmed_detected_track_list, detected_track_list = deepsort.update(
            boxes, features, other_data_list)
        print("deepsort_cpu计算时间: ", time.time() - start)

        # Process track data ============================================================================================
        area_judge.draw(frame)
        for track_id in track_new_id_list:  # newly appeared tracks
            person_manage.add_person_use_trackID(track_id)
        for track_id in track_delete_id_list:  # tracks to delete
            person_manage.delete_person_use_trackID(track_id)
        for track_data in not_confirmed_detected_track_list:  # unconfirmed tracks
            track_id = track_data['trackID']
            track_body_box = track_data['body_box']
            # Update body box in person_manage
            person_manage.update_person_body_box(track_id, track_body_box)
            # Fetch data to output, if any, and fuse it per area
            send_data_flag, data = person_manage.get_person_output_data(
                track_id)
            if send_data_flag:
                area_data_fusion.update(data)
                print(data)
                for area_data in area_data_fusion.get_data():
                    print(area_data)

            # Visualise body box (white = unconfirmed)
            cv2.putText(
                frame,
                str(person_manage.get_person_still_status_flag(track_id)),
                (int(track_body_box[0]) + 40, int(track_body_box[1])), 0,
                5e-3 * 200, (255, 255, 255), 2)

            cv2.rectangle(frame,
                          (int(track_body_box[0]), int(track_body_box[1])),
                          (int(track_body_box[2]), int(track_body_box[3])),
                          (255, 255, 255), 2)
            cv2.putText(frame, str(track_id),
                        (int(track_body_box[0]), int(track_body_box[1]) - 10),
                        0, 5e-3 * 200, (255, 255, 255), 2)

        for track_data in detected_track_list:  # confirmed tracks
            track_id = track_data['trackID']
            track_body_box = track_data['body_box']
            track_other_data = track_data['other_data']
            # Judge which area the person is in (mode=2 uses keypoints too)
            body_data_for_area_judge = {
                'body_box': track_body_box,
                'other_data': track_other_data
            }
            area_id = area_judge.judge(body_data_for_area_judge)
            # Update areaID and body box in person_manage
            person_manage.set_person_areaID(track_id, area_id)
            person_manage.update_person_body_box(track_id, track_body_box)
            # Fetch data to output, if any, and fuse it per area
            send_data_flag, data = person_manage.get_person_output_data(
                track_id)
            if send_data_flag:
                area_data_fusion.update(data)
                print(data)
                for area_data in area_data_fusion.get_data():
                    print(area_data)

            # Visualise keypoints and body box (green = confirmed)
            for part in [
                    'nose_point', 'rhip_point', 'lhip_point',
                    'rshoulder_point', 'lshoulder_point'
            ]:
                if track_other_data[part] is not None:
                    cv2.circle(frame,
                               tuple(track_other_data[part]),
                               8, (255, 0, 0),
                               thickness=-1)

            cv2.rectangle(frame,
                          (int(track_body_box[0]), int(track_body_box[1])),
                          (int(track_body_box[2]), int(track_body_box[3])),
                          (0, 255, 0), 2)
            cv2.putText(frame, str(track_id),
                        (int(track_body_box[0]), int(track_body_box[1]) - 10),
                        0, 5e-3 * 200, (0, 255, 0), 2)
            cv2.putText(frame, str(area_id),
                        (int(track_body_box[0]), int(track_body_box[1]) - 50),
                        0, 5e-3 * 200, (0, 255, 0), 2)
            cv2.putText(
                frame,
                str(person_manage.get_person_still_status_flag(track_id)),
                (int(track_body_box[0]) + 100, int(track_body_box[1]) - 50), 0,
                5e-3 * 200, (0, 255, 0), 2)
        out.write(frame)

        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    out.release()
    cv2.destroyAllWindows()
Exemplo n.º 7
0
def track_and_show(cameraID, camera_address, camera_rotate):
    """Detect people with YOLOv3, track them with Deep SORT and display.

    Endless capture loop: detects people on each frame, updates the
    Deep SORT tracker and draws unconfirmed (white) and confirmed
    (green) tracks in an OpenCV window.  Press 'q' to stop.

    Args:
        cameraID: camera identifier, forwarded to VideoCapture.
        camera_address: camera stream address.
        camera_rotate: rotation setting, forwarded to VideoCapture.
    """
    import cv2
    import time
    from PIL import Image

    from third_party.deep_sort_yolov3.yolo import YOLO
    from third_party.deep_sort_yolov3.deep_sort_model import DeepSortPreprocess, DeepSort

    from persons_track.utils.camera_capture import VideoCapture
    from persons_track.utils.others import resize_boxes

    video_capture = VideoCapture(cameraID, camera_address, camera_rotate)
    model_w, model_h = 608, 608  # YOLO network input size
    yolo = YOLO()
    deepsort_preprocess = DeepSortPreprocess()
    deepsort = DeepSort()

    # Window setup
    cv2.namedWindow('test', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)

    while True:
        start = time.time()
        cameraID, frame_i, frame = video_capture.read()
        if frame is None:  # read timed out; try again
            print("get frame timeout")
            continue
        print("Frame_i", frame_i)
        print("读图片时间: ", time.time() - start)

        frame_for_gpu = cv2.resize(frame, (model_w, model_h))
        frame_original_h, frame_original_w = frame.shape[0:2]
        frame_for_gpu_h, frame_for_gpu_w = frame_for_gpu.shape[0:2]
        # Scale factors mapping model-input coords back to the original frame
        w_ratio, h_ratio = frame_original_w / frame_for_gpu_w, frame_original_h / frame_for_gpu_h

        # YOLO body-box detection =======================================================================================
        start = time.time()
        image_for_yolo = Image.fromarray(frame_for_gpu[..., ::-1])  # bgr to rgb
        boxes = yolo.detect_image(image_for_yolo)
        print("yolo_v3人体框检测时间: ", time.time() - start)

        # Deep SORT GPU feature extraction ==============================================================================
        start = time.time()
        features = deepsort_preprocess.get_features(frame_for_gpu, boxes)
        print("deepsort_gpu计算时间: ", time.time() - start)

        # Resize GPU-space body boxes back into the original image ======================================================
        boxes = resize_boxes(boxes, w_ratio, h_ratio)
        # Deep SORT CPU tracker update ==================================================================================
        start = time.time()
        track_new_id_list, track_delete_id_list, not_confirmed_detected_track_list, detected_track_list = \
            deepsort.update(boxes, features)
        print("deepsort_cpu计算时间: ", time.time() - start)

        # Draw tracking results =========================================================================================
        # frame = frame.copy()  # OpenCV wrapper quirk when drawing on numpy views, see https://stackoverflow.com/questions/30249053/python-opencv-drawing-errors-after-manipulating-array-with-numpy
        for track_data in not_confirmed_detected_track_list:  # white = unconfirmed tracks
            track_id = track_data['trackID']
            track_bbox = track_data['body_box']
            cv2.rectangle(frame, (int(track_bbox[0]), int(track_bbox[1])), (int(track_bbox[2]), int(track_bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, str(track_id), (int(track_bbox[0]), int(track_bbox[1])), 0, 5e-3 * 200, (255, 255, 255), 2)

        for track_data in detected_track_list:  # green = confirmed tracks
            track_id = track_data['trackID']
            track_bbox = track_data['body_box']
            cv2.rectangle(frame, (int(track_bbox[0]), int(track_bbox[1])), (int(track_bbox[2]), int(track_bbox[3])), (0, 255, 0), 2)
            cv2.putText(frame, str(track_id), (int(track_bbox[0]), int(track_bbox[1])), 0, 5e-3 * 200, (0, 255, 0), 2)

        cv2.imshow('test', frame)
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()