Example #1
    def __init__(self, debug=0):

        self.debug = debug

        self.frameCount = 0
        self.timePrev = time.time()

        #region DEEPSORT
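        # Gate appearance matches at cosine distance 0.2; keep at most 100
        # stored features per track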
        metric = nn_matching.NearestNeighborDistanceMetric(
            'cosine', matching_threshold=0.2, budget=100)
        self.tracker = Tracker(metric)

        absFilePath = os.path.abspath(__file__)
        fileDir = os.path.dirname(absFilePath)

        filePathModel = os.path.join(
            fileDir, 'deep_sort/resources/networks/mars-small128.pb')
        self.encoder = gdet.create_box_encoder(filePathModel, batch_size=1)
        #endregion

        self.detectionAndId = []

        self.subscriberImageDetections = rospy.Subscriber(
            'yolo_detector/output/compresseddetections',
            CompressedImageAndBoundingBoxes,
            self.callback,
            queue_size=1)

        self.publisherDetectionID = rospy.Publisher('yact/output/detectionids',
                                                    DetectionAndID,
                                                    queue_size=1)
Example #2
 def __init__(self):
     params = set_params()
     # Constructing OpenPose object allocates GPU memory
     opWrapper = op.WrapperPython()
     opWrapper.configure(params)
     opWrapper.start()
     self.opWrapper = opWrapper
     # Load Deep SORT model
     self.model_path = './deep_sort/model_data/mars-small128.pb'
     self.nms_max_overlap = 1.0
     self.encoder = create_box_encoder(self.model_path, batch_size=1)
Example #3
 def __init__(self):
     """
     Initializes DeepSORT session, i.e. object tracks and tracker internal state are persistent until object goes
     under garbage collector
     """
     self.__feature_extractor = create_box_encoder(
         '/work/object-tracking/workspace/pretrained/mars-small128.pb')
     self.__max_cosine_distance = 0.9
     metric = nn_matching.NearestNeighborDistanceMetric(
         "cosine", self.__max_cosine_distance, None)
     self.__tracker = Tracker(metric, n_init=5, max_age=100)
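Note that the budget argument here is None, which keeps every stored appearance feature per track; the other examples pass a finite nn_budget (e.g. 100) to bound the cost of nearest-neighbor matching.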
Example #4
    def __init__(self,
                 config_path,
                 image_height=480,
                 image_width=640,
                 image_hz=30,
                 deepsort_modelPath="/mars_sb_14.pb",
                 nms_conf_thresh=0.4,
                 nms_iou_thresh=0.5,
                 max_cosine_distance=0.6,
                 nn_budget=200,
                 nms_max_overlap=1.0):
        import os
        import json
        from threading import Event

        import modularmot
        from modularmot.utils import ConfigDecoder
        from deep_sort.tracker import Tracker
        from deep_sort import nn_matching
        from deep_sort.tools import generate_detections as gdet
        from deep_sort import preprocessing
        from deep_sort.detection import Detection as deep_detection
        with open(config_path) as cfg_file:
            config = json.load(cfg_file, cls=ConfigDecoder)
        project_dir = os.path.dirname(__file__)
        deepsort_modelPath = os.path.join(project_dir, deepsort_modelPath)
        self.nms_max_overlap = nms_max_overlap

        # Image info
        self.image_height = image_height
        self.image_width = image_width

        print("Load Engine")
        self.mot = modularmot.MOT(
            [int(image_width), int(image_height)],
            1.0 / int(image_hz),
            config['mot'],
            detections_only=True,
            draw=False,
            verbose=False)
        # deep_sort
        self.preprocessing = preprocessing
        self.deep_detection = deep_detection
        self.encoder = gdet.create_box_encoder(deepsort_modelPath,
                                               batch_size=1)
        metric = nn_matching.NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        self.tracker = Tracker(metric)
        self.currently_busy = Event()

        # Base class must be called at the end due to self.service_server.spin()
        BaseDetectionServer.__init__(self)
        print("Done Init!")
Example #5
    def __init__(self,
                 model_folder,
                 max_age,
                 max_distance=0.1,
                 nn_budget=None,
                 nms_max_overlap=1.0,
                 n_init=3):
        # Definition of the parameters
        self.max_distance = max_distance
        self.nn_budget = nn_budget
        self.nms_max_overlap = nms_max_overlap

        # deep_sort
        self.encoder = gdet.create_box_encoder(model_folder, batch_size=1)
        #self.metric = nn_matching.NearestNeighborDistanceMetric("cosine", 0.5 , nn_budget) #max_cosine_distance
        self.metric = nn_matching.NearestNeighborDistanceMetric(
            "euclidean", max_distance, nn_budget)
        self.tracker = Tracker(self.metric, max_age=max_age, n_init=n_init)
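Unlike the cosine-based examples, this one builds the metric with "euclidean"; NearestNeighborDistanceMetric accepts either name, and the threshold is interpreted on that metric's own scale, so the 0.1 default here is not directly comparable to the cosine thresholds above.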
Example #6
    def __init__(self,
                 tracker_type="",
                 max_cosine_distance=0.7,
                 nn_budget=100,
                 deep_sort_model_path=None,
                 labels=None,
                 nms_threshold=0.7):

        self.labels = labels
        self.nms_threshold = nms_threshold
        self.tracker_type = tracker_type
        self.nn_budget = nn_budget
        self.max_cosine_distance = max_cosine_distance
        self.trackers = [[] for _ in range(len(labels))] if labels else []
        self.encoder = None
        if deep_sort_model_path is not None:
            from deep_sort.tools import generate_detections as gen_dt
            self.encoder = gen_dt.create_box_encoder(deep_sort_model_path,
                                                     batch_size=1)
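Keeping one tracker list per label means IDs are never matched across classes, the same per-class pattern Example #12 uses.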
Example #7
    def __init__(self, package_name, **kwargs):
        """
        Init package attributes here

        :param kwargs: config params
        :return:
        """
        if not os.path.isdir('model_data'):
            os.makedirs('model_data')
            # download artifacts
        package = dl.packages.get(package_name=package_name)

        if not os.path.isfile('model_data/yolo.h5'):
            artifact = package.project.artifacts.get(package_name=package_name,
                                                     artifact_name='yolo.h5')
            artifact.download(local_path='model_data')
        if not os.path.isfile('model_data/yolo_anchors.txt'):
            artifact = package.project.artifacts.get(
                package_name=package_name, artifact_name='yolo_anchors.txt')
            artifact.download(local_path='model_data')
        if not os.path.isfile('model_data/coco_classes.txt'):
            artifact = package.project.artifacts.get(
                package_name=package_name, artifact_name='coco_classes.txt')
            artifact.download(local_path='model_data')
        if not os.path.isfile('model_data/mars-small128.pb'):
            artifact = package.project.artifacts.get(
                package_name=package_name, artifact_name='mars-small128.pb')
            artifact.download(local_path='model_data')

        ###############
        # load models #
        ###############
        self.yolo = YOLO()
        self.encoder = gdet.create_box_encoder('model_data/mars-small128.pb',
                                               batch_size=1)
        self.graph = tf.get_default_graph()
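Capturing the default graph right after the models are built is the usual TF1/Keras idiom: inference triggered later from another thread can then run inside a with self.graph.as_default(): block.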
Example #8
def detect_and_track(file_path, save_path, detection_mode="YOLO3"):
	# Definition of the parameters
	max_cosine_distance = 0.3
	nn_budget = None
	nms_max_overlap = 1.0
	# If saving the video, define the output size
	size = (640, 480)
	save_fps = 24

	# use deep_sort tracker
	model_filename = '../deep_sort/model_data/resources/networks/mars-small128.pb'
	encoder = gdet.create_box_encoder(model_filename, batch_size=1)
	metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
	tracker = Tracker(metric)

	write_video_flag = True
	# Assume at most 300 targets in the frame; generate 300 random colors
	colours = np.random.rand(300, 3) * 255
	video_capture = cv2.VideoCapture(file_path)

	if write_video_flag:
		output_video = cv2.VideoWriter(save_path + 'output.mp4', cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), save_fps,
		                               size)
		object_list_file = open(save_path + 'detection.txt', 'w')
		frame_index = -1

	if detection_mode == "YOLO3":
		yolo = YOLO()
	elif detection_mode == "SSD":
		ssd = SSD()

	# appear records how many frames each target has been present; number counts all distinct targets seen
	appear = {}
	number = 0

	while True:
		ret, frame = video_capture.read()
		if not ret:
			break
		frame = cv2.resize(frame, size)
		# Record when processing of this frame starts
		start_time = time.time()
		if detection_mode == "YOLO3":
			image = Image.fromarray(frame[..., ::-1])
			# boxes are [x, y, w, h] coordinates, detect_scores are the detection scores, origin_boxes use top-left + bottom-right corners
			boxes, detect_scores, origin_boxes = yolo.detect_image(image)
		elif detection_mode == "SSD":
			rclasses, rscores, rbboxes = ssd.process_image(frame)
			height, width = frame.shape[0], frame.shape[1]
			boxes = []
			# Iterate over every detected object in this frame (one per entry in rclasses)
			for i in range(rclasses.shape[0]):
				# rbboxes holds top-left and bottom-right coordinates normalized to [0, 1]
				xmin, ymin = int(rbboxes[i, 1] * width), int(rbboxes[i, 0] * height)
				xmax, ymax = int(rbboxes[i, 3] * width), int(rbboxes[i, 2] * height)
				# Convert to x, y, w, h coordinates
				x, y, w, h = int(xmin), int(ymin), int(xmax - xmin), int(ymax - ymin)
				if x < 0:
					w = w + x
					x = 0
				if y < 0:
					h = h + y
					y = 0
				boxes.append([x, y, w, h])
			boxes = np.array(boxes)

		features = encoder(frame, boxes)
		# score to 1.0 here
		detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxes, features)]
		# Non-maximum suppression
		boxes = np.array([d.tlwh for d in detections])
		scores = np.array([d.confidence for d in detections])
		indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
		detections = [detections[i] for i in indices]
		# Tracker predict and update steps
		tracker.predict()
		tracker.update(detections)

		for track in tracker.tracks:
			if not track.is_confirmed() or track.time_since_update > 1:
				continue
			bbox = track.to_tlbr()
			color = (int(colours[track.track_id % 300, 0]), int(colours[track.track_id % 300, 1]),
			         int(colours[track.track_id % 300, 2]))
			# (int(bbox[0]), int(bbox[1])) is the top-left corner, (int(bbox[2]), int(bbox[3])) the bottom-right corner
			cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
			cv2.putText(frame, str(track.track_id), (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200, color, 2)
			if track.track_id in appear.keys():
				appear[track.track_id] += 1
			else:
				number += 1
				appear[track.track_id] = 1

		show_fps = 1. / (time.time() - start_time)
		cv2.putText(frame, text="FPS: " + str(int(show_fps)), org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
		            fontScale=0.50, color=(0, 255, 0), thickness=2)
		cv2.putText(frame, text="number: " + str(number), org=(3, 30), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
		            fontScale=0.50, color=(0, 255, 0), thickness=2)
		cv2.imshow('result', frame)

		if write_video_flag:
			# Save this frame of the video
			output_video.write(frame)
			# Advance the frame index
			frame_index = frame_index + 1
			# Write this frame's index to detection.txt
			object_list_file.write(str(frame_index) + ' ')
			# Write the (x, y, w, h) box of every detection in this frame
			if len(boxes) != 0:
				for i in range(0, len(boxes)):
					object_list_file.write(
						str(boxes[i][0]) + ' ' + str(boxes[i][1]) + ' ' + str(boxes[i][2]) + ' ' + str(
							boxes[i][3]) + ' ')
			object_list_file.write('\n')

		# Press q to quit
		if cv2.waitKey(1) & 0xFF == ord('q'):
			break

	video_capture.release()
	if write_video_flag:
		output_video.release()
		object_list_file.close()
	cv2.destroyAllWindows()
Example #9
def main(args):
    # setting deep sort parameters
    tracker = Tracker(METRIC)
    encoder = gdet.create_box_encoder(args.tracker, batch_size=1)

    # Initialize the object detector
    detector = DetectorAPI(args.model_path)

    # open the video
    cap = cv.VideoCapture(args.video_file)

    # create output file
    if args.output_path:
        fourcc = cv.VideoWriter_fourcc(*'MP4V')
        # cap.get(3) = width, cap.get(4) = height
        output = cv.VideoWriter(args.output_path, fourcc, 20.0,
                                (int(cap.get(3)), int(cap.get(4))))

    # read the video frame by frame
    while True:
        check, frame = cap.read()
        # the end of the video?
        if not check:
            break

        # initialize tracking parameters
        sort_tracking_params = []

        # do real detection
        # boxes are in (x_top_left, y_top_left, x_bottom_right, y_bottom_right) format
        boxes, scores, classes, number = detector.processFrame(frame,
                                                               debug_time=True)

        # filter boxes due to threshold
        boxes = [
            boxes[i] for i in range(0, number) if scores[i] > args.threshold
        ]

        # do tracking
        features = encoder(frame, boxes)
        detections = [
            Detection(tbox, 1.0, feature)
            for tbox, feature in zip(boxes, features)
        ]
        tracker.predict()
        tracker.update(detections)

        # draw boxes due to tracking results
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            tbox = track.to_tlbr()
            cv.rectangle(frame, (int(tbox[0]), int(tbox[1])),
                         (int(tbox[2]), int(tbox[3])), (255, 255, 255),
                         thickness=2)
            cv.putText(frame,
                       'id: {}'.format(track.track_id),
                       (int(tbox[0]), int(tbox[1])),
                       cv.FONT_HERSHEY_PLAIN,
                       1, (255, 255, 255),
                       thickness=2)

        cv.imshow(WINDOW_TITLE, frame)

        # save frames to specified path
        if args.output_path:
            output.write(frame)

        key = cv.waitKey(1)
        if key & 0xFF == ord('q'):
            break
Example #10
def efficientDet_video_inference(video_src, compound_coef=0, force_input_size=None,
                                 frame_skipping=3,
                                 threshold=0.2, out_path=None, imshow=False,
                                 display_fps=False):

    #deep-sort variables

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0


    model_filename = '/home/shaheryar/Desktop/Projects/Football-Monitoring/deep_sort/model_weights/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    # Require 5 consecutive hits before a track is confirmed
    tracker = Tracker(metric, n_init=5)

    # efficientDet-pytorch variables
    iou_threshold = 0.4
    use_cuda = True
    use_float16 = False
    cudnn.fastest = True
    cudnn.benchmark = True

    input_size = input_sizes[compound_coef] if force_input_size is None else force_input_size

    # load model
    model = EfficientDetBackbone(compound_coef=compound_coef, num_classes=len(obj_list))
    model.load_state_dict(torch.load(f'weights/efficientdet-d{compound_coef}.pth'))
    model.requires_grad_(False)
    model.eval()

    if use_cuda:
        model = model.cuda()
    if use_float16:
        model = model.half()

    regressBoxes = BBoxTransform()
    clipBoxes = ClipBoxes()

    # Video capture
    cap = cv2.VideoCapture(video_src)
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    fourcc = cv2.VideoWriter_fourcc(*'MPEG')
    fps = cap.get(cv2.CAP_PROP_FPS)
    print("Video fps",fps)
    if(out_path is not None):
        outp = cv2.VideoWriter(out_path, fourcc, fps, (frame_width, frame_height))
    i=0
    start= time.time()
    current_frame_fps=0
    while True:

        ret, frame = cap.read()

        if not ret:
            break
        t1 = time.time()
        if frame_skipping == 0 or i % frame_skipping == 0:
            # frame preprocessing (running detections)
            ori_imgs, framed_imgs, framed_metas, t1 = preprocess_video(frame, width=input_size, height=input_size)
            if use_cuda:
                x = torch.stack([fi.cuda() for fi in framed_imgs], 0)
            else:
                x = torch.stack([torch.from_numpy(fi) for fi in framed_imgs], 0)
            # model predict
            t1=time.time()
            with torch.no_grad():
                features, regression, classification, anchors = model(x)

                out = postprocess(x,
                                  anchors, regression, classification,
                                  regressBoxes, clipBoxes,
                                  threshold, iou_threshold)
            # Post processing
            out = invert_affine(framed_metas, out)
            # decoding bbox ,object name and scores
            boxes, classes, scores = decode_predictions(out[0])
            org_boxes = boxes.copy()
            t2 = time.time() - t1

            # feature extraction for deep sort
            boxes = [convert_bbox_to_deep_sort_format(frame.shape, b) for b in boxes]

            features = encoder(frame, boxes)
            detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxes, features)]
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]
            tracker.predict()
            tracker.update(detections)

        i = i + 1
        img_show = frame.copy()
        # Draw the raw detections; clamp the index since detections and tracks need not align one-to-one
        for j in range(min(len(org_boxes), len(tracker.tracks))):
            img_show = drawBoxes(img_show, org_boxes[j], (255, 255, 0), str(tracker.tracks[j].track_id))

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            x1 = int(bbox[0])
            y1 = int(bbox[1])
            x2 = int(bbox[2])
            y2 = int(bbox[3])
            roi = frame[y1:y2, x1:x2]
            cv2.rectangle(img_show, (x1, y1), (x2, y2), update_color_association(roi, track.track_id), 2)
            cv2.putText(img_show, str(track.track_id), (x1, y1), 0, 5e-3 * 100, (255, 255, 0), 1)


        if display_fps:
            current_frame_fps = 1 / t2
        else:
            current_frame_fps = 0

        cv2.putText(img_show, 'FPS: {0:.2f}'.format(current_frame_fps), (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (255, 255, 0),
                    2, cv2.LINE_AA)
        if i % int(fps) == 0:
            print("Processed", str(int(i / fps)), "seconds")
            print("Time taken", time.time() - start)

        if imshow:
            img_show = cv2.resize(img_show, (0, 0), fx=0.75, fy=0.75)
            cv2.imshow('Frame', img_show)
            # Press Q on the keyboard to exit
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        if out_path is not None:
            outp.write(img_show)

    cap.release()
    if out_path is not None:
        outp.release()
    cv2.destroyAllWindows()
Example #11
                    })
            if show:
                cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
                cv2.imshow("result", frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
    finally:
        cap.release()
    return detection_dict


if __name__ == "__main__":
    import sys
    import tensorflow as tf
    from keras_yolo3.yolo import YOLO
    from deep_sort.tools import generate_detections as gdet

    # load model and tracker encoder
    yolo = YOLO()
    encoder = gdet.create_box_encoder('model_data/mars-small128.pb',
                                      batch_size=1)
    graph = tf.get_default_graph()
    # run on video (first command-line argument is the input video path)
    video_filepath = sys.argv[1]
    detect_video(graph=graph,
                 yolo=yolo,
                 encoder=encoder,
                 video_filepath=video_filepath,
                 mark_on_video=True,
                 show=True)
Example #12
def track(json_dir, video_dir, detect_dir, save_dir):
    starttime = timeit.default_timer()

    Path(save_dir).mkdir(parents=True, exist_ok=True)

    cam_datas = get_list_data(json_dir)

    max_cosine_distance = config['tracker']['max_cosine_distance']
    nn_budget = config['tracker']['nn_budget']
    nms_max_overlap = config['tracker']['nms_max_overlap']

    # Deep SORT
    model_filename = config['tracker']['modelfile']
    encoder = gdet.create_box_encoder(
        model_filename, batch_size=config['detector']['batchsize'] * 4)

    classes_map = config['detector']['classesmap']
    class_names = config['detector']['classnames']

    for cam_data in cam_datas:
        cam_name = cam_data['camName']
        width = int(cam_data['imageWidth'])
        height = int(cam_data['imageHeight'])
        FPS = 10.0

        video_path = os.path.join(video_dir, cam_name + '.mp4')
        video_cap = cv2.VideoCapture(video_path)
        num_frames = int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))

        detect_res_path = os.path.join(detect_dir, cam_name + '.npy')
        bboxes = np.load(detect_res_path, allow_pickle=True)

        trackers = []
        filtered_tracks = []
        track_len_dict = []
        for _ in range(len(classes_map)):
            metric = nn_matching.NearestNeighborDistanceMetric(
                "cosine", max_cosine_distance, nn_budget)
            tracker = Tracker(metric)
            trackers.append(tracker)
            filtered_tracks.append([])
            track_len_dict.append({})

        for i in tqdm(range(num_frames), desc='Tracking {}'.format(cam_name)):
            success, frame = video_cap.read()

            # Convert once per frame; converting inside the class loop would
            # flip the channels back on every second iteration
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            for class_id in range(len(classes_map)):
                boxes, confidence, classes = map_boxes(bboxes[i], classes_map,
                                                       class_id)

                features = encoder(frame, boxes)
                detections = [
                    Detection(bbox, confidence, cls, feature)
                    for bbox, confidence, cls, feature in zip(
                        boxes, confidence, classes, features)
                ]

                # Run non-maxima suppression.
                boxes = np.array([d.tlwh for d in detections])
                scores = np.array([d.confidence for d in detections])
                indices = preprocessing.non_max_suppression(
                    boxes, nms_max_overlap, scores)
                detections = [detections[k] for k in indices]

                # tracking
                trackers[class_id].predict()
                trackers[class_id].update(detections)

                tracks = []
                for track in trackers[class_id].tracks:
                    if not track.is_confirmed() or track.time_since_update > 1:
                        continue
                    bbox = track.to_tlbr()
                    tracks.append([
                        int(bbox[0]),
                        int(bbox[1]),
                        int(bbox[2]),
                        int(bbox[3]), class_id, track.track_id
                    ])
                    if track.track_id not in track_len_dict[class_id].keys():
                        track_len_dict[class_id][track.track_id] = 1
                    else:
                        track_len_dict[class_id][track.track_id] += 1
                filtered_tracks[class_id].append(tracks)

        # remove short track
        if config['tracker']['min_len']:
            short_tracks = []
            for class_id in range(len(classes_map)):
                short_track_ids = [
                    track_id for track_id in track_len_dict[class_id].keys()
                    if track_len_dict[class_id][track_id] < config['tracker']
                    ['min_len']
                ]
                short_tracks.append(short_track_ids)

            for class_id in range(len(classes_map)):
                for frame_id in range(num_frames):
                    list_remove = []
                    for idx, track in enumerate(
                            filtered_tracks[class_id][frame_id]):
                        if track[-1] in short_tracks[class_id]:
                            list_remove.append(idx)
                    list_remove.sort(reverse=True)
                    for idx in list_remove:
                        filtered_tracks[class_id][frame_id].pop(idx)

        filtered_tracks = np.array(filtered_tracks)

        if save_dir:
            Path(save_dir).mkdir(parents=True, exist_ok=True)
            filepath = os.path.join(save_dir, cam_name)
            np.save(filepath, filtered_tracks)

    endtime = timeit.default_timer()

    print('Track time: {} seconds'.format(endtime - starttime))
Example #13
    args = build_argparser().parse_args()
    personDetector = PersonDetector(args)
    deepSort = False
    sort = True

    # SORT
    mot_tracker = Sort()

    # DEEP SORT PARAMS
    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None

    model_filename = 'deep_sort/models/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric(
        "cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    # Open Webcam
    video_capturer = cv2.VideoCapture(0)
    video_capturer.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
    video_capturer.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    infer_time = []
    videoSize = (video_capturer.get(3), video_capturer.get(4))
    while video_capturer.isOpened():

        _, frame = video_capturer.read()