Example #1
def test_rtf_stream(conversion):
    """
    Testing Real-Time Frames Mode
    """
    mpd_file_path = return_mpd_path()
    try:
        # Open stream
        stream = CamGear(source=return_testvideo_path(),
                         colorspace=conversion).start()
        stream_params = {
            "-clear_prev_assets": True,
            "-input_framerate": "invalid",
        }
        streamer = StreamGear(output=mpd_file_path, **stream_params)
        while True:
            frame = stream.read()
            # check if frame is None
            if frame is None:
                break
            if conversion == "COLOR_BGR2RGBA":
                streamer.stream(frame, rgb_mode=True)
            else:
                streamer.stream(frame)
        stream.stop()
        streamer.terminate()
        mpd_file = [
            os.path.join(mpd_file_path, f) for f in os.listdir(mpd_file_path)
            if f.endswith(".mpd")
        ]
        assert len(mpd_file) == 1, "Failed to create MPD file!"
        assert check_valid_mpd(mpd_file[0])
    except Exception as e:
        pytest.fail(str(e))
Example #2
def test_write(conversion):
	"""
	Testing WriteGear Compression-Mode (FFmpeg) Writer capabilities in different colorspaces
	"""
	#Open stream
	stream = CamGear(source=return_testvideo_path(), colorspace = conversion, logging=True).start()
	writer = WriteGear(output_filename = 'Output_tw.mp4',  custom_ffmpeg = return_static_ffmpeg()) #Define writer
	while True:
		frame = stream.read()
		# check if frame is None
		if frame is None:
			#if True break the infinite loop
			break

		if conversion in ['COLOR_BGR2RGB', 'COLOR_BGR2RGBA']:
			writer.write(frame, rgb_mode = True)
		else:
			writer.write(frame)
	stream.stop()
	writer.close()
	basepath, _ = os.path.split(return_static_ffmpeg()) 
	ffprobe_path  = os.path.join(basepath,'ffprobe.exe' if os.name == 'nt' else 'ffprobe')
	result = check_output([ffprobe_path, "-v", "error", "-count_frames", "-i", os.path.abspath('Output_tw.mp4')])
	if result:
		if not isinstance(result, string_types):
			result = result.decode()
		logger.debug('Result: {}'.format(result))
		for i in ["Error", "Invalid", "error", "invalid"]:
			assert not(i in result)
	os.remove(os.path.abspath('Output_tw.mp4'))
Example #3
def test_youtube_playback():
	"""
	Testing YouTube Video Playback capabilities of VidGear
	"""
	if os.name != 'nt':
		Url = 'https://youtu.be/YqeW9_5kURI'
		result = True
		errored = False #keep watch if youtube streaming not successful
		try:
			true_video_param = return_youtubevideo_params(Url)
			options = {'THREADED_QUEUE_MODE':False}
			stream = CamGear(source=Url, y_tube = True, logging=True, **options).start() # YouTube Video URL as input
			height = 0
			width = 0
			fps = 0
			while True:
				frame = stream.read()
				if frame is None:
					break
				if height == 0 or width == 0:
					fps = stream.framerate
					height,width = frame.shape[:2]
			print('WIDTH: {} HEIGHT: {} FPS: {}'.format(true_video_param[0],true_video_param[1],true_video_param[2]))
			print('WIDTH: {} HEIGHT: {} FPS: {}'.format(width,height,fps))
		except Exception as error:
			print(error)
			errored = True

		if not errored:
			assert true_video_param[0] == width and true_video_param[1] == height and true_video_param[2] == fps
		else:
			print('YouTube playback Test is skipped due to above error!')

	else:
		print('YouTube playback Test is skipped due to bug with Appveyor on Windows builds!')
Example #4
def test_youtube_playback(url):
    """
	Testing Youtube Video Playback capabilities of VidGear
	"""
    try:
        height = 0
        width = 0
        fps = 0
        # get params
        stream = CamGear(source=url, y_tube=True,
                         logging=True).start()  # YouTube Video URL as input
        while True:
            frame = stream.read()
            if frame is None:
                break
            if height == 0 or width == 0:
                fps = stream.framerate
                height, width = frame.shape[:2]
                break
        stream.stop()
        # get true params
        true_video_param = return_youtubevideo_params(url)
        # log everything
        logger.debug("WIDTH: {} HEIGHT: {} FPS: {}".format(
            true_video_param[0], true_video_param[1], true_video_param[2]))
        logger.debug("WIDTH: {} HEIGHT: {} FPS: {}".format(width, height, fps))
        # compare measured results against ground truth
        assert (true_video_param[0] == width and true_video_param[1] == height
                and round(true_video_param[2], 1) == round(fps, 1))
    except Exception as e:
        if isinstance(e, (RuntimeError, ValueError)) and url == "im_not_a_url":
            pass
        else:
            pytest.fail(str(e))
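A hedged sketch of how the url argument for the test above might be supplied with pytest: only the "im_not_a_url" negative case is actually implied by its exception handler, and the working URL is borrowed from Example #8 purely for illustration.

import pytest

# Hypothetical parametrization for the test above; only "im_not_a_url" is
# implied by its exception handler, the valid URL is illustrative.
@pytest.mark.parametrize("url", ["https://youtu.be/dQw4w9WgXcQ", "im_not_a_url"])
def test_youtube_playback(url):
    ...  # body as shown above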
Example #5
def test_stream_mode(url, quality, parameters):
    """
    Testing Stream Mode Playback capabilities of CamGear
    """
    try:
        height = 0
        width = 0
        fps = 0
        options = {"STREAM_RESOLUTION": quality, "STREAM_PARAMS": parameters}
        # get params
        stream = CamGear(
            source=url, stream_mode=True, logging=True, **options
        ).start()  # YouTube Video URL as input
        while True:
            frame = stream.read()
            if frame is None:
                break
            if height == 0 or width == 0:
                fps = stream.framerate
                height, width = frame.shape[:2]
                break
        stream.stop()
        logger.debug("WIDTH: {} HEIGHT: {} FPS: {}".format(width, height, fps))
    except Exception as e:
        if isinstance(e, (RuntimeError, ValueError)) and (
            url == "im_not_a_url" or platform.system() in ["Windows", "Darwin"]
        ):
            pass
        else:
            pytest.fail(str(e))
Example #6
def test_rtf_livestream(format):
    """
    Testing Real-Time Frames Mode with livestream.
    """
    assets_file_path = return_assets_path(False if format == "dash" else True)

    try:
        # Open stream
        options = {"THREAD_TIMEOUT": 300}
        stream = CamGear(source=return_testvideo_path(), **options).start()
        stream_params = {
            "-livestream": True,
        }
        streamer = StreamGear(output=assets_file_path, format=format, **stream_params)
        while True:
            frame = stream.read()
            # check if frame is None
            if frame is None:
                break
            streamer.stream(frame)
        stream.stop()
        streamer.terminate()
    except Exception as e:
        if not isinstance(e, queue.Empty):
            pytest.fail(str(e))
Example #7
    def process_video(self, person_detector, face_detector, face_segmentor):
        """
        Processes video with person detection, face detection and face segmentation.

        :param person_detector: Person Detection
        :type person_detector: PersonDetector class
        :param face_detector: Face Detection
        :type face_detector: FaceDetector class
        :param face_segmentor: Face Segmentation 
        :type face_segmentor: FaceSegmentation class
        """
        self.person_detector = person_detector
        self.face_detector = face_detector
        self.face_segmentor = face_segmentor

        if if_rtsp(self.video_source):
            vid = VideoStreamer(self.video_source).start()
        elif if_webcam(self.video_source):
            vid = CamGear(source=int(self.video_source)).start()
        else:
            vid = CamGear(source=self.video_source).start()

        while True:
            frame = vid.read()
            if frame is None:
                break
            frame = self.process_frame(frame)
            cv2.imshow("VigilantGantry Face Segmentation AI Engine", frame)
            if cv2.waitKey(1) == 27:
                break
        vid.stop()
        cv2.destroyAllWindows()
        return None
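The if_rtsp() and if_webcam() helpers used above are project-specific and not shown here; a plausible sketch of what they are assumed to check (hypothetical implementations):

def if_rtsp(source):
    # Hypothetical: treat rtsp:// (or rtmp://) URLs as network streams.
    return isinstance(source, str) and source.lower().startswith(("rtsp://", "rtmp://"))


def if_webcam(source):
    # Hypothetical: bare device indices such as "0" or "1" are treated as webcams.
    return isinstance(source, str) and source.isdigit()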
Example #8
def test_youtube_playback():
	"""
	Testing YouTube Video Playback capabilities of VidGear
	"""
	if os.name != 'nt':
		Url = 'https://youtu.be/dQw4w9WgXcQ'
		result = True
		try:
			true_video_param = return_youtubevideo_params(Url)
			stream = CamGear(source=Url, y_tube = True,  time_delay=2, logging=True).start() # YouTube Video URL as input
			fps = stream.framerate
			height = 0
			width = 0 
			while True:
				frame = stream.read()
				if frame is None:
					result = False
					break
				if height == 0 or width == 0:
					height,width = frame.shape[:2]
					break
			print('WIDTH: {} HEIGHT: {} FPS: {}'.format(true_video_param[0],true_video_param[1],true_video_param[2]))
			print('WIDTH: {} HEIGHT: {} FPS: {}'.format(width,height,fps))
		except Exception as error:
			print(error)
			result = False
		print('Result: {}'.format('Skipped' if not result else 'Displaying...'))	
		if result:
			assert true_video_param[0] == width and true_video_param[1] == height and true_video_param[2] == fps
		else:
			print('YouTube playback Test is skipped, since valid frames are not returned!')
	else:
		print('YouTube playback Test is skipped due to bug with Appveyor on Windows builds!')
Example #9
    def init_class(self, cls_args):
        #self.dev = cv2.VideoCapture(2)

        dev_options = {
            'CAP_PROP_FRAME_WIDTH': 1920,
            'CAP_PROP_FRAME_HEIGHT': 1080,
            'CAP_PROP_FPS': 30
        }
        self.dev = CamGear(source=2, **dev_options)
        self.dev.start()
Example #10
def vidgear_livestream(url):
    vs = CamGear(source=url, y_tube=True).start()
    while True:
        frame = vs.read()
        if frame is None:
            break
        frame = imutils.resize(frame, width=500)
        cv2.imshow("Output Stream", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
Example #11
def playback(level):
    """
    tests CamGear API's playback capabilities
    """
    options = {"THREADED_QUEUE_MODE": False}
    stream = CamGear(source=level, **options).start()
    fps = FPS().start()
    while True:
        frame = stream.read()
        if frame is None:
            break
        fps.update()
    stream.stop()
    logger.info("approx. FPS: {:.2f}".format(fps.average_fps()))
Example #12
def test_rtf_stream(conversion, format):
    """
    Testing Real-Time Frames Mode
    """
    assets_file_path = return_assets_path(False if format == "dash" else True)

    try:
        # Open stream
        options = {"THREAD_TIMEOUT": 300}
        stream = CamGear(
            source=return_testvideo_path(), colorspace=conversion, **options
        ).start()
        stream_params = {
            "-clear_prev_assets": True,
            "-input_framerate": "invalid",
        }
        if format == "hls":
            stream_params.update(
                {
                    "-hls_base_url": return_assets_path(
                        False if format == "dash" else True
                    )
                    + os.sep
                }
            )
        streamer = StreamGear(output=assets_file_path, format=format, **stream_params)
        while True:
            frame = stream.read()
            # check if frame is None
            if frame is None:
                break
            if conversion == "COLOR_BGR2RGBA":
                streamer.stream(frame, rgb_mode=True)
            else:
                streamer.stream(frame)
        stream.stop()
        streamer.terminate()
        asset_file = [
            os.path.join(assets_file_path, f)
            for f in os.listdir(assets_file_path)
            if f.endswith(".mpd" if format == "dash" else ".m3u8")
        ]
        assert len(asset_file) == 1, "Failed to create asset file!"
        if format == "dash":
            assert check_valid_mpd(asset_file[0]), "Test Failed!"
        else:
            assert extract_meta_video(asset_file[0]), "Test Failed!"
    except Exception as e:
        if not isinstance(e, queue.Empty):
            pytest.fail(str(e))
Example #13
def playback(level):
	"""
	Function to test VidGear playback capabilities
	"""
	stream = CamGear(source=level).start()
	fps = FPS().start()
	while True:
		frame = stream.read()
		if frame is None:
			break
		fps.update()
	stream.stop()
	fps.stop()
	print("[LOG] total elasped time: {:.2f}".format(fps.total_time_elapsed()))
	print("[LOG] approx. FPS: {:.2f}".format(fps.fps()))
Example #14
def playback(level):
    """
	tests CamGear API's playback capabilities
	"""
    options = {'THREADED_QUEUE_MODE': False}
    stream = CamGear(source=level, **options).start()
    fps = FPS().start()
    while True:
        frame = stream.read()
        if frame is None:
            break
        fps.update()
    stream.stop()
    fps.stop()
    logger.debug("total elasped time: {:.2f}".format(fps.total_time_elapsed()))
    logger.debug("approx. FPS: {:.2f}".format(fps.fps()))
Example #15
def Videocapture_withVidGear(path):
    """
	Function to benchmark VidGear multi-threaded video playback 
	"""
    stream = CamGear(source=path).start()
    fps_Vid = FPS().start()
    while True:
        frame = stream.read()
        if frame is None:
            break
        fps_Vid.update()
    fps_Vid.stop()
    stream.stop()
    print("VidGear")
    print("[LOG] total elasped time: {:.2f}".format(
        fps_Vid.total_time_elapsed()))
    print("[LOG] approx. FPS: {:.2f}".format(fps_Vid.fps()))
Example #16
def crop(camera_id, filename, hrnet_m, hrnet_c, hrnet_j, hrnet_weights,
         hrnet_joints_set, image_resolution, single_person, use_tiny_yolo,
         disable_tracking, max_batch_size, disable_vidgear, save_video,
         video_format, video_framerate, device):
    video_writer = None
    if filename is not None:
        rotation_code = check_video_rotation(filename)
        video = cv2.VideoCapture(filename)
        assert video.isOpened()
        nof_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
    else:
        rotation_code = None
        if disable_vidgear:
            video = cv2.VideoCapture(camera_id)
            assert video.isOpened()
        else:
            video = CamGear(camera_id).start()
    while True:
        t = time.time()

        if filename is not None or disable_vidgear:
            ret, frame = video.read()
            if ret:
                #Code for bounding box and cropping of the video
                bbox, label, conf = cv.detect_common_objects(frame)
                frame_bounding = draw_bbox(frame, bbox, label, conf)
                #bb.add(image, left, top, right, bottom, label, color)
                if save_video:
                    if video_writer is None:
                        fourcc = cv2.VideoWriter_fourcc(
                            *video_format)  # video format
                        video_writer = cv2.VideoWriter(
                            'output_bounding.avi', fourcc, video_framerate,
                            (frame.shape[1], frame.shape[0]))
                    video_writer.write(frame_bounding)

            if not ret:
                filename = 'output_bounding.avi'
                break
            if rotation_code is not None:
                frame = cv2.rotate(frame, rotation_code)
        else:
            frame = video.read()
            if frame is None:
                break
Example #17
def test_threaded_queue_mode(source, options):
    """
    Test for the Thread Queue Mode in CamGear API
    """
    try:
        if platform.system() == "Linux":
            stream_camgear = CamGear(
                source=source, backend=cv2.CAP_FFMPEG, logging=True, **options
            ).start()
        else:
            stream_camgear = CamGear(source=source, logging=True, **options).start()
        camgear_frames_num = 0
        while True:
            frame = stream_camgear.read()
            if frame is None:
                logger.debug("VidGear Total frames: {}".format(camgear_frames_num))
                break

            time.sleep(0.2)  # dummy computational task

            camgear_frames_num += 1
        stream_camgear.stop()
        actual_frame_num = return_total_frame_count()
        if "THREADED_QUEUE_MODE" in options and not options["THREADED_QUEUE_MODE"]:
            # emulate frame skipping
            assert camgear_frames_num < actual_frame_num
        else:
            assert camgear_frames_num == actual_frame_num
    except Exception as e:
        if isinstance(e, RuntimeError) and source == "im_not_a_source.mp4":
            pass
        else:
            pytest.fail(str(e))
Example #18
def Videocapture_withVidGear(path):
    """
	Function to benchmark VidGear multi-threaded video playback 
	"""
    options = {'THREADED_QUEUE_MODE': False}
    stream = CamGear(source=path, **options).start()
    fps_Vid = FPS().start()
    while True:
        frame = stream.read()
        if frame is None:
            break
        fps_Vid.update()
    fps_Vid.stop()
    stream.stop()
    logger.debug("VidGear")
    logger.debug("total elasped time: {:.2f}".format(
        fps_Vid.total_time_elapsed()))
    logger.debug("approx. FPS: {:.2f}".format(fps_Vid.fps()))
Example #19
def detectCarsFromVideo():
    url = 'https://www.youtube.com/watch?v=Y1jTEyb3wiI'  # alternative: https://www.youtube.com/watch?v=71zeC7LYqLE
    # stream = CamGear(source=url, y_tube = True, logging=True).start() # YouTube Video URL as input
    stream = CamGear(source=url, stream_mode=True,
                     logging=True).start()  # YouTube Video URL as input

    while (True):
        frame = stream.read()
        controlkey = cv2.waitKey(1)
        if frame is not None:
            cars_frame = getCarsFromFrame(frame)
            cv2.imshow('frame', cars_frame)
        else:
            break
        if controlkey == ord('q'):
            break

    stream.stop()
    cv2.destroyAllWindows()
Example #20
def test_network_playback():
	"""
	Testing Direct Network Video Playback capabilities of VidGear (with RTSP streaming)
	"""	
	Url = 'rtsp://184.72.239.149/vod/mp4:BigBuckBunny_175k.mov'
	try:
		output_stream = CamGear(source = Url).start()
		i = 0
		Output_data = []
		while i<10:
			frame = output_stream.read()
			if frame is None:
				break
			Output_data.append(frame)
			i+=1
		output_stream.stop()
		print('Output data shape:', np.array(Output_data).shape)
	except Exception as e:
		pytest.fail(str(e))
Example #21
def get_keypoint(camera_id = 0, filename = None, hrnet_c = 48, hrnet_j = 17, hrnet_weights = "./weights/pose_hrnet_w48_384x288.pth", hrnet_joints_set = "coco", image_resolution = '(384, 288)', single_person = True,
         max_batch_size = 16, disable_vidgear = False, device = None):
    if torch.cuda.is_available():
        torch.backends.cudnn.deterministic = True
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    image_resolution = ast.literal_eval(image_resolution)
    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'
	
    if filename is not None:
        #video = cv2.VideoCapture(filename)
        image = cv2.imread(filename, cv2.IMREAD_COLOR)
        #assert video.isOpened()
    else:
        if disable_vidgear:
            video = cv2.VideoCapture(camera_id)
            assert video.isOpened()
        else:
            video = CamGear(camera_id).start()
    model = SimpleHRNet(
        hrnet_c,
        hrnet_j,
        hrnet_weights,
        resolution=image_resolution,
        multiperson=not single_person,
        max_batch_size=max_batch_size,
        device=device
    )
    pts = model.predict(image)
    resolution = image.shape
    x_len = resolution[0]
    y_len = resolution[1]
    vector = []
    keypoints = pts[0]
    for pt in keypoints:
        pt = list(pt)
        temp = []
        temp.append((pt[0]/x_len))
        temp.append((pt[1]/y_len))
        vector.extend(temp)

    for i, pt in enumerate(pts):
            frame = draw_points_and_skeleton(image, pt, joints_dict()[hrnet_joints_set]['skeleton'], person_index=i,
                                             points_color_palette='gist_rainbow', skeleton_color_palette='jet',
                                             points_palette_samples=10)

    if has_display:
        output_name = filename.split("\\")
        output_name = output_name[-2] + "_" + output_name[-1]			
        cv2.imwrite('tested2\\'+output_name+'.png', frame)
        cv2.imwrite("keypoints_"+filename+".png", frame)
        cv2.imshow('frame.png', frame)
        k = cv2.waitKey(1)
    return vector
Example #22
def test_network_playback():
    """
    Testing Direct Network Video Playback capabilities of VidGear (with RTSP streaming)
    """
    Publictest_rstp_urls = [
        "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov",
        "rtsp://freja.hiof.no:1935/rtplive/definst/hessdalen03.stream",
        "rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa",
        "rtmp://semerkandglb.mediatriple.net:1935/semerkandliveedge/semerkand2",
    ]

    index = 0

    while index < len(Publictest_rstp_urls):
        try:
            output_stream = CamGear(
                source=Publictest_rstp_urls[index], logging=True
            ).start()
            i = 0
            Output_data = []
            while i < 10:
                frame = output_stream.read()
                if frame is None:
                    break
                Output_data.append(frame)
                i += 1
            output_stream.stop()
            logger.debug("Output data shape:", np.array(Output_data).shape)
            if Output_data[-1].shape[:2] > (50, 50):
                break
        except Exception as e:
            if isinstance(e, RuntimeError):
                logger.debug(
                    "`{}` URL is not working".format(Publictest_rstp_urls[index])
                )
                index += 1
                continue
            else:
                pytest.fail(str(e))

    if index == len(Publictest_rstp_urls):
        pytest.fail("Test failed to play any URL!")
Example #23
    def get_frames1(self):
        # import required libraries
        from vidgear.gears import CamGear
        import cv2

        # Add YouTube Video URL as input source (for e.g https://youtu.be/bvetuLwJIkA)
        # and enable Stream Mode (`stream_mode = True`)
        stream = CamGear(source=self.__url, stream_mode=True,
                         logging=True).start()

        skip = 0
        # loop over
        while True:

            # read frames from stream
            frame = stream.read()

            # check for frame if Nonetype
            if frame is None:
                break

            if self._max_dim is not None:
                frame = resize_if_larger(frame, self._max_dim)

            if skip > 0:
                skip = skip - 1
            else:
                yield frame

            k = cv2.waitKey(1) & 0xFF
            if k == ord("q"):
                break
            elif k == ord("s"):
                skip = 10

        # close output window
        cv2.destroyAllWindows()

        # safely close video stream
        stream.stop()
Example #24
def test_threaded_queue_mode():
	"""
	Test for New Thread Queue Mode in CamGear Class
	"""
	actual_frame_num = return_total_frame_count()

	stream_camgear = CamGear(source=return_testvideo_path(), logging=True).start() #start stream on CamGear
	camgear_frames_num = 0
	while True:
		frame = stream_camgear.read()
		if frame is None:
			print(camgear_frames_num)
			break
		
		time.sleep(0.2) #dummy computational task

		camgear_frames_num += 1
	stream_camgear.stop()

	assert camgear_frames_num == actual_frame_num
Example #25
class CamSource():
    def init_class(self, cls_args):
        #self.dev = cv2.VideoCapture(2)

        dev_options = {
            'CAP_PROP_FRAME_WIDTH': 1920,
            'CAP_PROP_FRAME_HEIGHT': 1080,
            'CAP_PROP_FPS': 30
        }
        self.dev = CamGear(source=2, **dev_options)
        self.dev.start()
        #width = 1920
        #height = 1080
        #self.dev.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        #self.dev.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        #self.data = b'x' * 1920 * 1080 * 3

    def run(self, args):
        time.sleep(0.032)
        self.frame = self.dev.read()
        return [self.frame, time.time()]
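A brief hypothetical driver for the CamSource class above, assuming the [frame, timestamp] pair returned by run() and using CamGear's stop() for cleanup:

# Hypothetical usage of CamSource; cls_args/args are unused by the snippet above.
cam = CamSource()
cam.init_class(None)
try:
    for _ in range(100):
        frame, timestamp = cam.run(None)
        if frame is None:
            break
finally:
    cam.dev.stop()  # CamGear exposes stop() for releasing the source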
Example #26
def scrape_live(url, duration=60, show=False):
    print('Scraping')
    # create pafy object. Just used to extract name of YouTube video
    pafy_vid = pafy.new(url)
    title = folder
    title += pafy_vid.title
    # cleanup title so nicer for video_naming
    title = title.replace(' ', '-')
    title = title.replace('.', '')
    # get time
    now = datetime.now()
    # add time stamp
    title += now.strftime("-%m_%d_%Y-%H_%M_%S")
    file_name = title + '.' + file_type

    stream = CamGear(source=url, y_tube=True, time_delay=1,
                     logging=True).start()
    fourcc = cv2.VideoWriter_fourcc(*codec)
    out = cv2.VideoWriter(file_name, fourcc, fps, (1920, 1080))

    start = time.time()
    frames = 0
    while time.time() - start < duration:
        frame = stream.read()

        # check for None before counting/writing, so a dead stream exits cleanly
        if frame is None:
            break

        frames += 1
        out.write(frame)

        if show:
            cv2.imshow('Output Frame', frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
    if show:
        cv2.destroyAllWindows()
    stream.stop()
    out.release()
    print('Done!')
Example #27
def test_rtf_livestream():
    """
    Testing Real-Time Frames Mode with livestream.
    """
    mpd_file_path = return_mpd_path()
    try:
        # Open stream
        stream = CamGear(source=return_testvideo_path()).start()
        stream_params = {
            "-livestream": True,
        }
        streamer = StreamGear(output=mpd_file_path, **stream_params)
        while True:
            frame = stream.read()
            # check if frame is None
            if frame is None:
                break
            streamer.stream(frame)
        stream.stop()
        streamer.terminate()
    except Exception as e:
        pytest.fail(str(e))
Example #28
def test_write(conversion):
    """
    Testing WriteGear Compression-Mode (FFmpeg) Writer capabilities in different colorspaces with CamGear API.
    """
    # Open stream
    stream = CamGear(
        source=return_testvideo_path(), colorspace=conversion, logging=True
    ).start()
    writer = WriteGear(
        output_filename="Output_tw.mp4", custom_ffmpeg=return_static_ffmpeg()
    )  # Define writer
    while True:
        frame = stream.read()
        # check if frame is None
        if frame is None:
            # if True break the infinite loop
            break
        if conversion == "COLOR_BGR2RGBA":
            writer.write(frame, rgb_mode=True)
        elif conversion == "COLOR_BGR2INVALID":
            # test invalid color_space value
            stream.color_space = "wrong_colorspace"
            conversion = "COLOR_BGR2INVALID2"
            writer.write(frame)
        elif conversion == "COLOR_BGR2INVALID2":
            # test wrong color_space value
            stream.color_space = 1546755
            conversion = ""
            writer.write(frame)
        else:
            writer.write(frame)
    stream.stop()
    writer.close()
    basepath, _ = os.path.split(return_static_ffmpeg())
    ffprobe_path = os.path.join(
        basepath, "ffprobe.exe" if os.name == "nt" else "ffprobe"
    )
    result = check_output(
        [
            ffprobe_path,
            "-v",
            "error",
            "-count_frames",
            "-i",
            os.path.abspath("Output_tw.mp4"),
        ]
    )
    if result:
        if not isinstance(result, string_types):
            result = result.decode()
        logger.debug("Result: {}".format(result))
        for i in ["Error", "Invalid", "error", "invalid"]:
            assert not (i in result)
    os.remove(os.path.abspath("Output_tw.mp4"))
Example #29
def camera_gear(id):

    cam = cctv_cam(id)
    print(cam)
    options = {
        "CAP_PROP_FRAME_WIDTH": 320,
        "CAP_PROP_FRAME_HEIGHT": 240,
        "CAP_PROP_FPS": 70
    }
    stream = CamGear(source=cam, **options).start()

    while True:

        frame = stream.read()

        # check for None before resizing, so the loop exits cleanly at stream end
        if frame is None:
            break

        frame = imutils.resize(frame, width=450)

        frame = cv2.imencode('.png', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/png\r\n\r\n' + frame + b'\r\n')
    stream.stop()
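camera_gear() above yields a multipart MJPEG-style byte stream, the shape expected by a streaming HTTP response; a hypothetical Flask route wiring it up (Flask is an assumption here, not part of the original example):

from flask import Flask, Response

app = Flask(__name__)

@app.route("/video/<int:id>")
def video_feed(id):
    # Stream the multipart frames produced by camera_gear() above.
    return Response(
        camera_gear(id),
        mimetype="multipart/x-mixed-replace; boundary=frame",
    )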
Example #30
def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights,
         hrnet_joints_set, image_resolution, disable_tracking, max_nof_people,
         max_batch_size, disable_vidgear, save_video, video_format,
         video_framerate, device):
    if device is not None:
        device = torch.device(device)
    else:
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')

    # print(device)

    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'
    video_writer = None

    if filename is not None:
        rotation_code = check_video_rotation(filename)
        video = cv2.VideoCapture(filename)
        assert video.isOpened()
    else:
        rotation_code = None
        if disable_vidgear:
            video = cv2.VideoCapture(camera_id)
            assert video.isOpened()
        else:
            video = CamGear(camera_id).start()

    model = SimpleHigherHRNet(hrnet_c,
                              hrnet_j,
                              hrnet_weights,
                              resolution=image_resolution,
                              return_bounding_boxes=not disable_tracking,
                              max_nof_people=max_nof_people,
                              max_batch_size=max_batch_size,
                              device=device)

    if not disable_tracking:
        prev_boxes = None
        prev_pts = None
        prev_person_ids = None
        next_person_id = 0

    while True:
        t = time.time()

        if filename is not None or disable_vidgear:
            ret, frame = video.read()
            if not ret:
                break
            if rotation_code is not None:
                frame = cv2.rotate(frame, rotation_code)
        else:
            frame = video.read()
            if frame is None:
                break

        pts = model.predict(frame)

        if not disable_tracking:
            boxes, pts = pts

        if not disable_tracking:
            if len(pts) > 0:
                if prev_pts is None and prev_person_ids is None:
                    person_ids = np.arange(next_person_id,
                                           len(pts) + next_person_id,
                                           dtype=np.int32)
                    next_person_id = len(pts) + 1
                else:
                    boxes, pts, person_ids = find_person_id_associations(
                        boxes=boxes,
                        pts=pts,
                        prev_boxes=prev_boxes,
                        prev_pts=prev_pts,
                        prev_person_ids=prev_person_ids,
                        next_person_id=next_person_id,
                        pose_alpha=0.2,
                        similarity_threshold=0.4,
                        smoothing_alpha=0.1,
                    )
                    next_person_id = max(next_person_id,
                                         np.max(person_ids) + 1)
            else:
                person_ids = np.array((), dtype=np.int32)

            prev_boxes = boxes.copy()
            prev_pts = pts.copy()
            prev_person_ids = person_ids

        else:
            person_ids = np.arange(len(pts), dtype=np.int32)

        for i, (pt, pid) in enumerate(zip(pts, person_ids)):
            frame = draw_points_and_skeleton(
                frame,
                pt,
                joints_dict()[hrnet_joints_set]['skeleton'],
                person_index=pid,
                points_color_palette='gist_rainbow',
                skeleton_color_palette='jet',
                points_palette_samples=10)

        fps = 1. / (time.time() - t)
        print('\rframerate: %f fps / detected people: %d' % (fps, len(pts)),
              end='')

        if has_display:
            cv2.imshow('frame.png', frame)
            k = cv2.waitKey(1)
            if k == 27:  # Esc button
                if disable_vidgear:
                    video.release()
                else:
                    video.stop()
                break
        else:
            cv2.imwrite('frame.png', frame)

        if save_video:
            if video_writer is None:
                fourcc = cv2.VideoWriter_fourcc(*video_format)  # video format
                video_writer = cv2.VideoWriter(
                    'output.avi', fourcc, video_framerate,
                    (frame.shape[1], frame.shape[0]))
            video_writer.write(frame)

    if save_video:
        video_writer.release()