Code Example #1
def test_youtube_playback(url):
    """
	Testing Youtube Video Playback capabilities of VidGear
	"""
    try:
        height = 0
        width = 0
        fps = 0
        # get params
        stream = CamGear(source=url, y_tube=True,
                         logging=True).start()  # YouTube Video URL as input
        while True:
            frame = stream.read()
            if frame is None:
                break
            if height == 0 or width == 0:
                fps = stream.framerate
                height, width = frame.shape[:2]
                break
        stream.stop()
        # get true params
        true_video_param = return_youtubevideo_params(url)
        # log everything
        logger.debug("WIDTH: {} HEIGHT: {} FPS: {}".format(
            true_video_param[0], true_video_param[1], true_video_param[2]))
        logger.debug("WIDTH: {} HEIGHT: {} FPS: {}".format(width, height, fps))
        # assert measured params against ground-truth results
        assert (true_video_param[0] == width and true_video_param[1] == height
                and round(true_video_param[2], 1) == round(fps, 1))
    except Exception as e:
        if isinstance(e, (RuntimeError, ValueError)) and url == "im_not_a_url":
            pass
        else:
            pytest.fail(str(e))
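The return_youtubevideo_params() helper used above is not shown here. A plausible implementation queries the video's metadata through youtube_dl; the sketch below rests on that assumption and is not necessarily the project's actual helper:

import youtube_dl

def return_youtubevideo_params(url):
    """Return (width, height, fps) ground truth for a YouTube URL without downloading it."""
    # assumption: youtube_dl's info dict exposes width/height/fps for the selected format
    ydl = youtube_dl.YoutubeDL({"quiet": True, "no_warnings": True})
    with ydl:
        meta = ydl.extract_info(url, download=False)
    return (int(meta["width"]), int(meta["height"]), float(meta["fps"]))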
Code Example #2
def test_write(conversion):
	"""
	Testing WriteGear Compression-Mode (FFmpeg) Writer capabilities in different colorspaces
	"""
	# Open stream
	stream = CamGear(source=return_testvideo_path(), colorspace=conversion, logging=True).start()
	writer = WriteGear(output_filename='Output_tw.mp4', custom_ffmpeg=return_static_ffmpeg())  # define writer
	while True:
		frame = stream.read()
		# check if frame is None
		if frame is None:
			#if True break the infinite loop
			break

		if conversion in ['COLOR_BGR2RGB', 'COLOR_BGR2RGBA']:
			writer.write(frame, rgb_mode = True)
		else:
			writer.write(frame)
	stream.stop()
	writer.close()
	basepath, _ = os.path.split(return_static_ffmpeg())
	ffprobe_path = os.path.join(basepath, 'ffprobe.exe' if os.name == 'nt' else 'ffprobe')
	result = check_output([ffprobe_path, "-v", "error", "-count_frames", "-i", os.path.abspath('Output_tw.mp4')])
	if result:
		if not isinstance(result, string_types):
			result = result.decode()
		logger.debug('Result: {}'.format(result))
		for i in ["Error", "Invalid", "error", "invalid"]:
			assert not(i in result)
	os.remove(os.path.abspath('Output_tw.mp4'))
Code Example #3
def test_rtf_stream(conversion):
    """
    Testing Real-Time Frames Mode
    """
    mpd_file_path = return_mpd_path()
    try:
        # Open stream
        stream = CamGear(source=return_testvideo_path(),
                         colorspace=conversion).start()
        stream_params = {
            "-clear_prev_assets": True,
            "-input_framerate": "invalid",
        }
        streamer = StreamGear(output=mpd_file_path, **stream_params)
        while True:
            frame = stream.read()
            # check if frame is None
            if frame is None:
                break
            if conversion == "COLOR_BGR2RGBA":
                streamer.stream(frame, rgb_mode=True)
            else:
                streamer.stream(frame)
        stream.stop()
        streamer.terminate()
        mpd_file = [
            os.path.join(mpd_file_path, f) for f in os.listdir(mpd_file_path)
            if f.endswith(".mpd")
        ]
        assert len(mpd_file) == 1, "Failed to create MPD file!"
        assert check_valid_mpd(mpd_file[0])
    except Exception as e:
        pytest.fail(str(e))
Code Example #4
File: test_camgear.py Project: yuhtc/vidgear
def test_stream_mode(url, quality, parameters):
    """
    Testing Stream Mode Playback capabilities of CamGear
    """
    try:
        height = 0
        width = 0
        fps = 0
        options = {"STREAM_RESOLUTION": quality, "STREAM_PARAMS": parameters}
        # get params
        stream = CamGear(
            source=url, stream_mode=True, logging=True, **options
        ).start()  # YouTube Video URL as input
        while True:
            frame = stream.read()
            if frame is None:
                break
            if height == 0 or width == 0:
                fps = stream.framerate
                height, width = frame.shape[:2]
                break
        stream.stop()
        logger.debug("WIDTH: {} HEIGHT: {} FPS: {}".format(width, height, fps))
    except Exception as e:
        if isinstance(e, (RuntimeError, ValueError)) and (
            url == "im_not_a_url" or platform.system() in ["Windows", "Darwin"]
        ):
            pass
        else:
            pytest.fail(str(e))
Code Example #5
File: test_camgear.py Project: yuhtc/vidgear
def test_threaded_queue_mode(source, options):
    """
    Test for the Thread Queue Mode in CamGear API
    """
    try:
        if platform.system() == "Linux":
            stream_camgear = CamGear(
                source=source, backend=cv2.CAP_FFMPEG, logging=True, **options
            ).start()
        else:
            stream_camgear = CamGear(source=source, logging=True, **options).start()
        camgear_frames_num = 0
        while True:
            frame = stream_camgear.read()
            if frame is None:
                logger.debug("VidGear Total frames: {}".format(camgear_frames_num))
                break

            time.sleep(0.2)  # dummy computational task

            camgear_frames_num += 1
        stream_camgear.stop()
        actual_frame_num = return_total_frame_count()
        if "THREADED_QUEUE_MODE" in options and not options["THREADED_QUEUE_MODE"]:
            # emulate frame skipping
            assert camgear_frames_num < actual_frame_num
        else:
            assert camgear_frames_num == actual_frame_num
    except Exception as e:
        if isinstance(e, RuntimeError) and source == "im_not_a_source.mp4":
            pass
        else:
            pytest.fail(str(e))
Code Example #6
def test_rtf_livestream(format):
    """
    Testing Real-Time Frames Mode with livestream.
    """
    assets_file_path = return_assets_path(format != "dash")

    try:
        # Open stream
        options = {"THREAD_TIMEOUT": 300}
        stream = CamGear(source=return_testvideo_path(), **options).start()
        stream_params = {
            "-livestream": True,
        }
        streamer = StreamGear(output=assets_file_path, format=format, **stream_params)
        while True:
            frame = stream.read()
            # check if frame is None
            if frame is None:
                break
            streamer.stream(frame)
        stream.stop()
        streamer.terminate()
    except Exception as e:
        if not isinstance(e, queue.Empty):
            pytest.fail(str(e))
Code Example #7
def test_write(conversion):
    """
    Testing WriteGear Compression-Mode (FFmpeg) Writer capabilities in different colorspaces with the CamGear API.
    """
    # Open stream
    stream = CamGear(
        source=return_testvideo_path(), colorspace=conversion, logging=True
    ).start()
    writer = WriteGear(
        output_filename="Output_tw.mp4", custom_ffmpeg=return_static_ffmpeg()
    )  # Define writer
    while True:
        frame = stream.read()
        # check if frame is None
        if frame is None:
            # if True break the infinite loop
            break
        if conversion == "COLOR_BGR2RGBA":
            writer.write(frame, rgb_mode=True)
        elif conversion == "COLOR_BGR2INVALID":
            # test invalid color_space value
            stream.color_space = "wrong_colorspace"
            conversion = "COLOR_BGR2INVALID2"
            writer.write(frame)
        elif conversion == "COLOR_BGR2INVALID2":
            # test wrong color_space value
            stream.color_space = 1546755
            conversion = ""
            writer.write(frame)
        else:
            writer.write(frame)
    stream.stop()
    writer.close()
    basepath, _ = os.path.split(return_static_ffmpeg())
    ffprobe_path = os.path.join(
        basepath, "ffprobe.exe" if os.name == "nt" else "ffprobe"
    )
    result = check_output(
        [
            ffprobe_path,
            "-v",
            "error",
            "-count_frames",
            "-i",
            os.path.abspath("Output_tw.mp4"),
        ]
    )
    if result:
        if not isinstance(result, string_types):
            result = result.decode()
        logger.debug("Result: {}".format(result))
        for i in ["Error", "Invalid", "error", "invalid"]:
            assert not (i in result)
    os.remove(os.path.abspath("Output_tw.mp4"))
Code Example #8
def test_rtf_stream(conversion, format):
    """
    Testing Real-Time Frames Mode
    """
    assets_file_path = return_assets_path(format != "dash")

    try:
        # Open stream
        options = {"THREAD_TIMEOUT": 300}
        stream = CamGear(
            source=return_testvideo_path(), colorspace=conversion, **options
        ).start()
        stream_params = {
            "-clear_prev_assets": True,
            "-input_framerate": "invalid",
        }
        if format == "hls":
            stream_params.update(
                {
                    "-hls_base_url": return_assets_path(
                        False if format == "dash" else True
                    )
                    + os.sep
                }
            )
        streamer = StreamGear(output=assets_file_path, format=format, **stream_params)
        while True:
            frame = stream.read()
            # check if frame is None
            if frame is None:
                break
            if conversion == "COLOR_BGR2RGBA":
                streamer.stream(frame, rgb_mode=True)
            else:
                streamer.stream(frame)
        stream.stop()
        streamer.terminate()
        asset_file = [
            os.path.join(assets_file_path, f)
            for f in os.listdir(assets_file_path)
            if f.endswith(".mpd" if format == "dash" else ".m3u8")
        ]
        assert len(asset_file) == 1, "Failed to create asset file!"
        if format == "dash":
            assert check_valid_mpd(asset_file[0]), "Test Failed!"
        else:
            assert extract_meta_video(asset_file[0]), "Test Failed!"
    except Exception as e:
        if not isinstance(e, queue.Empty):
            pytest.fail(str(e))
Code Example #9
def playback(level):
    """
    tests CamGear API's playback capabilities
    """
    options = {"THREADED_QUEUE_MODE": False}
    stream = CamGear(source=level, **options).start()
    fps = FPS().start()
    while True:
        frame = stream.read()
        if frame is None:
            break
        fps.update()
    stream.stop()
    logger.info("approx. FPS: {:.2f}".format(fps.average_fps()))
Code Example #10
def playback(level):
	"""
	Function to test VidGear playback capabilities
	"""
	stream = CamGear(source=level).start()
	fps = FPS().start()
	while True:
		frame = stream.read()
		if frame is None:
			break
		fps.update()
	stream.stop()
	fps.stop()
	print("[LOG] total elasped time: {:.2f}".format(fps.total_time_elapsed()))
	print("[LOG] approx. FPS: {:.2f}".format(fps.fps()))
Code Example #11
def playback(level):
    """
	tests CamGear API's playback capabilities
	"""
    options = {'THREADED_QUEUE_MODE': False}
    stream = CamGear(source=level, **options).start()
    fps = FPS().start()
    while True:
        frame = stream.read()
        if frame is None:
            break
        fps.update()
    stream.stop()
    fps.stop()
    logger.debug("total elasped time: {:.2f}".format(fps.total_time_elapsed()))
    logger.debug("approx. FPS: {:.2f}".format(fps.fps()))
Code Example #12
def Videocapture_withVidGear(path):
    """
	Function to benchmark VidGear multi-threaded video playback 
	"""
    stream = CamGear(source=path).start()
    fps_Vid = FPS().start()
    while True:
        frame = stream.read()
        if frame is None:
            break
        fps_Vid.update()
    fps_Vid.stop()
    stream.stop()
    print("VidGear")
    print("[LOG] total elasped time: {:.2f}".format(
        fps_Vid.total_time_elapsed()))
    print("[LOG] approx. FPS: {:.2f}".format(fps_Vid.fps()))
Code Example #13
def Videocapture_withVidGear(path):
    """
	Function to benchmark VidGear multi-threaded video playback 
	"""
    options = {'THREADED_QUEUE_MODE': False}
    stream = CamGear(source=path, **options).start()
    fps_Vid = FPS().start()
    while True:
        frame = stream.read()
        if frame is None:
            break
        fps_Vid.update()
    fps_Vid.stop()
    stream.stop()
    logger.debug("VidGear")
    logger.debug("total elasped time: {:.2f}".format(
        fps_Vid.total_time_elapsed()))
    logger.debug("approx. FPS: {:.2f}".format(fps_Vid.fps()))
Code Example #14
def test_network_playback():
	"""
	Testing Direct Network Video Playback capabilities of VidGear(with rtsp streaming)
	"""	
	Url = 'rtsp://184.72.239.149/vod/mp4:BigBuckBunny_175k.mov'
	try:
		output_stream = CamGear(source = Url).start()
		i = 0
		Output_data = []
		while i<10:
			frame = output_stream.read()
			if frame is None:
				break
			Output_data.append(frame)
			i+=1
		output_stream.stop()
		print('Output data shape:', np.array(Output_data).shape)
	except Exception as e:
		pytest.fail(str(e))
Code Example #15
    def get_frames1(self):
        # import required libraries
        from vidgear.gears import CamGear
        import cv2

        # Add YouTube Video URL as input source (e.g. https://youtu.be/bvetuLwJIkA)
        # and enable Stream Mode (`stream_mode = True`)
        stream = CamGear(source=self.__url, stream_mode=True,
                         logging=True).start()

        skip = 0
        # loop over
        while True:

            # read frames from stream
            frame = stream.read()

            # break if the frame is None (end of stream)
            if frame is None:
                break

            if self._max_dim is not None:
                frame = resize_if_larger(frame, self._max_dim)

            if skip > 0:
                skip = skip - 1
            else:
                yield frame

            k = cv2.waitKey(1) & 0xFF
            if k == ord("q"):
                break
            elif k == ord("s"):
                skip = 10

        # close output window
        cv2.destroyAllWindows()

        # safely close video stream
        stream.stop()
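Since get_frames1() is a generator, frames are consumed lazily by iterating over it. A hypothetical caller might look like the following; the enclosing class is not shown above, so the FrameSource name and its constructor are assumptions:

# Hypothetical usage; the FrameSource class name and constructor are assumptions.
source = FrameSource("https://youtu.be/bvetuLwJIkA")
for frame in source.get_frames1():
    # display each yielded frame so the waitKey() polling inside the generator has a window
    cv2.imshow("Output Frame", frame)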
Code Example #16
File: test_camgear.py Project: yuhtc/vidgear
def test_network_playback():
    """
    Testing Direct Network Video Playback capabilities of VidGear(with rtsp streaming)
    """
    Publictest_rstp_urls = [
        "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov",
        "rtsp://freja.hiof.no:1935/rtplive/definst/hessdalen03.stream",
        "rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa",
        "rtmp://semerkandglb.mediatriple.net:1935/semerkandliveedge/semerkand2",
    ]

    index = 0

    while index < len(Publictest_rstp_urls):
        try:
            output_stream = CamGear(
                source=Publictest_rstp_urls[index], logging=True
            ).start()
            i = 0
            Output_data = []
            while i < 10:
                frame = output_stream.read()
                if frame is None:
                    break
                Output_data.append(frame)
                i += 1
            output_stream.stop()
            logger.debug("Output data shape:", np.array(Output_data).shape)
            if Output_data and Output_data[-1].shape[:2] > (50, 50):
                break
        except Exception as e:
            if isinstance(e, RuntimeError):
                logger.debug(
                    "`{}` URL is not working".format(Publictest_rstp_urls[index])
                )
                index += 1
                continue
            else:
                pytest.fail(str(e))

    if index == len(Publictest_rstp_urls):
        pytest.fail("Test failed to play any URL!")
Code Example #17
File: test_camgear.py Project: winnerineast/vidgear
def test_threaded_queue_mode():
	"""
	Test for New Thread Queue Mode in CamGear Class
	"""
	actual_frame_num = return_total_frame_count()

	stream_camgear = CamGear(source=return_testvideo_path(), logging=True).start() #start stream on CamGear
	camgear_frames_num = 0
	while True:
		frame = stream_camgear.read()
		if frame is None:
			print(camgear_frames_num)
			break
		
		time.sleep(0.2) #dummy computational task

		camgear_frames_num += 1
	stream_camgear.stop()

	assert camgear_frames_num == actual_frame_num
Code Example #18
def scrape_live(url, duration=60, show=False):
    print('Scraping')
    # create pafy object. Just used to extract name of YouTube video
    pafy_vid = pafy.new(url)
    title = folder
    title += pafy_vid.title
    # cleanup title so nicer for video_naming
    title = title.replace(' ', '-')
    title = title.replace('.', '')
    # get time
    now = datetime.now()
    # add time stamp
    title += now.strftime("-%m_%d_%Y-%H_%M_%S")
    file_name = title + '.' + file_type

    stream = CamGear(source=url, y_tube=True, time_delay=1,
                     logging=True).start()
    fourcc = cv2.VideoWriter_fourcc(*codec)
    out = cv2.VideoWriter(file_name, fourcc, fps, (1920, 1080))

    start = time.time()
    frames = 0
    while time.time() - start < duration:
        frame = stream.read()

        # check for None before writing, since read() returns None at end of stream
        if frame is None:
            break

        frames += 1
        out.write(frame)

        if show:
            cv2.imshow('Output Frame', frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
    if show:
        cv2.destroyAllWindows()
    stream.stop()
    out.release()
    print('Done!')
Code Example #19
def test_rtf_livestream():
    """
    Testing Real-Time Frames Mode with livestream.
    """
    mpd_file_path = return_mpd_path()
    try:
        # Open stream
        stream = CamGear(source=return_testvideo_path()).start()
        stream_params = {
            "-livestream": True,
        }
        streamer = StreamGear(output=mpd_file_path, **stream_params)
        while True:
            frame = stream.read()
            # check if frame is None
            if frame is None:
                break
            streamer.stream(frame)
        stream.stop()
        streamer.terminate()
    except Exception as e:
        pytest.fail(str(e))
Code Example #20
def camera_gear(id):

    cam = cctv_cam(id)
    print(cam)
    options = {
        "CAP_PROP_FRAME_WIDTH": 320,
        "CAP_PROP_FRAME_HEIGHT": 240,
        "CAP_PROP_FPS": 70
    }
    stream = CamGear(source=cam, **options).start()

    while True:

        frame = stream.read()

        # check for None before resizing, since read() returns None at end of stream
        if frame is None:
            break

        frame = imutils.resize(frame, width=450)

        frame = cv2.imencode('.png', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/png\r\n\r\n' + frame + b'\r\n')
    stream.stop()
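camera_gear() yields multipart PNG chunks, which suggests it backs a streaming HTTP endpoint. A minimal Flask route consuming it might look like this (the Flask wiring is an assumption; the original app is not shown):

from flask import Flask, Response

app = Flask(__name__)

@app.route("/video_feed/<int:id>")
def video_feed(id):
    # relay the multipart chunks produced by camera_gear() to the browser
    return Response(camera_gear(id),
                    mimetype="multipart/x-mixed-replace; boundary=frame")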
Code Example #21
def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights,
         hrnet_joints_set, image_resolution, disable_tracking, max_nof_people,
         max_batch_size, disable_vidgear, save_video, video_format,
         video_framerate, device):
    if device is not None:
        device = torch.device(device)
    else:
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')

    # print(device)

    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'
    video_writer = None

    if filename is not None:
        rotation_code = check_video_rotation(filename)
        video = cv2.VideoCapture(filename)
        assert video.isOpened()
    else:
        rotation_code = None
        if disable_vidgear:
            video = cv2.VideoCapture(camera_id)
            assert video.isOpened()
        else:
            video = CamGear(camera_id).start()

    model = SimpleHigherHRNet(hrnet_c,
                              hrnet_j,
                              hrnet_weights,
                              resolution=image_resolution,
                              return_bounding_boxes=not disable_tracking,
                              max_nof_people=max_nof_people,
                              max_batch_size=max_batch_size,
                              device=device)

    if not disable_tracking:
        prev_boxes = None
        prev_pts = None
        prev_person_ids = None
        next_person_id = 0

    while True:
        t = time.time()

        if filename is not None or disable_vidgear:
            ret, frame = video.read()
            if not ret:
                break
            if rotation_code is not None:
                frame = cv2.rotate(frame, rotation_code)
        else:
            frame = video.read()
            if frame is None:
                break

        pts = model.predict(frame)

        if not disable_tracking:
            boxes, pts = pts

        if not disable_tracking:
            if len(pts) > 0:
                if prev_pts is None and prev_person_ids is None:
                    person_ids = np.arange(next_person_id,
                                           len(pts) + next_person_id,
                                           dtype=np.int32)
                    next_person_id = len(pts) + 1
                else:
                    boxes, pts, person_ids = find_person_id_associations(
                        boxes=boxes,
                        pts=pts,
                        prev_boxes=prev_boxes,
                        prev_pts=prev_pts,
                        prev_person_ids=prev_person_ids,
                        next_person_id=next_person_id,
                        pose_alpha=0.2,
                        similarity_threshold=0.4,
                        smoothing_alpha=0.1,
                    )
                    next_person_id = max(next_person_id,
                                         np.max(person_ids) + 1)
            else:
                person_ids = np.array((), dtype=np.int32)

            prev_boxes = boxes.copy()
            prev_pts = pts.copy()
            prev_person_ids = person_ids

        else:
            person_ids = np.arange(len(pts), dtype=np.int32)

        for i, (pt, pid) in enumerate(zip(pts, person_ids)):
            frame = draw_points_and_skeleton(
                frame,
                pt,
                joints_dict()[hrnet_joints_set]['skeleton'],
                person_index=pid,
                points_color_palette='gist_rainbow',
                skeleton_color_palette='jet',
                points_palette_samples=10)

        fps = 1. / (time.time() - t)
        print('\rframerate: %f fps / detected people: %d' % (fps, len(pts)),
              end='')

        if has_display:
            cv2.imshow('frame.png', frame)
            k = cv2.waitKey(1)
            if k == 27:  # Esc button
                if disable_vidgear:
                    video.release()
                else:
                    video.stop()
                break
        else:
            cv2.imwrite('frame.png', frame)

        if save_video:
            if video_writer is None:
                fourcc = cv2.VideoWriter_fourcc(*video_format)  # video format
                video_writer = cv2.VideoWriter(
                    'output.avi', fourcc, video_framerate,
                    (frame.shape[1], frame.shape[0]))
            video_writer.write(frame)

    if save_video:
        video_writer.release()
Code Example #22
File: live-demo.py Project: timtensor/simple-HRNet
def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights,
         hrnet_joints_set, single_person, max_batch_size, disable_vidgear,
         device):
    if device is not None:
        device = torch.device(device)
    else:
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            device = torch.device('cuda:0')
        else:
            device = torch.device('cpu')

    print(device)

    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'

    if filename is not None:
        video = cv2.VideoCapture(filename)
        assert video.isOpened()
    else:
        if disable_vidgear:
            video = cv2.VideoCapture(camera_id)
            assert video.isOpened()
        else:
            video = CamGear(camera_id).start()

    model = SimpleHRNet(hrnet_c,
                        hrnet_j,
                        hrnet_weights,
                        multiperson=not single_person,
                        max_batch_size=max_batch_size,
                        device=device)

    while True:
        if disable_vidgear:
            ret, frame = video.read()
            if not ret:
                break
        else:
            frame = video.read()
            if frame is None:
                break

        pts = model.predict(frame)

        for i, pt in enumerate(pts):
            frame = draw_points_and_skeleton(
                frame,
                pt,
                joints_dict()[hrnet_joints_set]['skeleton'],
                person_index=i,
                joints_color_palette='gist_rainbow',
                skeleton_color_palette='jet',
                joints_palette_samples=10)

        if has_display:
            cv2.imshow('frame.png', frame)
            k = cv2.waitKey(1)
            if k == 27:  # Esc button
                if disable_vidgear:
                    video.release()
                else:
                    video.stop()
                break
        else:
            cv2.imwrite('frame.png', frame)
Code Example #23
File: app.py Project: aniketchandak/YogAi
def open_app(camera_id=0,
             filename=None,
             hrnet_c=48,
             hrnet_j=17,
             hrnet_weights="./weights/pose_hrnet_w48_384x288.pth",
             hrnet_joints_set="coco",
             image_resolution='(384, 288)',
             single_person=True,
             max_batch_size=16,
             disable_vidgear=False,
             device=None):
    if device is not None:
        device = torch.device(device)
    else:
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            device = torch.device('cuda:0')
        else:
            device = torch.device('cpu')
    image_resolution = ast.literal_eval(image_resolution)
    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'
    if filename is not None:
        video = cv2.VideoCapture(filename)
        assert video.isOpened()
    else:
        if disable_vidgear:
            video = cv2.VideoCapture(camera_id)
            assert video.isOpened()
        else:
            video = CamGear(camera_id).start()

    model = SimpleHRNet(hrnet_c,
                        hrnet_j,
                        hrnet_weights,
                        resolution=image_resolution,
                        multiperson=not single_person,
                        max_batch_size=max_batch_size,
                        device=device)
    loaded_model = pickle.load(open("mlp_model_best.sav", 'rb'))
    no_to_label = {
        0: "tree",
        1: "warrior1",
        2: "warrior2",
        3: "childs",
        4: "downwarddog",
        5: "plank",
        6: "mountain",
        7: "trianglepose"
    }
    image_to_blob = {}
    for id, path in no_to_label.items():
        images = [
            cv2.imread(file)
            for file in glob.glob('sampleposes\\' + path + '.jpg')
        ]
        image_to_blob[id] = images
    while True:
        if filename is not None or disable_vidgear:
            ret, frame = video.read()
            if not ret:
                break
        else:
            frame = video.read()
            if frame is None:
                break
        pts = model.predict(frame)
        resolution = frame.shape
        x_len = resolution[0]
        y_len = resolution[1]
        vector = []
        if len(pts) == 0:
            continue
        keypoints = pts[0]

        for pt in keypoints:
            pt = list(pt)
            temp = []
            temp.append((pt[0] / x_len))
            temp.append((pt[1] / y_len))
            vector.extend(temp)

        vector = list(vector)
        predicted_pose = loaded_model.predict([vector])
        text = no_to_label[predicted_pose[0]] + " pose"
        cv2.putText(image_to_blob[predicted_pose[0]][0], text,
                    bottomLeftCornerOfText, font, fontScale, fontColor,
                    lineType)
        cv2.imshow("Suggestion", image_to_blob[predicted_pose[0]][0])
        k = cv2.waitKey(1)
        for i, pt in enumerate(pts):
            frame = draw_points_and_skeleton(
                frame,
                pt,
                joints_dict()[hrnet_joints_set]['skeleton'],
                person_index=i,
                points_color_palette='gist_rainbow',
                skeleton_color_palette='jet',
                points_palette_samples=10)

        if has_display:
            cv2.imshow('frame.png', frame)
            k = cv2.waitKey(1)
            if k == 27:  # Esc button
                if disable_vidgear:
                    video.release()
                else:
                    video.stop()
                break
        else:
            cv2.imwrite('frame.png', frame)
Code Example #24
File: live-demo.py Project: jaeseo-park/simple-HRNet
def main(camera_id, filename, foldername, hrnet_m, hrnet_c, hrnet_j,
         hrnet_weights, hrnet_joints_set, image_resolution, single_person,
         use_tiny_yolo, disable_tracking, max_batch_size, disable_vidgear,
         save_video, video_format, video_framerate, device):
    if device is not None:
        device = torch.device(device)
    else:
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            device = torch.device('cuda:0')
        else:
            device = torch.device('cpu')

    image_resolution = ast.literal_eval(image_resolution)
    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'
    video_writer = None
    falldown = FallDown()

    if foldername is not None:
        images_path = foldername
        images_name = os.listdir(images_path)
        images_path = [os.path.join(images_path, name) for name in images_name]
        images_path.sort()

    else:
        if filename is not None:
            rotation_code = check_video_rotation(filename)
            video = cv2.VideoCapture(filename)
            assert video.isOpened()
        else:
            rotation_code = None
            if disable_vidgear:
                video = cv2.VideoCapture(camera_id)
                assert video.isOpened()
            else:
                video = CamGear(camera_id).start()

    if use_tiny_yolo:
        yolo_model_def = "./models/detectors/yolo/config/yolov3-tiny.cfg"
        yolo_class_path = "./models/detectors/yolo/data/coco.names"
        yolo_weights_path = "./models/detectors/yolo/weights/yolov3-tiny.weights"
    else:
        yolo_model_def = "./models/detectors/yolo/config/yolov3.cfg"
        yolo_class_path = "./models/detectors/yolo/data/coco.names"
        yolo_weights_path = "./models/detectors/yolo/weights/yolov3.weights"

    model = SimpleHRNet(hrnet_c,
                        hrnet_j,
                        hrnet_weights,
                        model_name=hrnet_m,
                        resolution=image_resolution,
                        multiperson=not single_person,
                        return_bounding_boxes=not disable_tracking,
                        max_batch_size=max_batch_size,
                        yolo_model_def=yolo_model_def,
                        yolo_class_path=yolo_class_path,
                        yolo_weights_path=yolo_weights_path,
                        device=device)

    if not disable_tracking:
        prev_boxes = None
        prev_pts = None
        prev_person_ids = None
        next_person_id = 0

    step = 0
    while True:
        t = time.time()

        if foldername is None:
            if filename is not None or disable_vidgear:
                ret, frame = video.read()
                if not ret:
                    break
                if rotation_code is not None:
                    frame = cv2.rotate(frame, rotation_code)
            else:
                frame = video.read()
                if frame is None:
                    break

        else:
            if step >= len(images_path):
                break

            path = images_path[step]
            step += 1  # advance to the next image

            frame = cv2.imread(path, cv2.IMREAD_COLOR)
            if frame is None:
                logging.error("read image error: {}. skipping it.".format(path))
                continue

        pts = model.predict(frame)

        if not disable_tracking:
            boxes, pts = pts

        if not disable_tracking:
            if len(pts) > 0:
                if prev_pts is None and prev_person_ids is None:
                    person_ids = np.arange(next_person_id,
                                           len(pts) + next_person_id,
                                           dtype=np.int32)
                    next_person_id = len(pts) + 1
                else:
                    boxes, pts, person_ids = find_person_id_associations(
                        boxes=boxes,
                        pts=pts,
                        prev_boxes=prev_boxes,
                        prev_pts=prev_pts,
                        prev_person_ids=prev_person_ids,
                        next_person_id=next_person_id,
                        pose_alpha=0.2,
                        similarity_threshold=0.4,
                        smoothing_alpha=0.1,
                    )
                    next_person_id = max(next_person_id,
                                         np.max(person_ids) + 1)
            else:
                person_ids = np.array((), dtype=np.int32)

            prev_boxes = boxes.copy()
            prev_pts = pts.copy()
            prev_person_ids = person_ids

        else:
            person_ids = np.arange(len(pts), dtype=np.int32)

        for i, (pt, pid) in enumerate(zip(pts, person_ids)):
            frame = draw_points_and_skeleton(
                frame,
                pt,
                joints_dict()[hrnet_joints_set]['skeleton'],
                person_index=pid,
                points_color_palette='gist_rainbow',
                skeleton_color_palette='jet',
                points_palette_samples=10)

            frame = falldown.check_fall_down(
                frame, pt,
                joints_dict()[hrnet_joints_set]['skeleton'], video_framerate)

        fps = 1. / (time.time() - t)
        print('\rframerate: %f fps' % fps, end='')

        if has_display:
            cv2.imshow('frame.png', frame)
            k = cv2.waitKey(1)
            if k == 27:  # Esc button
                if disable_vidgear:
                    video.release()
                else:
                    video.stop()
                break
        else:
            cv2.imwrite('frame.png', frame)

        if save_video:
            if video_writer is None:
                fourcc = cv2.VideoWriter_fourcc(*video_format)  # video format
                video_writer = cv2.VideoWriter(
                    'output.avi', fourcc, video_framerate,
                    (frame.shape[1], frame.shape[0]))
            video_writer.write(frame)

    if save_video:
        video_writer.release()
Code Example #25
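This snippet is truncated: the opening of the CamGear call and the options and output_params dicts it references are missing. A plausible reconstruction of that preamble, with all names and values being assumptions:

# Hypothetical preamble; the original setup is not shown.
import datetime
import cv2
from vidgear.gears import CamGear, WriteGear

t = datetime.datetime.now()  # start time, used for the elapsed-time print at the end
options = {"CAP_PROP_FRAME_WIDTH": 1280, "CAP_PROP_FRAME_HEIGHT": 720}  # hypothetical capture params
output_params = {"-f": "rtsp", "-rtsp_transport": "tcp"}  # hypothetical FFmpeg RTSP output params
# detect_face(frame) is assumed to be defined elsewhere (e.g. a Haar-cascade face detector)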
stream = CamGear(source=0,  # hypothetical source; the original opening line is truncated
                 backend=cv2.CAP_V4L,
                 logging=True,
                 **options).start()
writer = WriteGear(output_filename='rtsp://192.168.0.100:5541/test',
                   compression_mode=True,
                   logging=True,
                   **output_params)

i = 0
faces = []
while True:
    try:
        i += 1
        frame = stream.read()
        if frame is not None:
            # detect faces roughly every half second (every 12th frame)
            if i % 12 == 0:
                faces = detect_face(frame)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
            writer.write(frame)
        else:
            print("no frame")
    except KeyboardInterrupt:
        break

# safely close video stream
stream.stop()
writer.close()
print("time:", datetime.datetime.now().timestamp() - t.timestamp())
Code Example #26
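This excerpt begins mid-loop: the capture stream vs, the writer out, the queue q, the tracker, and the per-frame detection that produces left, top, right, bottom, and area are all set up earlier and not shown. A plausible shape for the missing preamble, with names and values inferred from the excerpt as assumptions:

# Hypothetical preamble inferred from the excerpt below; not the original code.
import cv2
from queue import Queue
from vidgear.gears import CamGear

vs = CamGear(source=0).start()  # capture stream; stopped at the end of the excerpt
out = cv2.VideoWriter("out.avi", cv2.VideoWriter_fourcc(*"XVID"), 30, (640, 480))
q = Queue()  # receives True/False flags about the tracked object's apparent size
tracker = cv2.TrackerMedianFlow_create()
area_set = 0
while True:
    frame = vs.read()
    if frame is None:
        break
    # hypothetical tracking step producing the bounding box used below
    ok, (left, top, rect_width, rect_height) = tracker.update(frame)
    right, bottom = left + rect_width, top + rect_height
    area = rect_width * rect_height if ok else 0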
    if area_set == 0 and area != 0:
        area_set = area
        initial_dim = (left, top, rect_width, rect_height)
    if area > area_set:
        tracker = cv2.TrackerMedianFlow_create()
        tracker.init(frame, initial_dim)
        print("setting to original ...")
    if area < area_set - ((10 * area_set) / 100):
        q.put(True)
    else:
        q.put(False)
    # if (area == 0) and area_set > 0:
    #     area_set = 0
    #     tracker = cv2.TrackerMedianFlow_create()
    #     q.put(False)
    cv2.namedWindow("Main Window", cv2.WINDOW_NORMAL)
    cv2.putText(frame, str(area), (int(left), int(top)),
                cv2.FONT_HERSHEY_DUPLEX, 1, (124, 50, 145), 2)
    cv2.rectangle(frame, (int(left), int(top)), (int(right), int(bottom)),
                  (124, 50, 145), 3)
    cv2.imshow("Main Window", frame)
    out.write(frame)
    key = cv2.waitKey(1)
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
vs.stop()
out.release()
cv2.destroyAllWindows()
Code Example #27
def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, image_resolution, single_person,
         max_batch_size, disable_vidgear, save_video, video_format, video_framerate, device):
    if device is not None:
        device = torch.device(device)
    else:
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            device = torch.device('cuda:0')
        else:
            device = torch.device('cpu')

    print(device)

    image_resolution = ast.literal_eval(image_resolution)
    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'
    video_writer = None

    if filename is not None:
        video = cv2.VideoCapture(filename)
        assert video.isOpened()
    else:
        if disable_vidgear:
            video = cv2.VideoCapture(camera_id)
            assert video.isOpened()
        else:
            video = CamGear(camera_id).start()

    model = SimpleHRNet(
        hrnet_c,
        hrnet_j,
        hrnet_weights,
        resolution=image_resolution,
        multiperson=not single_person,
        max_batch_size=max_batch_size,
        device=device
    )

    while True:
        t = time.time()

        if filename is not None or disable_vidgear:
            ret, frame = video.read()
            if not ret:
                break
        else:
            frame = video.read()
            if frame is None:
                break

        pts = model.predict(frame)

        for i, pt in enumerate(pts):
            frame = draw_points_and_skeleton(frame, pt, joints_dict()[hrnet_joints_set]['skeleton'], person_index=i,
                                             points_color_palette='gist_rainbow', skeleton_color_palette='jet',
                                             points_palette_samples=10)

        fps = 1. / (time.time() - t)
        print('\rframerate: %f fps' % fps, end='')

        if has_display:
            cv2.imshow('frame.png', frame)
            k = cv2.waitKey(1)
            if k == 27:  # Esc button
                if disable_vidgear:
                    video.release()
                else:
                    video.stop()
                break
        else:
            cv2.imwrite('frame.png', frame)

        if save_video:
            if video_writer is None:
                fourcc = cv2.VideoWriter_fourcc(*video_format)  # video format
                video_writer = cv2.VideoWriter('output.avi', fourcc, video_framerate, (frame.shape[1], frame.shape[0]))
            video_writer.write(frame)

    if save_video:
        video_writer.release()
Code Example #28
    def main(self):

        if self.args.device is not None:
            device = torch.device(self.args.device)
        else:
            if torch.cuda.is_available():
                torch.backends.cudnn.deterministic = True
                device = torch.device('cuda:0')
            else:
                device = torch.device('cpu')

        print(device)

        image_resolution = ast.literal_eval(self.args.image_resolution)
        has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'
        has_display = False
        if self.args.filename is not None:
            video = cv2.VideoCapture(self.args.filename)
            assert video.isOpened()
        else:
            if self.args.disable_vidgear:
                video = cv2.VideoCapture(self.args.camera_id)
                assert video.isOpened()
            else:
                video = CamGear(self.args.camera_id).start()

        model = SimpleHRNet(self.args.hrnet_c,
                            self.args.hrnet_j,
                            self.args.hrnet_weights,
                            resolution=image_resolution,
                            multiperson=not self.args.single_person,
                            max_batch_size=self.args.max_batch_size,
                            device=device)
        num_of_frame = 0
        num_of_std = 0
        start = False
        flag = False
        root = os.path.join(self.args.save_root, 'test')

        if not os.path.exists(root):
            os.mkdir(root)

        while True:
            if self.args.filename is not None or self.args.disable_vidgear:
                ret, self.frame = video.read()
                if not ret:
                    break
            else:
                self.frame = video.read()
                if self.frame is None:
                    break

            pts = model.predict(self.frame)

            for i, pt in enumerate(pts):
                self.frame = draw_points_and_skeleton(
                    self.frame,
                    pt,
                    joints_dict()[self.args.hrnet_joints_set]['skeleton'],
                    person_index=i,
                    points_color_palette='gist_rainbow',
                    skeleton_color_palette='jet',
                    points_palette_samples=10)

            # if not start:
            #     #print('pts', pts)
            #     angel = cal_angle(pts, 'start')
            #     start = True if angel <= 20 else False

            if not start:
                self.text_ready = 'please get ready'
                cv2.putText(self.frame, self.text_ready, (50, 50),
                            cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 255), 2)
                angel = self.cal_angle(pts, 'start')
                start = True if angel <= 5 else False

            if start:
                self.text_elbow_touch_knee = 'please touch your elbows to your knees'
                cv2.putText(self.frame, self.text_elbow_touch_knee, (50, 50),
                            cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 255), 2)

            if has_display:
                cv2.imshow('frame.png', self.frame)
                k = cv2.waitKey(1)
                if k == 27:  # Esc button
                    if self.args.disable_vidgear:
                        video.release()
                    else:
                        video.stop()
                    break
            else:
                angle = self.cal_angle(pts, 'stardard')
                if angle <= 50 and start:
                    text = "count_{}".format(num_of_std)

                    self.count(self.frame, text, num_of_frame, root, video)

                    start = False
                    num_of_std += 1
                    flag = True

                elif angle <= 50 and not start and not flag:
                    self.text_error = 'foul: improper hand action'
                    cv2.putText(self.frame, self.text_error, (330, 50),
                                cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 255), 2)
                    self.text = "count_{}".format(num_of_std)
                    self.count(self.frame, self.text, num_of_frame, root,
                               video)
                    #print(type(frame))
                else:
                    self.text = "count_{}".format(num_of_std)
                    self.count(self.frame, self.text, num_of_frame, root,
                               video)

            #print('num_of_frame', num_of_frame)
            #print('pts', pts)
            num_of_frame += 1
Code Example #29
def main(camera_id, filename, hrnet_m, hrnet_c, hrnet_j, hrnet_weights,
         hrnet_joints_set, image_resolution, single_person, use_tiny_yolo,
         disable_tracking, max_batch_size, disable_vidgear, save_video,
         video_format, video_framerate, device, exercise_type):
    if device is not None:
        device = torch.device(device)
    else:
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')

    # print(device)
    image_resolution = ast.literal_eval(image_resolution)
    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'
    video_writer = None

    if filename is not None:
        video = cv2.VideoCapture(filename)
        assert video.isOpened()
    else:
        if disable_vidgear:
            video = cv2.VideoCapture(camera_id)
            assert video.isOpened()
        else:
            video = CamGear(camera_id).start()
    if use_tiny_yolo:
        yolo_model_def = "./models/detectors/yolo/config/yolov3-tiny.cfg"
        yolo_class_path = "./models/detectors/yolo/data/coco.names"
        yolo_weights_path = "./models/detectors/yolo/weights/yolov3-tiny.weights"
    else:
        yolo_model_def = "./models/detectors/yolo/config/yolov3.cfg"
        yolo_class_path = "./models/detectors/yolo/data/coco.names"
        yolo_weights_path = "./models/detectors/yolo/weights/yolov3.weights"

    model = SimpleHRNet(hrnet_c,
                        hrnet_j,
                        hrnet_weights,
                        model_name=hrnet_m,
                        resolution=image_resolution,
                        multiperson=not single_person,
                        return_bounding_boxes=not disable_tracking,
                        max_batch_size=max_batch_size,
                        yolo_model_def=yolo_model_def,
                        yolo_class_path=yolo_class_path,
                        yolo_weights_path=yolo_weights_path,
                        device=device)

    if not disable_tracking:
        prev_boxes = None
        prev_pts = None
        prev_person_ids = None
        next_person_id = 0

    flag = 0
    prev_flag = flag
    counter = 0
    angle = 0
    dist = 0
    prev_dist = dist

    while True:
        t = time.time()

        if filename is not None or disable_vidgear:
            ret, frame = video.read()
            if not ret:
                break

        else:
            frame = video.read()
            if frame is None:
                break

        pts = model.predict(frame)

        if not disable_tracking:
            boxes, pts = pts

        if not disable_tracking:
            if len(pts) > 0:
                if prev_pts is None and prev_person_ids is None:
                    person_ids = np.arange(next_person_id,
                                           len(pts) + next_person_id,
                                           dtype=np.int32)
                    next_person_id = len(pts) + 1
                else:
                    boxes, pts, person_ids = find_person_id_associations(
                        boxes=boxes,
                        pts=pts,
                        prev_boxes=prev_boxes,
                        prev_pts=prev_pts,
                        prev_person_ids=prev_person_ids,
                        next_person_id=next_person_id,
                        pose_alpha=0.2,
                        similarity_threshold=0.4,
                        smoothing_alpha=0.1,
                    )
                    next_person_id = max(next_person_id,
                                         np.max(person_ids) + 1)

            else:
                person_ids = np.array((), dtype=np.int32)

            prev_boxes = boxes.copy()
            prev_pts = pts.copy()
            prev_person_ids = person_ids

        else:
            person_ids = np.arange(len(pts), dtype=np.int32)

        start_point = (45, 5)
        end_point = (1300, 250)
        colorr = (0, 0, 0)
        thicknessr = -1
        frame = cv2.rectangle(frame, start_point, end_point, colorr,
                              thicknessr)

        if exercise_type == 1:  #for pushUps

            for i, (pt, pid) in enumerate(zip(pts, person_ids)):
                frame, angle = draw_points_and_skeleton(
                    frame,
                    pt,
                    joints_dict()[hrnet_joints_set]['skeleton'],
                    person_index=pid,
                    points_color_palette='gist_rainbow',
                    skeleton_color_palette='jet',
                    points_palette_samples=10,
                    exercise_type=1)

            fps = 1. / (time.time() - t)
            print('\rframerate: %f fps' % fps, end='')

            #angle=findangle(frame, pts, joints_dict()[hrnet_joints_set]['skeleton'])
            font = cv2.FONT_HERSHEY_SIMPLEX
            x, y, l = frame.shape
            org = (50, 80)
            fontScale = 1
            color = (255, 255, 255)
            thickness = 2
            frame = cv2.putText(frame, str(angle), org, font, fontScale, color,
                                thickness, cv2.LINE_AA)
            if (len(pts) > 0):
                if (angle > 150):
                    flag = 0
                if (angle < 90):
                    flag = 1
                if (prev_flag == 1 and flag == 0):
                    counter = counter + 1

            prev_flag = flag

            font = cv2.FONT_HERSHEY_SIMPLEX
            x, y, l = frame.shape
            org = (50, 180)
            fontScale = 4
            color = (255, 255, 255)
            thickness = 8
            text = "PushUps Count=" + str(counter)
            frame = cv2.putText(frame, text, org, font, fontScale, color,
                                thickness, cv2.LINE_AA)

        elif exercise_type == 2:  #for sitUps

            for i, (pt, pid) in enumerate(zip(pts, person_ids)):
                frame, angle = draw_points_and_skeleton(
                    frame,
                    pt,
                    joints_dict()[hrnet_joints_set]['skeleton'],
                    person_index=pid,
                    points_color_palette='gist_rainbow',
                    skeleton_color_palette='jet',
                    points_palette_samples=10,
                    exercise_type=2)

            fps = 1. / (time.time() - t)
            print('\rframerate: %f fps' % fps, end='')

            #angle=findangle(frame, pts, joints_dict()[hrnet_joints_set]['skeleton'])
            font = cv2.FONT_HERSHEY_SIMPLEX
            x, y, l = frame.shape
            org = (50, 80)
            fontScale = 1
            color = (255, 255, 255)
            thickness = 2
            frame = cv2.putText(frame, str(angle), org, font, fontScale, color,
                                thickness, cv2.LINE_AA)
            if (len(pts) > 0):
                if (angle > 168):
                    flag = 0
                if (angle < 48):
                    flag = 1
                if (prev_flag == 1 and flag == 0):
                    counter = counter + 1

            prev_flag = flag

            font = cv2.FONT_HERSHEY_SIMPLEX
            x, y, l = frame.shape
            org = (50, 180)
            fontScale = 4
            color = (255, 255, 255)
            thickness = 8
            text = "Squats Count=" + str(counter)
            frame = cv2.putText(frame, text, org, font, fontScale, color,
                                thickness, cv2.LINE_AA)

        elif exercise_type == 3:  #for ChinUps

            for i, (pt, pid) in enumerate(zip(pts, person_ids)):
                frame, dist = draw_points_and_skeleton(
                    frame,
                    pt,
                    joints_dict()[hrnet_joints_set]['skeleton'],
                    person_index=pid,
                    points_color_palette='gist_rainbow',
                    skeleton_color_palette='jet',
                    points_palette_samples=10,
                    exercise_type=3)

            fps = 1. / (time.time() - t)
            print('\rframerate: %f fps' % fps, end='')

            #angle=findangle(frame, pts, joints_dict()[hrnet_joints_set]['skeleton'])
            font = cv2.FONT_HERSHEY_SIMPLEX
            x, y, l = frame.shape
            org = (50, 80)
            fontScale = 1
            color = (255, 255, 255)
            thickness = 2
            frame = cv2.putText(frame, str(dist), org, font, fontScale, color,
                                thickness, cv2.LINE_AA)
            if (len(pts) > 0):
                if (dist == -1 and prev_dist == 1):
                    counter = counter + 1

            prev_dist = dist

            font = cv2.FONT_HERSHEY_SIMPLEX
            x, y, l = frame.shape
            org = (50, 180)
            fontScale = 4
            color = (255, 255, 255)
            thickness = 8
            text = "ChinUps Count=" + str(counter)
            frame = cv2.putText(frame, text, org, font, fontScale, color,
                                thickness, cv2.LINE_AA)

        if has_display:
            cv2.imshow('frame.png', frame)
            k = cv2.waitKey(1)
            if k == 27:  # Esc button
                if disable_vidgear:
                    video.release()
                else:
                    video.stop()
                break
        else:
            cv2.imwrite('frame.png', frame)

        if save_video:
            if video_writer is None:
                fourcc = cv2.VideoWriter_fourcc(*video_format)  # video format
                video_writer = cv2.VideoWriter(
                    'arnleft.avi', fourcc, video_framerate,
                    (frame.shape[1], frame.shape[0]))
            video_writer.write(frame)

    if save_video:
        video_writer.release()
Code Example #30
    def main(self, args):
        if args.device is not None:
            device = torch.device(args.device)
        else:
            if torch.cuda.is_available():
                torch.backends.cudnn.deterministic = True
                device = torch.device('cuda:0')
            else:
                device = torch.device('cpu')

        print(device)

        image_resolution = ast.literal_eval(args.image_resolution)
        has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'
        has_display = False
        if args.filename is not None:
            video = cv2.VideoCapture(args.filename)
            assert video.isOpened()
        else:
            if args.disable_vidgear:
                video = cv2.VideoCapture(args.camera_id)
                assert video.isOpened()
            else:
                video = CamGear(args.camera_id).start()
        model = SimpleHRNet(
            args.hrnet_c,
            args.hrnet_j,
            args.hrnet_weights,
            resolution=image_resolution,
            multiperson=not args.single_person,
            #multiperson= False,
            max_batch_size=args.max_batch_size,
            device=device)
        num_of_frame = 0
        self.num_of_std = 0
        self.error_box_text = ' '
        start = False
        flag = False
        root = os.path.join(args.save_root, 'sit_ups_v4_test')

        if not os.path.exists(root):
            os.mkdir(root)

        while True:
            if args.filename is not None or args.disable_vidgear:
                ret, self.frame = video.read()
                if not ret:
                    break
            else:
                self.frame = video.read()
                if self.frame is None:
                    break
            pts = model.predict(self.frame)

            for i, pt in enumerate(pts):
                self.frame = draw_points_and_skeleton(
                    self.frame,
                    pt,
                    joints_dict()[args.hrnet_joints_set]['skeleton'],
                    person_index=i,
                    points_color_palette='gist_rainbow',
                    skeleton_color_palette='jet',
                    points_palette_samples=10)
            print('pts', pts)
            if not start:
                self.text_ready = 'please lie with both shoulders on the ground, hands behind your head'
                angle_stg, angle_sew, angle_hma_start = self.cal_angle(
                    pts, 'start')
                if angle_stg <= 5 and angle_sew <= 90 and angle_hma_start <= 10:
                    start = True
                else:
                    start = False
                self.state_box_text = self.text_ready

            elif start:
                self.text_elbow_touch_knee = 'please sit up with hands behind your head until your elbows touch your knees'
                self.state_box_text = self.text_elbow_touch_knee

            if has_display:
                cv2.imshow('frame.png', self.frame)
                k = cv2.waitKey(1)
                if k == 27:  # Esc button
                    if args.disable_vidgear:
                        video.release()
                    else:
                        video.stop()
                    break
            else:
                ratio_between_distance, angle_hks, angle_hma_standard, x_diff_elbow_knee, avg_conf = self.cal_angle(
                    pts, 'stardard')
                print('avg_conf', avg_conf)
                if avg_conf < 0.2:
                    start = False
                    self.text = "count_{}".format(self.num_of_std)
                    self.count(self.frame, self.text, num_of_frame, root,
                               video)
                    num_of_frame += 1
                    continue

                raise_feet = False if np.absolute(
                    angle_hma_start - angle_hma_standard) <= 5 else True
                if angle_hks <= 70 and start and (
                        ratio_between_distance
                        or x_diff_elbow_knee < 0) and not raise_feet:
                    self.text = "count_{}".format(self.num_of_std)
                    self.count(self.frame, self.text, num_of_frame, root,
                               video)
                    self.num_of_std += 1
                    start = False
                    flag = True
                elif angle_hks <= 70 and (
                        ratio_between_distance or x_diff_elbow_knee < 0
                ) and not raise_feet and not start and not flag:
                    self.text_error = 'foul: improper hand action'
                    self.error_box_text = self.text_error
                    self.text = "count_{}".format(self.num_of_std)
                    self.count(self.frame, self.text, num_of_frame, root,
                               video)
                else:
                    self.text = "count_{}".format(self.num_of_std)
                    self.count(self.frame, self.text, num_of_frame, root,
                               video)

            #yield (self.state_box_text, self.error_box_text, self.frame, self.num_of_std)
            #print('time', time.time() - start_time)
            self.error_box_text = ' '
            num_of_frame += 1