def test_write(conversion):
    """Test WriteGear's Non-Compression (OpenCV) mode writer, with an
    optional OpenCV colorspace conversion applied to every frame."""
    stream = cv2.VideoCapture(return_testvideo_path())
    # writer backed by OpenCV (compression disabled)
    writer = WriteGear(output_filename='Output_twc.avi', compression_mode=False)
    grabbed, frame = stream.read()
    while grabbed:
        # apply the requested colorspace conversion before writing
        if conversion:
            frame = cv2.cvtColor(frame, capPropId(conversion))
        writer.write(frame)
        grabbed, frame = stream.read()
    stream.release()
    writer.close()
    # validate the written file with the ffprobe shipped next to static ffmpeg
    basepath, _ = os.path.split(return_static_ffmpeg())
    ffprobe_path = os.path.join(basepath, 'ffprobe.exe' if os.name == 'nt' else 'ffprobe')
    result = check_output([ffprobe_path, "-v", "error", "-count_frames", "-i", os.path.abspath('Output_twc.avi')])
    if result:
        if not isinstance(result, string_types):
            result = result.decode()
        print('Result: {}'.format(result))
        # ffprobe must not report any error/invalid markers
        for marker in ("Error", "Invalid", "error", "invalid"):
            assert marker not in result
    os.remove(os.path.abspath('Output_twc.avi'))
def test_write(conversion):
    """Test WriteGear Compression-Mode (FFmpeg) writer across colorspaces
    sourced through the CamGear API."""
    # source frames through CamGear with the requested colorspace
    stream = CamGear(source=return_testvideo_path(), colorspace=conversion, logging=True).start()
    writer = WriteGear(output_filename='Output_tw.mp4', custom_ffmpeg=return_static_ffmpeg())
    frame = stream.read()
    while frame is not None:
        # RGB-like colorspaces need WriteGear's rgb_mode flag
        if conversion in ('COLOR_BGR2RGB', 'COLOR_BGR2RGBA'):
            writer.write(frame, rgb_mode=True)
        else:
            writer.write(frame)
        frame = stream.read()
    stream.stop()
    writer.close()
    # probe the written file and fail on any reported error
    basepath, _ = os.path.split(return_static_ffmpeg())
    ffprobe_path = os.path.join(basepath, 'ffprobe.exe' if os.name == 'nt' else 'ffprobe')
    result = check_output([ffprobe_path, "-v", "error", "-count_frames", "-i", os.path.abspath('Output_tw.mp4')])
    if result:
        if not isinstance(result, string_types):
            result = result.decode()
        logger.debug('Result: {}'.format(result))
        for marker in ("Error", "Invalid", "error", "invalid"):
            assert marker not in result
    os.remove(os.path.abspath('Output_tw.mp4'))
def test_WriteGear_compression(f_name, output_params, result):
    """Test WriteGear Non-Compression (OpenCV) mode with assorted
    filename/parameter combinations; `result` marks whether the
    combination is expected to succeed."""
    try:
        stream = cv2.VideoCapture(return_testvideo_path())
        writer = WriteGear(
            output_filename=f_name,
            compression_mode=False,
            logging=True,
            **output_params
        )
        success, frame = stream.read()
        while success:
            writer.write(frame)
            success, frame = stream.read()
        stream.release()
        writer.close()
        remove_file_safe(f_name)
    except Exception as e:
        # only parameter sets expected to work may fail the test
        if result:
            pytest.fail(str(e))
        else:
            logger.exception(str(e))
Beispiel #4
0
def test_input_framerate(c_ffmpeg):
    """Test the "-input_framerate" parameter of WriteGear (Compression Mode),
    including the invalid-value path when a wrong ffmpeg path is given."""
    stream = cv2.VideoCapture(return_testvideo_path())  # open test video
    test_video_framerate = stream.get(cv2.CAP_PROP_FPS)
    # feed a bogus framerate when exercising the wrong-ffmpeg-path case
    if c_ffmpeg != "wrong_path":
        output_params = {"-input_framerate": test_video_framerate}
    else:
        output_params = {"-input_framerate": "wrong_input"}
    writer = WriteGear(
        output_filename="Output_tif.mp4",
        custom_ffmpeg=c_ffmpeg,
        logging=True,
        **output_params
    )
    success, frame = stream.read()
    while success:
        writer.write(frame)
        success, frame = stream.read()
    stream.release()
    writer.close()
    # the encoded file must preserve the source framerate
    output_video_framerate = getFrameRate(os.path.abspath("Output_tif.mp4"))
    assert test_video_framerate == output_video_framerate
    os.remove(os.path.abspath("Output_tif.mp4"))
def test_output_dimensions():
    """Test the "-output_dimensions" special parameter of WriteGear
    (Compression Mode): output video must be rescaled to 640x480."""
    target_dims = (640, 480)
    stream = cv2.VideoCapture(return_testvideo_path())
    writer = WriteGear(
        output_filename='Output_tod.mp4',
        custom_ffmpeg=return_static_ffmpeg(),
        logging=True,
        **{"-output_dimensions": target_dims}
    )
    success, frame = stream.read()
    while success:
        writer.write(frame)
        success, frame = stream.read()
    stream.release()
    writer.close()

    # re-open the output and verify its geometry
    output = cv2.VideoCapture(os.path.abspath('Output_tod.mp4'))
    width = output.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = output.get(cv2.CAP_PROP_FRAME_HEIGHT)
    assert width == 640 and height == 480
    output.release()

    os.remove(os.path.abspath('Output_tod.mp4'))
def test_write(conversion):
    """Test Compression Mode (FFmpeg) writer with frames converted to
    different colorspaces via OpenCV before writing."""
    stream = cv2.VideoCapture(return_testvideo_path())
    writer = WriteGear(output_filename='Output_tw.mp4',
                       custom_ffmpeg=return_static_ffmpeg())
    success, frame = stream.read()
    while success:
        if conversion:
            frame = cv2.cvtColor(frame, capPropId(conversion))
        # RGB-like frames are written through WriteGear's rgb_mode
        if conversion in ('COLOR_BGR2RGB', 'COLOR_BGR2RGBA'):
            writer.write(frame, rgb_mode=True)
        else:
            writer.write(frame)
        success, frame = stream.read()
    stream.release()
    writer.close()
    # probe the written file and fail on any reported error
    basepath, _ = os.path.split(return_static_ffmpeg())
    ffprobe_path = os.path.join(
        basepath, 'ffprobe.exe' if os.name == 'nt' else 'ffprobe')
    result = check_output([
        ffprobe_path, "-v", "error", "-count_frames", "-i",
        os.path.abspath('Output_tw.mp4')
    ])
    if result:
        if not isinstance(result, string_types):
            result = result.decode()
        print('Result: {}'.format(result))
        for marker in ("Error", "Invalid", "error", "invalid"):
            assert marker not in result
    os.remove(os.path.abspath('Output_tw.mp4'))
Beispiel #7
0
def test_output_dimensions():
    """Test the "-output_dimensions" special parameter of WriteGear
    (Compression Mode). On Windows the ffmpeg auto-download path is
    also exercised via "-ffmpeg_download_path"."""
    dimensions = (640, 480)
    stream = cv2.VideoCapture(return_testvideo_path())
    output_params = {"-output_dimensions": dimensions}
    if platform.system() == "Windows":
        # Windows additionally points the ffmpeg download at the temp dir
        output_params["-ffmpeg_download_path"] = tempfile.gettempdir()
    writer = WriteGear(output_filename="Output_tod.mp4",
                       custom_ffmpeg=return_static_ffmpeg(),
                       logging=True,
                       **output_params)  # define writer
    success, frame = stream.read()
    while success:
        writer.write(frame)
        success, frame = stream.read()
    stream.release()
    writer.close()

    # re-open the output and verify it was rescaled
    output = cv2.VideoCapture(os.path.abspath("Output_tod.mp4"))
    width = output.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = output.get(cv2.CAP_PROP_FRAME_HEIGHT)
    assert width == 640 and height == 480
    output.release()

    remove_file_safe("Output_tod.mp4")
Beispiel #8
0
def test_fail_framechannels(compression_mode):
    """
    IO Test - made to fail with multiple frame channels.

    Writes a None frame, then a 3-channel frame, then a 4-channel frame:
    changing the channel count mid-stream must raise ValueError, which is
    treated as the expected failure (xfail).
    """
    np.random.seed(0)
    # 3-channel random frame
    random_data1 = np.random.random(size=(480, 640, 3)) * 255
    input_data1 = random_data1.astype(np.uint8)

    np.random.seed(0)
    # 4-channel random frame generated from the same seed
    random_data2 = np.random.random(size=(480, 640, 4)) * 255
    input_data2 = random_data2.astype(np.uint8)

    writer = None
    try:
        writer = WriteGear("output.mp4", compression_mode=compression_mode)
        writer.write(None)
        writer.write(input_data1)
        writer.write(input_data2)
    except ValueError:
        # targeted except instead of isinstance-dispatch in `except Exception`
        pytest.xfail("Test Passed!")
    except Exception as e:
        pytest.fail(str(e))
    finally:
        # idiomatic `is not None` (was `not writer is None`)
        if writer is not None:
            writer.close()
class VideoBuilder:
    """Incrementally assemble an MP4 video file through WriteGear.

    Public attributes: `filepath` (output path), `closed` (True once
    finalized), and `fps` (frames per second used for timing).
    """

    def __init__(self, filename, width, height, fps):
        # once closed, no further frames may be added
        self.closed = False
        self.filepath = '{}.mp4'.format(filename)

        # H.264, DASH-fragmented output at the requested geometry/framerate
        encoder_opts = {
            "-vcodec": "libx264",
            "-movflags": "+dash",
            "-input_framerate": fps,
            "-output_dimensions": (width, height),
        }
        self._writer = WriteGear(output_filename=self.filepath, **encoder_opts)

        self._frame_count = 0
        self.fps = fps

    def add_frame(self, frame):
        """Append a single frame to the video; invalid after close()."""
        assert not self.closed, "Can't add frame to closed VideoBuilder!"
        self._writer.write(frame)
        self._frame_count += 1

    def duration(self):
        """Return the video length in seconds, rounded to milliseconds."""
        return round(self._frame_count / self.fps, 3)

    def close(self):
        """Finalize the output file; the builder cannot be reused."""
        self.closed = True
        self._writer.close()
Beispiel #10
0
def beginRecording(button):
    """Record the last two opened video streams to disk until *button* is pressed.

    Starts the penultimate and ultimate streams in ``video_streams``, opens one
    WriteGear writer per stream (named via ``compute.getNewFileNames()``), and
    writes frames in lockstep until either stream returns None or the hardware
    button fires. Sets the module-level ``endTime`` on button-triggered stop.
    Relies on module globals: fileNameList, writer1, writer2, video_streams,
    output_params1/output_params2, and endTime.
    """

    global fileNameList
    global writer1
    global writer2
    global video_streams

    # start streams
    stream1 = video_streams[len(video_streams) - 2]  # penultimate stream
    stream2 = video_streams[len(video_streams) - 1]  # ultimate stream
    stream1.start()
    stream2.start()

    # one output file (and writer) per stream
    fileNameList = compute.getNewFileNames()
    print(fileNameList)
    writer1 = WriteGear(output_filename=fileNameList[0], **output_params1)
    writer2 = WriteGear(output_filename=fileNameList[1], **output_params2)

    change_LED(255, 0, 0)  # Red LED to indicate recording

    while True:

        frameA = stream1.read()
        # read frames from stream1

        frameB = stream2.read()
        # read frames from stream2
        print("stream1.framerate:" + str(stream1.framerate))
        print("stream2.framerate:" + str(stream2.framerate))

        # check if any of two frame is None
        if frameA is None or frameB is None:
            # if True break the infinite loop
            break

        #cv2.imshow("Output Frame1", frameA)
        #cv2.imshow("Output Frame2", frameB)
        # Show output window of stream1 and stream 2 seperately

        writer1.write(frameA)
        writer2.write(frameB)

        # If button is pressed, exit recording
        if button.is_pressed:
            # small debounce delay before stopping
            time.sleep(0.5)
            print("Stop Recording!")
            change_LED(255, 165, 0)  # orange LED: recording stopped
            global endTime
            endTime = time.time()  # timestamp of when recording ended
            break
Beispiel #11
0
def test_failedchannels():
    """
    IO Test - made to fail with an invalid channel length (5 channels).
    """
    np.random.seed(0)
    # single frame carrying five channels, which is not a valid pixel layout
    bad_frame = (np.random.random(size=(480, 640, 5)) * 255).astype(np.uint8)

    # writing a 5-channel frame must raise ValueError
    with pytest.raises(ValueError):
        writer = WriteGear("output.mp4")
        writer.write(bad_frame)
        writer.close()
Beispiel #12
0
def test_failedextension():
    """
    IO Test - made to fail on a filename with an unknown extension.
    """
    np.random.seed(0)
    # random data for 10 frames
    frames = (np.random.random(size=(10, 1080, 1920, 3)) * 255).astype(np.uint8)

    # the 'garbage' container/extension does not exist
    with pytest.raises(ValueError):
        writer = WriteGear("garbage.garbage")
        writer.write(frames)
        writer.close()
Beispiel #13
0
def test_assertfailedwrite():
    """
    IO Test - made to fail with a non-existent output folder path.
    """
    np.random.seed(0)
    # random data for 10 frames
    frames = (np.random.random(size=(10, 1080, 1920, 3)) * 255).astype(np.uint8)

    with pytest.raises(AssertionError):
        # 'wrong_path/' folder does not exist
        writer = WriteGear("wrong_path/output.mp4")
        writer.write(frames)
        writer.close()
Beispiel #14
0
def test_invalid_params():
    """
    Invalid parameter failure test - passes an unknown video codec, which
    must surface as a ValueError.
    """
    np.random.seed(0)
    # single random test frame
    frame = (np.random.random(size=(480, 640, 3)) * 255).astype(np.uint8)
    with pytest.raises(ValueError):
        writer = WriteGear(
            "output.mp4", compression_mode=True, logging=True,
            **{"-vcodec": "unknown"}
        )
        writer.write(frame)
        writer.write(frame)
        writer.close()
def capture():
    """Worker loop: record 512-frame video/keylog chunks until exit is requested.

    Driven by module-level flags:
      capture_state -- 0 = hold, 1 = capture, 2 = exit
      del_it        -- when truthy, the chunk being captured is discarded
      start         -- running index used to name the output files
    """
    global capture_state
    global del_it
    global start

    # while in not_exit mode
    while capture_state != 2:

        out_file = str(start)
        # capture mode
        if capture_state == 1:
            log_scripts.log_msg('capturing : ' + out_file)

            out_file = 'data\\' + out_file
            # BUGFIX: the ffmpeg path must be a raw string — in the original
            # literal, '\f' and '\b' were parsed as form-feed/backspace
            # escapes, silently corrupting the path.
            vid_out = WriteGear(out_file + '.avi', compression_mode=False,
                                custom_ffmpeg=r'C:\Program Files (x86)\ffmpeg\bin',
                                **output_params)
            txt_out = open(out_file + '.txt', 'w')

            # capture 512 frames, or stop early if deletion was requested
            cnt = 0
            while cnt <= 512 and not del_it:
                vid_out.write(video_scripts.get_state())
                txt_out.write(key_scripts.get_state())
                cnt = cnt + 1

            vid_out.close()
            txt_out.close()

            # if delete was requested, drop both files and go back to hold
            if del_it:
                os.remove(out_file + '.avi')
                os.remove(out_file + '.txt')
                del_it = 0
                capture_state = 0
                log_scripts.log_msg('deleting : ' + out_file)
                log_scripts.log_msg('state  : False')
                log_scripts.log_msg('Capturing : Stop')
            else:
                log_scripts.log_msg('saving : ' + out_file)
                start = start + 1
        else:
            # hold mode: idle until the state changes
            log_scripts.log_msg('at hold')
            time.sleep(2)
    log_scripts.log_msg('capture thread exited')
    exit()
def test_WriteGear_compression(f_name, c_ffmpeg, output_params, result):
    """Test WriteGear Compression-Mode (FFmpeg) with different parameter
    combinations; `result` marks whether the combination should succeed."""
    try:
        stream = cv2.VideoCapture(return_testvideo_path())  # open test video
        writer = WriteGear(output_filename=f_name, compression_mode=True, **output_params)
        success, frame = stream.read()
        while success:
            writer.write(frame)
            success, frame = stream.read()
        stream.release()
        writer.close()
        # only remove real output files, not a bare temp-directory name
        if f_name and f_name != tempfile.gettempdir():
            os.remove(os.path.abspath(f_name))
    except Exception as e:
        # failures only count when this parameter set was expected to work
        if result:
            pytest.fail(str(e))
def test_input_framerate():
    """Test the "-input_framerate" parameter of WriteGear (Compression Mode):
    the output file must preserve the source framerate."""
    stream = cv2.VideoCapture(return_testvideo_path())  # open test video
    source_fps = stream.get(cv2.CAP_PROP_FPS)
    writer = WriteGear(output_filename='Output_tif.mp4',
                       custom_ffmpeg=return_static_ffmpeg(),
                       **{"-input_framerate": source_fps})
    success, frame = stream.read()
    while success:
        writer.write(frame)
        success, frame = stream.read()
    stream.release()
    writer.close()
    # compare source and encoded framerates
    assert source_fps == getFrameRate(os.path.abspath('Output_tif.mp4'))
    os.remove(os.path.abspath('Output_tif.mp4'))
Beispiel #18
0
def test_write(conversion):
    """
    Test WriteGear Compression-Mode (FFmpeg) writer in different colorspaces
    via the CamGear API, including invalid colorspace-value injection.
    """
    # open stream with the requested colorspace
    stream = CamGear(
        source=return_testvideo_path(), colorspace=conversion, logging=True
    ).start()
    writer = WriteGear(
        output_filename="Output_tw.mp4", custom_ffmpeg=return_static_ffmpeg()
    )
    while True:
        frame = stream.read()
        if frame is None:
            break
        if conversion == "COLOR_BGR2RGBA":
            writer.write(frame, rgb_mode=True)
        elif conversion == "COLOR_BGR2INVALID":
            # inject an invalid colorspace string; next pass tests the
            # non-string case via the reassigned `conversion`
            stream.color_space = "wrong_colorspace"
            conversion = "COLOR_BGR2INVALID2"
            writer.write(frame)
        elif conversion == "COLOR_BGR2INVALID2":
            # inject a wrong (non-string) colorspace value
            stream.color_space = 1546755
            conversion = ""
            writer.write(frame)
        else:
            writer.write(frame)
    stream.stop()
    writer.close()
    # probe the output with ffprobe and fail on any reported error
    basepath, _ = os.path.split(return_static_ffmpeg())
    ffprobe_path = os.path.join(
        basepath, "ffprobe.exe" if os.name == "nt" else "ffprobe"
    )
    result = check_output([
        ffprobe_path, "-v", "error", "-count_frames", "-i",
        os.path.abspath("Output_tw.mp4"),
    ])
    if result:
        if not isinstance(result, string_types):
            result = result.decode()
        logger.debug("Result: {}".format(result))
        for marker in ("Error", "Invalid", "error", "invalid"):
            assert marker not in result
    os.remove(os.path.abspath("Output_tw.mp4"))
Beispiel #19
0
def test_invalid_encoder(v_codec):
    """
    Invalid encoder test - WriteGear must handle the given vcodec without
    raising; any exception fails the test.
    """
    np.random.seed(0)
    # single random test frame
    frame = (np.random.random(size=(480, 640, 3)) * 255).astype(np.uint8)
    try:
        writer = WriteGear("output.mp4",
                           compression_mode=True,
                           logging=True,
                           **{"-vcodec": v_codec})
        writer.write(frame)
        writer.write(frame)
        writer.close()
    except Exception as e:
        pytest.fail(str(e))
Beispiel #20
0
def test_failedchannels(size):
    """
    IO Test - exercises writes with invalid channel lengths. A pair of sizes
    writes two frames with differing channel counts; a single size writes
    one frame of that shape.
    """
    np.random.seed(0)
    if len(size) > 1:
        # two frames whose channel counts differ
        first = (np.random.random(size=size[0]) * 255).astype(np.uint8)
        second = (np.random.random(size=size[1]) * 255).astype(np.uint8)
        writer = WriteGear("output.mp4", compression_mode=True)
        writer.write(first)
        writer.write(second)
        writer.close()
    else:
        # single frame of the given shape
        frame = (np.random.random(size=size) * 255).astype(np.uint8)
        writer = WriteGear("output.mp4", compression_mode=True, logging=True)
        writer.write(frame)
        writer.close()
Beispiel #21
0
def Videowriter_compression_mode(path):
	"""
	Benchmark the VidGear writer in Compression Mode (FFmpeg backend):
	writes every frame from `path` and reports elapsed time and FPS.
	"""
	stream = VideoGear(source=path).start()
	writer = WriteGear(output_filename = 'Output_vc.mp4', custom_ffmpeg = return_static_ffmpeg())
	fps_Vid = FPS().start()
	while True:
		frame = stream.read()
		if frame is None:
			break
		writer.write(frame)
		fps_Vid.update()
	fps_Vid.stop()
	stream.stop()
	writer.close()
	print("FFmpeg Writer")
	# typo fix in log output: "elasped" -> "elapsed"
	print("[LOG] total elapsed time: {:.2f}".format(fps_Vid.total_time_elapsed()))
	print("[LOG] approx. FPS: {:.2f}".format(fps_Vid.fps()))
	os.remove(os.path.abspath('Output_vc.mp4'))
Beispiel #22
0
def Videowriter_non_compression_mode(path):
	"""
	Benchmark the VidGear writer in Non-Compression Mode (OpenCV backend):
	writes every frame from `path` and reports elapsed time and FPS.
	"""
	stream = VideoGear(source=path).start()
	writer = WriteGear(output_filename = 'Output_vnc.mp4', compression_mode = False )
	fps_CV = FPS().start()
	while True:
		frame = stream.read()
		if frame is None:
			break
		writer.write(frame)
		fps_CV.update()
	fps_CV.stop()
	stream.stop()
	writer.close()
	print("OpenCV Writer")
	# typo fix in log output: "elasped" -> "elapsed"
	print("[LOG] total elapsed time: {:.2f}".format(fps_CV.total_time_elapsed()))
	print("[LOG] approx. FPS: {:.2f}".format(fps_CV.fps()))
	os.remove(os.path.abspath('Output_vnc.mp4'))
Beispiel #23
0
def WriteGear_compression_mode():
    """
    Benchmark WriteGear's Compression Mode (FFmpeg backend) with the
    threaded queue disabled, reporting elapsed time and approximate FPS.
    """
    options = {'THREADED_QUEUE_MODE': False}
    stream = VideoGear(source=return_testvideo_path(), **options).start()
    writer = WriteGear(output_filename='Output_vc.mp4',
                       custom_ffmpeg=return_static_ffmpeg())
    fps_Vid = FPS().start()
    while True:
        frame = stream.read()
        if frame is None:
            break
        writer.write(frame)
        fps_Vid.update()
    fps_Vid.stop()
    stream.stop()
    writer.close()
    logger.debug("FFmpeg Writer")
    # typo fix in log output: "elasped" -> "elapsed"
    logger.debug("total elapsed time: {:.2f}".format(
        fps_Vid.total_time_elapsed()))
    logger.debug("approx. FPS: {:.2f}".format(fps_Vid.fps()))
    os.remove(os.path.abspath('Output_vc.mp4'))
Beispiel #24
0
class CameraSettings(tk.Toplevel):
    """The primary class for the camera settings window.

    Configures and provides window callback methods. Also provides 
    methods to stream/record video and capture images that can be 
    used for post hoc analyses (e.g., camera calibration). The 
    `on_quit()` method is called prior to application destruction.   

    Attributes
    ----------
    config : dict
        The current configuration settings for the application
    output_params : dict
        Video output parameters for WriteGear
    exp_controller : instance
        Serial interface to the experiment microcontroller
    exp_connected : bool 
        True if the experiment interface is active
    num_cams : instance
        Tkinter StringVar that captures the user-selected number 
        of cameras
    fps : instance 
        Tkinter StringVar that captures the user-selected frames 
        per second
    exposure : instance 
        Tkinter StringVar that captures the user-selected camera 
        exposure
    gain : instance 
        Tkinter StringVar that captures the user-selected camera 
        gain.
    gpi_mode : instance 
        Tkinter StringVar that captures the camera input mode. 
        Currently, this is selected by the configuration file and 
        not the user as it is assumed all camera interfaces will 
        trigger images with the same method.
    trigger_source : instance 
        Tkinter StringVar that captures the user-selected camera 
        trigger source. Defers to the configuration file. Do not 
        change unless you know what you are doing.
    gpo_mode : instance 
        Tkinter StringVar that captures the user-selected camera 
        sync output mode.
    buffer_dur : instance 
        Tkinter StringVar that captures the user-selected duration 
        that should be used by protocols for buffering images in 
        RAM when real-time encoding is not used. 
    img_width : instance 
        Tkinter StringVar that captures the user-selected image 
        width in pixels. Must be less than your camera's maximum 
        allowable image width and divisible by it's width increment 
        value. See your camera's manual for details.
    img_height : instance 
        Tkinter StringVar that captures the user-selected image 
        height in pixels. Must be less than your camera's maximum 
        allowable image width and divisible by it's width increment 
        value. See your camera's manual for details. 
    offset_x : instance 
        Tkinter StringVar that captures the user-selected image 
        horizontal offset in pixels. Must be less than your camera's
        maximum allowable image width minus the selected img_width. 
        See your camera's manual for details.
    offset_y : instance 
        Tkinter StringVar that captures the user-selected image 
        vertical offset in pixels. Must be less than your camera's 
        maximum allowable image width minus the selected img_height. 
        See your camera's manual for details.
    downsampling : instance
        Tkinter StringVar that captures the user-selected image 
        downsampling. Can be 'XI_1x1' (full resolution) or 'XI_2x2' 
        (1/4 resolution). The latter can only be used when no cropping 
        or offsets are applied. 
    poi_threshold : instance 
        Tkinter StringVar that captures the user-selected 
        pixel-of-interest (poi) threshold (standard deviations) used 
        for reach detection.
    streaming : bool
        True if camera interface is acquiring and displaying images.
    cams_connected : bool
        True if camera interface is active.
    draw_saved : bool 
        True if saved poi's should be displayed while streaming.
    add_pois : bool 
        True if clicking on images should add new poi's during 
        streaming.
    remove_pois : bool
        True if clicking on images should remove added or saved poi's 
        during streaming.
    added_pois : list 
        A nested list of added poi coordinates for each camera. 
    saved_pois : list
        A nested list of saved poi coordinates for each camera. 
    capture : bool
        True if image should be saved to png while streaming.
    record : bool
        True while recording video.
    img_num : int 
        Counts captured images 

    """

    def __init__(self, parent):
        """Build the camera settings window and load its state from the
        temporary configuration file."""
        # create window and suppress parent
        tk.Toplevel.__init__(self, parent)
        self.transient(parent)
        self.grab_set()
        self.title("Camera Settings")
        self.configure(bg="white")
        self.protocol("WM_DELETE_WINDOW", self.on_quit)
        # load configuration and start the experiment interface
        self.config = config.load_config(open('./temp/tmp_config.txt'))
        cam_cfg = self.config['CameraSettings']
        self.output_params = cam_cfg['output_params']
        self.exp_controller = expint.start_interface(self.config)
        self.exp_connected = True

        def text_var(value):
            # StringVar initialized from a stringified config value
            var = tk.StringVar()
            var.set(str(value))
            return var

        def raw_var(value):
            # StringVar initialized from the config value as-is
            var = tk.StringVar()
            var.set(value)
            return var

        self.num_cams = text_var(cam_cfg['num_cams'])
        self.fps = text_var(cam_cfg['fps'])
        self.exposure = text_var(cam_cfg['exposure'])
        self.gain = text_var(cam_cfg['gain'])
        self.gpi_mode = raw_var(cam_cfg['gpi_mode'])
        self.trigger_source = raw_var(cam_cfg['trigger_source'])
        self.gpo_mode = raw_var(cam_cfg['gpo_mode'])
        self.buffer_dur = text_var(self.config['ExperimentSettings']['buffer_dur'])
        self.img_width = text_var(cam_cfg['img_width'])
        self.img_height = text_var(cam_cfg['img_height'])
        self.offset_x = text_var(cam_cfg['offset_x'])
        self.offset_y = text_var(cam_cfg['offset_y'])
        self.downsampling = text_var(cam_cfg['downsampling'])
        self.poi_threshold = text_var(cam_cfg['poi_threshold'])
        # initialize housekeeping variables
        self.streaming = False
        self.cams_connected = False
        self.draw_saved = False
        self.add_pois = False
        self.remove_pois = False
        self.added_pois = [[] for _ in range(cam_cfg['num_cams'])]
        self.saved_pois = [[] for _ in range(cam_cfg['num_cams'])]
        self.capture = False
        self.record = False
        self.img_num = [1]
        # build all widgets
        self._configure_window()

    def on_quit(self):
        """Called prior to destruction of the camera settings window.

        Prior to destruction, the configuration file must be updated
        to reflect the change in settings, and any active interfaces
        must be closed.
        """
        # persist the user-edited Tk variables back into the config dict
        self.config['CameraSettings']['num_cams'] = int(self.num_cams.get())
        self.config['CameraSettings']['fps'] = int(self.fps.get())
        self.config['CameraSettings']['exposure'] = int(self.exposure.get())
        self.config['CameraSettings']['gain'] = float(self.gain.get())
        self.config['CameraSettings']['img_width'] = int(self.img_width.get())
        self.config['CameraSettings']['img_height'] = int(self.img_height.get())
        self.config['CameraSettings']['offset_x'] = int(self.offset_x.get())
        self.config['CameraSettings']['offset_y'] = int(self.offset_y.get())
        self.config['CameraSettings']['downsampling'] = self.downsampling.get()
        self.config['CameraSettings']['trigger_source'] = self.trigger_source.get()
        self.config['CameraSettings']['gpo_mode'] = self.gpo_mode.get()
        self.config['CameraSettings']['poi_threshold'] = float(self.poi_threshold.get())
        # NOTE(review): the key 'self.output_params' looks like a typo for
        # 'output_params', and the stored value is a (num_cams*img_width,
        # img_height) tuple — confirm intended key and shape against the
        # config consumers before changing.
        self.config['CameraSettings']['self.output_params'] = (self.config['CameraSettings']['num_cams']*
            self.config['CameraSettings']['img_width'],self.config['CameraSettings']['img_height'])
        config.save_tmp(self.config)
        # stop streaming and the experiment interface before tearing down
        if self.streaming:
            self._on_stream_quit()
        expint.stop_interface(self.exp_controller)
        self.destroy()

    def _configure_window(self):        
        tk.Label(
            self,
            text = "# Cameras:", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 0, sticky = 'W')   
        self.num_cams_menu = tk.OptionMenu(self, self.num_cams,"1","2","3")
        self.num_cams_menu.configure(width = 12, anchor = "w")
        self.num_cams_menu.grid(row = 0, column = 1)
        tk.Label(
            self,
            text = "FPS:", 
            font = 'Arial 10 bold', 
            bg = "white", 
            width = 23,
            anchor = "e"
            ).grid(row = 1, sticky = 'W')   
        tk.Entry(self, textvariable = self.fps, width = 17).grid(row = 1, column = 1)
        tk.Label(
            self,
            text = "Exposure (usec):", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 2, sticky = 'W')   
        tk.Entry(self, textvariable = self.exposure, width = 17).grid(row = 2, column = 1)
        tk.Label(
            self,
            text = "Gain:", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 3, sticky = 'W')   
        tk.Entry(self, textvariable = self.gain, width = 17).grid(row = 3, column = 1)
        tk.Label(
            self,
            text = "Trigger Source:", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 4, sticky = 'W')   
        self.gpi_trig_menu = tk.OptionMenu(
            self,
            self.trigger_source,
            "XI_TRG_OFF",
            "XI_TRG_EDGE_RISING",
            "XI_TRG_EDGE_FALLING",
            "XI_TRG_SOFTWARE",
            "XI_TRG_LEVEL_HIGH",
            "XI_TRG_LEVEL_LOW")
        self.gpi_trig_menu.configure(width = 12, anchor = "w")
        self.gpi_trig_menu.grid(row = 4, column = 1)
        tk.Label(
            self,
            text = "Sync Mode:", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 5, sticky = 'W')   
        self.gpo_mode_menu = tk.OptionMenu(
            self,
            self.gpo_mode,
            "XI_GPO_OFF",
            "XI_GPO_ON",
            "XI_GPO_FRAME_ACTIVE",
            "XI_GPO_FRAME_ACTIVE_NEG",
            "XI_GPO_EXPOSURE_ACTIVE",
            "XI_GPO_EXPOSURE_ACTIVE_NEG",
            "XI_GPO_FRAME_TRIGGER_WAIT",
            "XI_GPO_FRAME_TRIGGER_WAIT_NEG",
            "XI_GPO_EXPOSURE_PULSE",
            "XI_GPO_EXPOSURE_PULSE_NEG",
            "XI_GPO_BUSY",
            "XI_GPO_BUSY_NEG",
            "XI_GPO_HIGH_IMPEDANCE",
            "XI_GPO_FRAME_BUFFER_OVERFLOW")
        self.gpo_mode_menu.configure(width = 12, anchor = "w")
        self.gpo_mode_menu.grid(row = 5, column = 1)
        tk.Label(
            self,
            text = "Image Buffer (sec):", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 6, sticky = 'W')   
        tk.Entry(self, textvariable = self.buffer_dur, width = 17).grid(row = 6, column = 1)
        tk.Label(
            self,
            text = "Image Width (pix):", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 7, sticky = 'W')   
        tk.Entry(self, textvariable = self.img_width, width = 17).grid(row = 7, column = 1)
        tk.Label(
            self,
            text = "Image Height (pix):", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 8, sticky = 'W')   
        tk.Entry(self, textvariable = self.img_height, width = 17).grid(row = 8, column = 1)
        tk.Label(
            self,
            text = "Image X Offest (pix):", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 9, sticky = 'W')   
        tk.Entry(self, textvariable = self.offset_x, width = 17).grid(row = 9, column = 1)
        tk.Label(
            self,
            text = "Image Y Offset (pix):", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 10, sticky = 'W')   
        tk.Entry(self, textvariable = self.offset_y, width = 17).grid(row = 10, column = 1)
        tk.Label(
            self,
            text = "Downsampling:", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e").grid(row = 11, sticky = 'W') 
        self.downsampling_menu = tk.OptionMenu(
            self,
            self.downsampling,
            "XI_DWN_1x1",
            "XI_DWN_2x2")
        self.downsampling_menu.configure(width = 12, anchor = "w")
        self.downsampling_menu.grid(row = 11, column = 1)
        tk.Button(
            self,
            text = "Start Streaming",
            font = 'Arial 10 bold',
            width = 14,
            command = self.start_stream_callback
            ).grid(row = 12, column = 0, sticky = "e")
        tk.Button(
            self,
            text = "Stop Streaming",
            font = 'Arial 10 bold',
            width = 14,
            command = self.stop_stream_callback
            ).grid(row = 13, column = 0, sticky = "e")
        tk.Button(
            self,
            text = "Load POIs",
            font = 'Arial 10 bold',
            width = 14,
            command = self.load_pois_callback
            ).grid(row = 12, column = 1)
        tk.Button(
            self,
            text = "Save POIs",
            font = 'Arial 10 bold',
            width = 14,
            command = self.save_pois_callback
            ).grid(row = 13, column = 1)
        tk.Button(
            self,
            text = "Add POIs",
            font = 'Arial 10 bold',
            width = 14,
            command = self.add_pois_callback
            ).grid(row = 12, column = 2)
        tk.Button(
            self,
            text = "Remove POIs",
            font = 'Arial 10 bold',
            width = 14,
            command = self.remove_pois_callback
            ).grid(row = 13, column = 2)
        tk.Button(
            self,
            text = "Capture Image",
            font = 'Arial 10 bold',
            width = 14,
            command = self.capture_image_callback
            ).grid(row = 14, column = 0,sticky = "e")
        tk.Button(
            self,
            text = "Start Record",
            font = 'Arial 10 bold',
            width = 14,
            command = self.start_rec_callback
            ).grid(row = 14, column = 1)
        tk.Button(
            self,
            text = "Stop Record",
            font = 'Arial 10 bold',
            width = 14,
            command = self.stop_rec_callback
            ).grid(row = 14, column = 2)        
        tk.Label(
            self,
            text = "POI Threshold (stdev):", 
            font = 'Arial 10 bold', 
            bg = "white",
            width = 23,
            anchor = "e"
            ).grid(row = 15, sticky = 'W')   
        tk.Entry(self, textvariable = self.poi_threshold, width = 17).grid(row = 15, column = 1)
        tk.Button(
            self,
            text = "Toggle Lights", 
            font = 'Arial 10 bold',
            width = 14, 
            command = self.toggle_lights_callback
            ).grid(row = 16, column = 1)

    # Callbacks -----------------------------------------------------------------------------------

    def start_stream_callback(self):
        """Connect the cameras (if needed) and begin live streaming.

        On first use, the current GUI field values are copied into the
        ``CameraSettings`` section of the config before the camera
        interface is started. If a stream is already running, a warning
        dialog is shown instead.
        """
        if not self.cams_connected:
            cam_settings = self.config['CameraSettings']
            # Pull each GUI field into the config, casting to its expected type.
            cam_settings['num_cams'] = int(self.num_cams.get())
            cam_settings['fps'] = int(self.fps.get())
            cam_settings['exposure'] = int(self.exposure.get())
            cam_settings['gain'] = float(self.gain.get())
            cam_settings['trigger_source'] = self.trigger_source.get()
            cam_settings['gpo_mode'] = self.gpo_mode.get()
            cam_settings['img_width'] = int(self.img_width.get())
            cam_settings['img_height'] = int(self.img_height.get())
            cam_settings['offset_x'] = int(self.offset_x.get())
            cam_settings['offset_y'] = int(self.offset_y.get())
            cam_settings['downsampling'] = self.downsampling.get()
            self.cams = camint.start_interface(self.config)
            self.cams_connected = True
            self.img = camint.init_image()
        if self.streaming:
            tkMessageBox.showinfo("Warning", "Already streaming.")
        else:
            self._start_stream()

    def stop_stream_callback(self):
        """Stops triggering and displaying new images."""
        # The _refresh loop checks this flag on each tick and stops
        # rescheduling itself once it is False; cameras stay connected.
        self.streaming = False       


    def load_pois_callback(self):
        """Display previously saved pixels-of-interest (drawn in green).

        Requires an active stream; if no POIs have been saved yet, or no
        stream is running, an informational warning is shown instead.
        """
        if not self.streaming:
            tkMessageBox.showinfo("Warning", "Must be streaming to load POIs.")
            return
        saved = self.config['CameraSettings']['saved_pois']
        if len(saved) > 0:
            self.saved_pois = saved
            self.draw_saved = True
        else:
            tkMessageBox.showinfo("Warning", "No saved POIs.")

    def add_pois_callback(self):
        """Enter POI-adding mode: clicked pixels become new (red) POIs.

        Only available while streaming; otherwise a warning is shown.
        """
        if not self.streaming:
            tkMessageBox.showinfo("Warning", "Must be streaming to add POIs.")
            return
        self.add_pois = True
        self.remove_pois = False

    def remove_pois_callback(self):
        """Enter POI-removal mode so clicks delete nearby POIs.

        Removals of saved POIs only take effect in the configuration file
        once "Save POIs" is pressed. Requires an active stream and at
        least one saved or added POI.
        """
        if not self.streaming:
            tkMessageBox.showinfo("Warning", "Must be streaming to remove POIs.")
            return
        if len(self.added_pois) + len(self.saved_pois) > 0:
            self.add_pois = False
            self.remove_pois = True
        else:
            tkMessageBox.showinfo("Warning", "No POIs to remove.")

    def save_pois_callback(self):
        """Merge added POIs into the saved set and write them to config.

        After saving, the per-camera added-POI lists are cleared.
        """
        num_cams = self.config['CameraSettings']['num_cams']
        for cam in range(num_cams):
            # Extend in place so existing references to the inner lists
            # observe the update, matching prior behavior.
            self.saved_pois[cam] += self.added_pois[cam]
        self.config['CameraSettings']['saved_pois'] = self.saved_pois
        self.added_pois = [[] for _ in range(num_cams)]

    def capture_image_callback(self):
        """Request a single frame capture from the live stream.

        The streaming refresh loop services this request by writing the
        next composite frame to the `calibration_images` folder of the
        data output directory. Warns if not currently streaming.
        """
        if not self.streaming:
            tkMessageBox.showinfo("Warning", "Must be streaming to capture images.")
            return
        self.capture = True

    def start_rec_callback(self):
        """Start recording a calibration video.

        Copies the current GUI field values into the ``CameraSettings``
        config section, connects the cameras, and opens a timestamped MP4
        in the `calibration_videos` folder of the data output directory.
        Recording is refused while streaming (running both degrades
        framerates).
        """
        if self.streaming:
            tkMessageBox.showinfo("Warning", "Shouldn't record while streaming. Bad framerates!")
            return
        settings = self.config['CameraSettings']
        settings['num_cams'] = int(self.num_cams.get())
        settings['fps'] = int(self.fps.get())
        settings['exposure'] = int(self.exposure.get())
        settings['gain'] = float(self.gain.get())
        settings['trigger_source'] = self.trigger_source.get()
        settings['gpo_mode'] = self.gpo_mode.get()
        settings['img_width'] = int(self.img_width.get())
        settings['img_height'] = int(self.img_height.get())
        settings['offset_x'] = int(self.offset_x.get())
        settings['offset_y'] = int(self.offset_y.get())
        # Frames from all cameras are hstacked side-by-side, so the output
        # width is num_cams * img_width. Kept as a (width, height) tuple in
        # config for backward compatibility with existing readers.
        self.output_params = (
            settings['num_cams'] * settings['img_width'],
            settings['img_height'])
        settings['output_params'] = self.output_params
        self.cams = camint.start_interface(self.config)
        self.cams_connected = True
        self.img = camint.init_image()
        self.calibration_path = self.config['ReachMaster']['data_dir'] + "/calibration_videos/"
        if not os.path.isdir(self.calibration_path):
            os.makedirs(self.calibration_path)
        self.vid_fn = self.calibration_path + str(datetime.datetime.now()) + '.mp4'
        # BUG FIX: output_params is a tuple, so the original
        # `**self.output_params` raised TypeError (`**` requires a
        # mapping). Hand the dimensions to WriteGear via its
        # "-output_dimensions" FFmpeg parameter instead.
        self.video = WriteGear(
            output_filename = self.vid_fn,
            compression_mode = True,
            logging=False,
            **{"-output_dimensions": self.output_params})
        # Inter-frame delay in ms derived from the configured FPS.
        self.delay = int(np.round(1.0/float(settings['fps'])*1000.0))
        self.record = True
        self._rec()

    def stop_rec_callback(self):
        """Stops a video recording."""
        # Stop the _rec loop first so no further frames are written,
        # then finalize the video file and release the cameras.
        self.record = False
        self.video.close()
        camint.stop_interface(self.cams)
        self.cams_connected = False

    def toggle_lights_callback(self):
        """Toggle the neopixel lights via the experiment controller.

        Shows a warning dialog if the experiment controller is not
        connected.
        """
        if not self.exp_connected:
            tkMessageBox.showinfo("Warning", "Experiment controller not connected.")
            return
        expint.toggle_lights(self.exp_controller)

    # private methods -------------------------------------------------------------------------------

    def _on_stream_quit(self):
        """Tear down streaming state when a camera preview window is closed."""
        # Stop the refresh loop and POI overlays before destroying the
        # per-camera windows and releasing the camera interface.
        self.streaming = False          
        self.poi_active = False  
        self.draw_saved = False    
        for i in range(self.config['CameraSettings']['num_cams']):
            self.cam_windows[i].destroy()
        camint.stop_interface(self.cams)
        self.cams_connected = False

    def _start_stream(self):
        """Open one preview window per camera and kick off the refresh loop."""
        self.cam_windows = [0 for _ in range(self.config['CameraSettings']['num_cams'])]
        for i in range(self.config['CameraSettings']['num_cams']):
            self.cam_windows[i] = tk.Toplevel(self)
            self.cam_windows[i].title("Camera"+str(i))
            # Closing any camera window shuts down the whole stream.
            self.cam_windows[i].protocol("WM_DELETE_WINDOW", self._on_stream_quit)
            self.cam_windows[i].canvas = tk.Canvas(self.cam_windows[i], 
                width = self.config['CameraSettings']['img_width'], 
                height = self.config['CameraSettings']['img_height'])
            self.cam_windows[i].canvas.grid(row=0,column= 0)            
        # Refresh period in ms, derived from the configured frame rate.
        self.delay = int(np.round(1.0/float(self.config['CameraSettings']['fps'])*1000.0))
        # Placeholders for the per-camera PhotoImage references.
        self.photo_img = [0 for _ in range(self.config['CameraSettings']['num_cams'])]
        self.streaming = True
        self._refresh()

    def _refresh(self):
        """Grab, annotate, and display one frame per camera, then reschedule.

        Runs on tkinter's ``after`` timer while ``self.streaming`` is True.
        Also services pending image-capture requests and POI overlays.
        """
        if self.streaming:
            # Hardware-trigger the cameras for this refresh tick.
            expint.trigger_image(self.exp_controller)
            # NOTE(review): `now` is computed but never used here.
            now = str(int(round(time.time()*1000)))            
            for i in range(self.config['CameraSettings']['num_cams']):
                #display image
                npimg = camint.get_npimage(self.cams[i],self.img)
                # Convert the raw Bayer-patterned frame to RGB for display.
                npimg = cv2.cvtColor(npimg,cv2.COLOR_BAYER_BG2RGB)
                # Keep the PhotoImage referenced on self so tkinter does
                # not garbage-collect it before it is rendered.
                self.photo_img[i] = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(npimg))
                self.cam_windows[i].canvas.create_image(
                    0, 0, image = self.photo_img[i], anchor = tk.NW
                    )                
                #draw saved pixels (green)
                if self.draw_saved:
                    for poi in self.saved_pois[i]:                         
                        # A 1-px line segment renders as a single dot.
                        self.cam_windows[i].canvas.create_line(
                            poi[0], poi[1], poi[0] + 1, poi[1], width = 1, fill = 'green'
                            )
                #draw currently addded pixels (red)
                for poi in self.added_pois[i]:                        
                    self.cam_windows[i].canvas.create_line(
                        poi[0], poi[1], poi[0] + 1, poi[1], width = 1, fill = 'red'
                        )
                #draw cursor for adding/removing pois
                if self.add_pois or self.remove_pois:
                    self._draw_cursor(i)
                    # camid is bound as a default argument to avoid the
                    # late-binding-closure pitfall inside this loop.
                    self.cam_windows[i].bind(
                        '<Button-1>', lambda event, camid = i:self._draw_poi(event,camid)
                        )
                #prepare frame for possible capture
                if i == 0:
                    frame = npimg
                else:
                    # Concatenate all camera frames side-by-side.
                    frame = np.hstack((frame,npimg))
            if self.capture:
                # Service a pending capture request: save the composite
                # frame to the calibration_images folder.
                self.calibration_path = self.config['ReachMaster']['data_dir'] + "/calibration_images/"
                if not os.path.isdir(self.calibration_path):
                    os.makedirs(self.calibration_path)
                fn = "image" + str(self.img_num[0])
                cv2.imwrite('%s/%s.png' % (self.calibration_path, fn), frame)
                self.capture = False
                self.img_num[0] += 1
            self.after(self.delay,self._refresh)

    def _draw_cursor(self, i):
        """Switch camera window *i* to a crosshair cursor for POI editing.

        BUG FIX: the original called
        ``bind('<Motion>', widget.config(cursor="cross"))``, which invokes
        ``config`` immediately and hands its (non-callable) return value to
        ``bind`` — so no motion handler was ever installed and the only
        real effect was the one-time cursor change. Setting the cursor
        option directly achieves the intended effect without the bogus
        binding.
        """
        self.cam_windows[i].config(cursor = "cross")

    def _draw_poi(self, event, camid):
        """Handle a click in camera window *camid* while editing POIs.

        In add mode the clicked pixel is appended to the added list; in
        remove mode any saved or added POI within 5 pixels of the click
        is discarded.
        """
        if self.add_pois:
            self.added_pois[camid].append([event.x, event.y])
        elif self.remove_pois:
            def _keep(poi):
                # Keep POIs farther than 5 px from the click location.
                return np.sqrt((event.x - poi[0])**2 + (event.y - poi[1])**2) > 5
            if len(self.saved_pois[camid]) > 0:
                self.saved_pois[camid] = [poi for poi in self.saved_pois[camid] if _keep(poi)]
            if len(self.added_pois[camid]) > 0:
                self.added_pois[camid] = [poi for poi in self.added_pois[camid] if _keep(poi)]

    def _rec(self):
        """Capture one multi-camera frame, write it to video, and reschedule.

        Runs on tkinter's ``after`` timer while ``self.record`` is True.
        Frames from all cameras are hstacked into one composite frame.
        """
        if self.record:
            # Hardware-trigger the cameras for this capture tick.
            expint.trigger_image(self.exp_controller)
            for i in range(self.config['CameraSettings']['num_cams']):
                npimg = camint.get_npimage(self.cams[i], self.img)
                # npimg = cv2.cvtColor(npimg,cv2.COLOR_BAYER_BG2RGB)
                if i == 0:
                    frame = npimg
                else:
                    frame = np.hstack((frame, npimg))
            self.video.write(frame)
            # BUG FIX: the original rescheduled `self.rec`, which does not
            # exist (the method is `_rec`), raising AttributeError after
            # the first frame. Also dropped the unused `now` timestamp.
            self.after(self.delay, self._rec)
Beispiel #25
0
                 backend=cv2.CAP_V4L,
                 logging=True,
                 **options).start()
writer = WriteGear(output_filename='rtsp://192.168.0.100:5541/test',
                   compression_mode=True,
                   logging=True,
                   **output_params)

i = 0
faces = []
while True:
    try:
        i += 1
        frame = stream.read()
        if frame is not None:
            # 半秒读取下人脸
            if i % 12 == 0:
                faces = detect_face(frame)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
            writer.write(frame)
        else:
            print("no frame")
    except KeyboardInterrupt:
        break

# safely close video stream
stream.stop()
writer.close()
print("time:", datetime.datetime.now().timestamp() - t.timestamp())
def process(requester, filename):
    """Re-encode *filename* with non-permitted faces overwritten.

    Face locations and per-face access policies are pulled from Redis;
    any face whose policy does not include *requester* is covered with a
    pre-rendered crop image from ``./video_faces/``. The result is
    written to ``./blur/<filename>`` via FFmpeg (WriteGear).

    Original note on the time index (kept for reference): positions are
    keyed by millisecond indices on a 250 ms grid, e.g.
    0 ... 500 ... 1000 covers the first second in quarter-second steps.
    """
    try:
        # The decode_responses flag here directs the client to convert the responses from Redis into Python strings
        r = redis.StrictRedis(host=redis_host,
                              port=redis_port,
                              password=redis_password,
                              decode_responses=True)

        # JSON maps stored in Redis hashes:
        #   video_face_map: face-id -> {msec_idx -> position dict}
        #   time_face_map:  msec_idx -> {face-id -> idx}
        video_face_map = json.loads(r.hget("video_face_map", filename))
        time_face_map = json.loads(r.hget('time_face_map', filename))

        cap = cv2.VideoCapture(video_folder + filename)
        fps = cap.get(cv2.CAP_PROP_FPS)
        # Frames per quarter second (rounded up for non-multiple-of-4 fps).
        quad_fps = int((fps + 3) / 4)

        # Round fps to the nearest integer for the writer and indexing.
        fps = int(fps + 0.5)
        # size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        #         int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        # video_writer = cv2.VideoWriter('./blur/' + filename, cv2.VideoWriter_fourcc(*'mp4v'), fps, size)

        # define (Codec,CRF,preset) FFmpeg tweak parameters for writer
        output_params = {
            "-vcodec": "libx264",
            "-crf": 5,
            "-preset": "veryfast",
            "-r": fps
        }
        video_writer = WriteGear(output_filename='./blur/' + filename,
                                 compression_mode=True,
                                 logging=False,
                                 **output_params)
        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")
        # Read until video is completed
        while cap.isOpened():
            # Capture frame-by-frame
            ret, frame = cap.read()
            if ret:
                start = time()
                # stamp = cap.get(cv2.CAP_PROP_POS_MSEC)
                frame_width, frame_height = cap.get(
                    cv2.CAP_PROP_FRAME_WIDTH), cap.get(
                        cv2.CAP_PROP_FRAME_HEIGHT)
                # Property 1 is CAP_PROP_POS_FRAMES: the index of the frame
                # to be decoded NEXT, so subtract 1 for the current frame.
                cur_frame = cap.get(1) - 1
                # Map the current frame onto the metadata time grid.
                # NOTE(review): the two branches quantize differently
                # (whole seconds vs 250 ms steps) — confirm this matches
                # how the producer keyed `time_face_map`.
                if cur_frame % fps < quad_fps:
                    msec_idx = int(cur_frame // fps * 1000)
                else:
                    msec_idx = int(cur_frame // quad_fps * 250)

                # NOTE(review): `idx` is unused in the loop body.
                for fid, idx in time_face_map[str(msec_idx)].items():
                    face_policy = json.loads(
                        r.hget('video_face_policy_map', fid))
                    # Faces the requester may see are left untouched.
                    if requester in face_policy:
                        continue
                    pos = video_face_map[fid][str(msec_idx)]
                    # Pad the detection box (~8% up/left, 20% down/right)
                    # and clamp it to the frame boundaries.
                    x1 = int(pos['x'] - 1 / 12 * pos['width'])
                    y1 = int(pos['y'] - 1 / 12 * pos['height'])
                    x2 = int(pos['x'] + 1.2 * pos['width'])
                    y2 = int(pos['y'] + 1.2 * pos['height'])
                    x1, y1 = max(x1, 0), max(y1, 0)
                    x2, y2 = min(x2,
                                 int(frame_width)), min(y2, int(frame_height))
                    # Overwrite the region with the pre-rendered crop.
                    # NOTE(review): assumes the PNG exists and its shape is
                    # exactly (y2-y1, x2-x1); cv2.imread returns None on a
                    # missing file, which would raise on the assignment.
                    crop_img = cv2.imread(
                        './video_faces/' + fid + '_' + str(msec_idx) + '.png',
                        cv2.IMREAD_COLOR)
                    frame[y1:y2, x1:x2] = crop_img
                # Display the resulting frame
                # cv2.imshow('Frame', frame)
                video_writer.write(frame)
                # end = time()
                # t = max(int(41 - 1000 * (end - start)), 1)
                # Press Q on keyboard to  exit
                # if cv2.waitKey(t) & 0xFF == ord('q'):
                #     break
            # Break the loop
            else:
                break

        cap.release()
        video_writer.close()

        cv2.destroyAllWindows()

        # for i, ts in enumerate(timestamps):
        #     print('Frame %d :' % i, ts)
        # When everything done, release the video capture object

        # Closes all the frames
        cv2.destroyAllWindows()
    except Exception as e:
        # Broad catch: this runs as a best-effort job and only reports the
        # failure. NOTE(review): consider logging the full traceback.
        print('error: ')
        print(e)
import os
import numpy as np
import cv2
from vidgear.gears import WriteGear

# Extract an 8-second clip from one camera view of a multi-camera session
# video and re-encode it losslessly.
path = '/media/pns/0e3152c3-1f53-4c52-b611-400556966cd8/PNS_data/RM15/09272019/S1/videos/'
fnIn = '2019-09-27 10:22:24.811332'
fnOut = path+'exampleS1'+'.mp4'
cap = cv2.VideoCapture(path+fnIn+'.mp4')

# Lossless H.264 output (crf 0) at a single 688x688 camera-view size.
output_params = {"-vcodec":"libx264","-crf": 0,"-output_dimensions": (688,688)}
video = WriteGear(output_filename = fnOut,compression_mode = True,logging=False,**output_params)

# Frame window: seconds 2313-2321, assuming 25 fps — TODO confirm rate.
frame1 = 2313*25
frame2 = 2321*25

for f in np.arange(frame1, frame2):
    # Seek to the exact frame before each read (named constant instead of
    # the magic number 1 used previously).
    cap.set(cv2.CAP_PROP_POS_FRAMES, f)
    ret, frame = cap.read()
    # BUG FIX: the original ignored `ret`; on a failed read `frame` is
    # None and cv2.cvtColor would raise. Stop cleanly instead.
    if not ret:
        break
    # Take the second 688-px-wide horizontal slice (one camera view),
    # collapse it to a single channel, then treat that channel as raw
    # Bayer data and convert it back to BGR.
    frame = cv2.cvtColor(frame[:, 688:(2*688)], cv2.COLOR_BGR2GRAY)
    frame = cv2.cvtColor(frame, cv2.COLOR_BAYER_BG2BGR)
    video.write(frame)

# When everything done, release the video capture object
cap.release()

# Closes all the frames
cv2.destroyAllWindows()
video.close()
Beispiel #28
0
    def renderfile(self, starttime, stoptime, outpath = "Stabilized.mp4", out_size = (1920,1080),
                   split_screen = True, hw_accel = False, bitrate_mbits = 20, display_preview = False):
        """Render the stabilized clip between *starttime* and *stoptime* (seconds).

        Each frame is undistorted with the precomputed remap tables, rotated
        by the matching per-frame stabilization transform, center-cropped to
        *out_size*, and written to *outpath* via FFmpeg. With *split_screen*
        the undistorted-but-unstabilized frame is concatenated to the right
        for comparison. *hw_accel* selects the platform's hardware H.264
        encoder; otherwise libx264 is used near-losslessly.
        """
        if hw_accel:
            # Pick the platform's hardware H.264 encoder.
            # NOTE(review): on a platform other than Darwin/Windows/Linux
            # none of these branches run and `output_params` is unbound,
            # raising NameError at the WriteGear call below.
            if platform.system() == "Darwin":  # macOS
                output_params = {
                    "-input_framerate": self.fps, 
                    # Double the width when rendering a side-by-side comparison.
                    "-vf": "scale=%sx%s" % (out_size[0]*2 if split_screen else out_size[0], out_size[1]),
                    "-vcodec": "h264_videotoolbox",
                    "-profile": "main", 
                    "-b:v": "%sM" % bitrate_mbits,
                    "-pix_fmt": "yuv420p",
                }
            elif platform.system() == "Windows":
                output_params = {
                    "-input_framerate": self.fps, 
                    "-vf": "scale=%sx%s" % (out_size[0]*2 if split_screen else out_size[0], out_size[1]),
                    "-vcodec": "h264_nvenc",
                    "-profile:v": "main",
                    "-rc:v": "cbr", 
                    "-b:v": "%sM" % bitrate_mbits,
                    "-bufsize:v": "%sM" % int(bitrate_mbits * 2),
                    "-pix_fmt": "yuv420p",
                }
            elif platform.system() == "Linux":
                output_params = {
                    "-input_framerate": self.fps, 
                    "-vf": "scale=%sx%s" % (out_size[0]*2 if split_screen else out_size[0], out_size[1]),
                    "-vcodec": "h264_vaapi",
                    "-profile": "main", 
                    "-b:v": "%sM" % bitrate_mbits,
                    "-pix_fmt": "yuv420p",
                }
            out = WriteGear(output_filename=outpath, **output_params)

        else:
            # Software encoding path (libx264).
            output_params = {
                "-input_framerate": self.fps, 
                "-vf": "scale=%sx%s" % (out_size[0]*2 if split_screen else out_size[0], out_size[1]),
                "-c:v": "libx264",
                "-crf": "1",  # Can't use 0 as it triggers "lossless" which does not allow  -maxrate
                "-maxrate": "%sM" % bitrate_mbits,
                "-bufsize": "%sM" % int(bitrate_mbits * 1.2),
                "-pix_fmt": "yuv420p",
            }
            out = WriteGear(output_filename=outpath, **output_params)
        

        # Top-left offsets for center-cropping a source frame to out_size.
        crop = (int((self.width-out_size[0])/2), int((self.height-out_size[1])/2))


        # Seek to the first frame of the requested time window.
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, int(starttime * self.fps))
        time.sleep(0.1)

        num_frames = int((stoptime - starttime) * self.fps) 


        i = 0
        while(True):
            # Read next frame
            frame_num = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))
            success, frame = self.cap.read() 

            
            print("FRAME: {}, IDX: {}".format(frame_num, i))

            if success:
                i +=1

            # Stop at the end of the window or when the per-frame
            # stabilization transforms run out.
            if i > num_frames or i == len(self.stab_transform):
                break

            if success and i > 0:
                

                # Lens-undistort via the precomputed remap tables.
                frame_undistort = cv2.remap(frame, self.map1, self.map2, interpolation=cv2.INTER_LINEAR,
                                              borderMode=cv2.BORDER_CONSTANT)


                #print(self.stab_transform[frame_num])
                # Apply this frame's stabilization rotation.
                frame_out = self.undistort.get_rotation_map(frame_undistort, self.stab_transform[frame_num])

                #frame_out = self.undistort.get_rotation_map(frame, self.stab_transform[frame_num])


                # Fix border artifacts
                frame_out = frame_out[crop[1]:crop[1]+out_size[1], crop[0]:crop[0]+out_size[0]]


                #out.write(frame_out)
                #print(frame_out.shape)

                # If the image is too big, resize it.
            #%if(frame_out.shape[1] > 1920): 
            #		frame_out = cv2.resize(frame_out, (int(frame_out.shape[1]/2), int(frame_out.shape[0]/2)));
                
                size = np.array(frame_out.shape)
                # NOTE(review): this resizes frame_out to its own current
                # (width, height) — a no-op as written.
                frame_out = cv2.resize(frame_out, (int(size[1]), int(size[0])))

                if split_screen:
                    # Fix border artifacts
                    frame_undistort = frame_undistort[crop[1]:crop[1]+out_size[1], crop[0]:crop[0]+out_size[0]]
                    frame = cv2.resize(frame_undistort, ((int(size[1]), int(size[0]))))
                    # NOTE(review): cv2.hconcat's second argument is the
                    # optional dst array; passing the literal 2 looks
                    # unintended — confirm against the OpenCV API.
                    concatted = cv2.resize(cv2.hconcat([frame_out,frame],2), (out_size[0]*2,out_size[1]))
                    out.write(concatted)
                    if display_preview:
                        cv2.imshow("Before and After", concatted)
                        cv2.waitKey(2)
                else:
                    out.write(frame_out)
                    if display_preview:
                        cv2.imshow("Stabilized?", frame_out)
                        cv2.waitKey(2)

        # When everything done, release the capture
        #out.release()
        cv2.destroyAllWindows()
        out.close()
Beispiel #29
0
def startRecording():
    """
    Record synchronized frames from the tablet and face cameras until either
    stream runs out of frames or the hardware button is pressed.

    Reads and mutates module-level globals: ``videoStreams`` (open camera
    streams), ``fileNameList``, ``startTime``/``stopTime`` and the two
    ``WriteGear`` writers.

    Returns:
        ``False`` if any error occurred; ``None`` on normal completion
        (original contract preserved).
    """
    try:
        global fileNameList
        global videoStreams
        global startTime
        global stopTime
        global writer_TabletCam
        global writer_FaceCam

        # The face camera is the last stream opened, the tablet camera the
        # one opened just before it.
        stream_FaceCam = videoStreams[-1]
        stream_TabletCam = videoStreams[-2]

        # FFmpeg output parameters for each WriteGear writer.
        output_params1 = {
            "-vcodec": "libx264",
            "-preset": "slow",
            "-bitrate": 2000000,
            "-input_framerate": stream_FaceCam.framerate,
        }
        output_params2 = {"-input_framerate": stream_TabletCam.framerate}

        startTime = getTime()

        changeLEDtoRed()  # visual "recording in progress" indicator

        print("\n")
        print("Now Recording \n")

        fileNameList = getNewFileNames()

        writer_FaceCam = WriteGear(output_filename=fileNameList[0],
                                   **output_params1)
        writer_TabletCam = WriteGear(output_filename=fileNameList[1],
                                     **output_params2)

        stream_TabletCam.start()
        stream_FaceCam.start()

        # Record frame by frame.
        while True:
            frame_TabletCam = stream_TabletCam.read()
            frame_FaceCam = stream_FaceCam.read()

            # Stop as soon as either camera stops delivering frames.
            if frame_TabletCam is None or frame_FaceCam is None:
                stopTime = getTime()
                break

            # The tablet camera is mounted upside-down — rotate before writing.
            frame_TabletCam = cv2.rotate(frame_TabletCam, cv2.ROTATE_180)
            writer_TabletCam.write(frame_TabletCam)
            writer_FaceCam.write(frame_FaceCam)

            # Hardware button stops the recording.
            if button.is_pressed:
                stopTime = getTime()
                break

    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any other failure reports False as before.
        return False
# Example #30
    def run_opencv(self, cap, from_time, to_time, duration_time, out_path):
        """Cut the segment [from_time, to_time] from *cap* and write it to *out_path*.

        Writes with vidgear's ``WriteGear`` (FFmpeg) when ``self.conversion``
        contains ``"params"``, otherwise with a plain ``cv2.VideoWriter`` using
        the fourcc in ``self.conversion["code"]``.  When ``self.override_fps``
        is set, frames are selected by their MSEC timestamp instead of the
        container frame counter, compensating for variable-FPS input.

        Args:
            cap: an opened ``cv2.VideoCapture``.
            from_time: segment start (timedelta).
            to_time: segment end (timedelta, inclusive).
            duration_time: segment duration (unused here; kept for interface
                compatibility with callers).
            out_path: path of the output video file.
        """
        # Source geometry — only needed for the OpenCV writer below.
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float

        # Honour the overwrite policy before doing any work.
        if utils.exists_file(out_path) and self.overwrite_flag == "-n":
            tqdm.write(f"File exists (overwrite = False): {out_path}")
            return

        from_frame = self.secs_to_frame(self.timedelta_to_secs(from_time))
        to_frame = self.secs_to_frame(self.timedelta_to_secs(to_time))
        num_frames = (to_frame - from_frame) + 1  # frames start at 0

        pbar = tqdm(total=num_frames, desc="cut", leave=False)

        # Pick the start frame and output FPS depending on the seek method.
        if self.override_fps is not None:
            # Map the target frame onto the container's frame counter and
            # back off 100 frames as a margin for rounding error.
            from_compensated = round(
                (from_frame / self.override_fps) * self.fps) - 100
            current_frame = max(from_compensated, 0)
            out_fps = self.override_fps
        else:
            current_frame = from_frame
            out_fps = self.fps

        # WriteGear (FFmpeg) is used only when explicit params are provided.
        use_WriteGear = "params" in self.conversion
        if use_WriteGear:
            self.conversion['params']['-input_framerate'] = out_fps
            # https://github.com/abhiTronix/vidgear
            # https://stackoverflow.com/questions/38686359/opencv-videowriter-control-bitrate
            writer = WriteGear(output_filename=out_path,
                               compression_mode=True,
                               logging=False,
                               **self.conversion['params'])
        else:
            # Plain OpenCV writer; "code" may be a fourcc string or int.
            if isinstance(self.conversion["code"], str):
                fourcc = cv2.VideoWriter_fourcc(*self.conversion["code"])
            else:
                fourcc = self.conversion["code"]
            writer = cv2.VideoWriter(out_path, fourcc, out_fps,
                                     (int(width), int(height)))

        # Seek to the start frame and verify the position actually took.
        assert cap.set(cv2.CAP_PROP_POS_FRAMES, current_frame)
        assert current_frame == cap.get(cv2.CAP_PROP_POS_FRAMES)

        # try/finally guarantees the writer and progress bar are released
        # even if reading or writing raises mid-loop (fixes a resource leak).
        try:
            while cap.isOpened():
                ret = cap.grab()  # grab() is cheaper than decoding every frame
                if not ret:
                    break
                frame = None

                if self.override_fps is not None:
                    # Select frames by wall-clock position (variable-FPS safe).
                    millis = cap.get(cv2.CAP_PROP_POS_MSEC)
                    calc_frame = round((millis / 1000.0) * self.override_fps)
                    if from_frame <= calc_frame <= to_frame:
                        _, frame = cap.retrieve()
                    elif calc_frame > to_frame:
                        # Sequence fully extracted.
                        break
                    # calc_frame < from_frame: still in the margin, skip frame.
                else:
                    if current_frame > to_frame:
                        break
                    _, frame = cap.retrieve()

                # Only decoded, in-range frames are written.
                if frame is not None:
                    writer.write(frame)

                # Allow manual abort with "q" (no-op without a window).
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    break

                current_frame += 1
                if frame is not None:
                    pbar.update(1)
        finally:
            pbar.close()
            if use_WriteGear:
                writer.close()
            else:
                writer.release()
            cv2.destroyAllWindows()