Example no. 1
def ffmpeg_export(p_indir):
    # the real meat, this is where i struggle with ffmpeg-python and occasionally succeed
    des_w = int(entry_w.get())
    des_h = int(entry_h.get())
    if (des_w > sel_w) or (des_h > sel_h):
        messagebox.showerror(
            title="Error", message="Desired size is larger than source size!")
        return
    sel_ratio = sel_w / sel_h
    des_ratio = des_w / des_h
    # safe placeholder values for when src and output have the same ratio
    x_offset = 0
    y_offset = 0
    adj_w = sel_w
    adj_h = sel_h
    if (crop_h.get() == 1) and (sel_ratio != des_ratio):
        adj_w = des_ratio * sel_h  # get the new width for the desired aspect ratio
        x_offset = (sel_w - adj_w) / 2  # centering math
    elif (crop_h.get() == 0) and (sel_ratio != des_ratio):
        adj_h = sel_w / des_ratio  # get the new height for the desired aspect ratio
        y_offset = (sel_h - adj_h) / 2
    for x in files:
        x = p_indir + os.sep + x
        progress_ffmpeg.config(text='Rendering: ' + os.path.split(x)[1])
        frame_ffmpeg.update()
        # ffmpeg complains if we try to output to the same file as our input
        # so we output to a different file and replace the input afterwards
        outdir = x + '~.png'
        if overwrite_og.get() == 0 and use_custom_outdir.get() == 0:
            # results in {old directory}_{width}x{height} in the old directory's parent dir
            newdir = os.path.dirname(str(x)) + '_' + str(des_w) + 'x' + str(des_h)
            outdir = newdir + os.sep + str(os.path.split(x)[1])
            if not os.path.isdir(newdir):
                os.mkdir(newdir)
        elif use_custom_outdir.get() == 1:
            outdir = custom_outdir + os.sep + str(os.path.split(x)[1])
        stream = ffmpeg.input(str(x), nostdin=None)
        stream = ffmpeg.crop(stream, x_offset, y_offset, adj_w, adj_h)
        stream = ffmpeg.filter(
            stream, "scale", des_w, des_h,
            flags="bilinear")  # TODO: allow user to choose algorithm
        stream = ffmpeg.output(
            stream, outdir, hide_banner=None
        )  # TODO: find a way to stop making a new shell for each op
        stream = ffmpeg.overwrite_output(stream)
        process = ffmpeg.run_async(stream)
        # check again because overwriting when we're not supposed to is bad mkay
        if overwrite_og.get() == 1:
            # wait for ffmpeg to finish writing before we replace the input file
            process.wait()
            os.remove(x)
            os.rename(x + '~.png', x)
    progress_ffmpeg.config(text='Rendering: Done!')
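For reference, the crop-then-scale chain above can be exercised on its own, without the Tkinter globals. A minimal sketch, assuming a single made-up input file and target size (all names and numbers below are placeholders), using the blocking ffmpeg.run instead of run_async:

import ffmpeg

# Hypothetical values standing in for the GUI fields above.
src, dst = 'input.png', 'output.png'
sel_w, sel_h = 1920, 1080           # source size
des_w, des_h = 1000, 1000           # desired size (1:1)

adj_w = int(des_w / des_h * sel_h)  # crop width that matches the target ratio
x_offset = (sel_w - adj_w) // 2     # keep the crop centered

stream = ffmpeg.input(src)
stream = ffmpeg.crop(stream, x_offset, 0, adj_w, sel_h)
stream = ffmpeg.filter(stream, 'scale', des_w, des_h, flags='bilinear')
stream = ffmpeg.output(stream, dst)
ffmpeg.run(ffmpeg.overwrite_output(stream))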
Example no. 2
def _save_video(path, array, fps=25, video_codec='libx264'):

    # Check the extension of the given file
    path = _check_extensions([path], extensions=['.mp4'])[0]

    # Get the information from the array
    n, height, width, channels = array.shape

    # Initialize the process
    process = ffmpeg.input('pipe:',
                           format='rawvideo',
                           pix_fmt='rgb24',
                           s='{}x{}'.format(width, height))
    process = ffmpeg.output(process,
                            path,
                            pix_fmt='yuv420p',
                            vcodec=video_codec,
                            r=fps)
    process = ffmpeg.overwrite_output(process)
    process = ffmpeg.run_async(process, pipe_stdin=True)

    # Save all the frames
    for frame in array:
        process.stdin.write(frame.tobytes())

    # Terminate the process
    process.stdin.close()
    process.wait()
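A minimal usage sketch for _save_video (not part of the original example), assuming NumPy is installed, the ffmpeg binary is on PATH, and the module's _check_extensions helper is importable alongside it:

import numpy as np

# 50 random 320x240 RGB frames, shaped (N, height, width, channels).
frames = (np.random.rand(50, 240, 320, 3) * 255).astype(np.uint8)
_save_video('demo.mp4', frames, fps=25)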
Example no. 3
    def start_recording(self, output_file, filters=None, overwrite=True):
        """Starts recording to a given output video file.

        Parameters
        ----------
        output_file : :obj:`str`
            filename to write video to
        filters : dict of dicts
            filters to apply to the video
        overwrite : bool
            whether to overwrite the output file if it exists
        """
        if self._recording:
            raise Exception(
                "Cannot record a video while one is already recording!")
        self._recording = True
        stream = ffmpeg.input(
            "/dev/video{}".format(self._device),
            f=self._format,
            s="{}x{}".format(*self._res),
            framerate=self._fps,
        )
        for filter_name, filter_kwargs in (filters or {}).items():
            stream = ffmpeg.filter(stream, filter_name, **filter_kwargs)
        stream = ffmpeg.output(stream, output_file)
        self._video = ffmpeg.run_async(stream,
                                       quiet=True,
                                       overwrite_output=overwrite)
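A hedged usage sketch for start_recording: the filters argument maps ffmpeg filter names to their keyword arguments. The instance name and the filter choices below are illustrative, not from the original project:

# 'camera' stands in for an instance of the class that owns start_recording.
camera.start_recording(
    'capture.mp4',
    filters={
        'hflip': {},                    # mirror the image, no arguments
        'scale': {'w': 640, 'h': 480},  # downscale to 640x480
    },
)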
Example no. 4
    def _yield_streaming_content(self, videoid, container_format='mp4'):
        video_container, audio_container, output_args = self._get_output_video_parameters(
            container_format)
        video_format, audio_format = self._get_youtube_urls(
            videoid, video_container, audio_container)
        video_input = ffmpeg.input(video_format['url'])
        audio_input = ffmpeg.input(audio_format['url'])
        output = ffmpeg.output(video_input,
                               audio_input,
                               '-',
                               codec='copy',
                               format=container_format,
                               **output_args)
        #print(ffmpeg.compile(output))
        process = ffmpeg.run_async(output, pipe_stdout=True, quiet=True)

        def terminate_process():
            print("Killing ffmpeg process because of timeout.")
            process.kill()

        watchdog = WatchDog(terminate_process, 30)
        watchdog.run()

        def content():
            while True:
                read_data = process.stdout.read(1024)
                yield read_data
                watchdog.process_still_active()
                if len(read_data) == 0 and process.poll() is not None:
                    break
            process.kill()
            print("Terminated ffmpeg")

        return content()
Example no. 5
    def main(self, **kwargs):
        """Start the record"""
        super().create()
        now = datetime.now().strftime('%Y%m%d%H%M.mp4')
        filename = os.path.join(self.settings['path'], 'video_' + now)
        settings = self.settings['record']
        stream = ffmpeg.input(**settings['input']).setpts(settings['setpts'])
        stream = ffmpeg.output(stream, filename, **settings['output'])
        process = ffmpeg.run_async(
            stream,
            pipe_stdin=True,
            pipe_stdout=True,
            pipe_stderr=self.args.show,
            overwrite_output=True,
        )
        try:
            if self.args.time:
                self.logger.info('record the screen for %i sec',
                                 self.args.time)
                for _ in range(self.args.time):
                    sleep(1)
            else:
                input('press enter to finish ')
        except KeyboardInterrupt:
            self.logger.info('break with KeyboardInterrupt')

        finally:
            self.logger.info('save file')
            process.communicate(input=b"q")
Example no. 6
def test__run_async(mocker, pipe_stdin, pipe_stdout, pipe_stderr, cwd):
    process__mock = mock.Mock()
    popen__mock = mocker.patch.object(subprocess,
                                      'Popen',
                                      return_value=process__mock)
    stream = _get_simple_example()
    process = ffmpeg.run_async(
        stream,
        pipe_stdin=pipe_stdin,
        pipe_stdout=pipe_stdout,
        pipe_stderr=pipe_stderr,
        cwd=cwd,
    )
    assert process is process__mock

    expected_stdin = subprocess.PIPE if pipe_stdin else None
    expected_stdout = subprocess.PIPE if pipe_stdout else None
    expected_stderr = subprocess.PIPE if pipe_stderr else None
    (args, ), kwargs = popen__mock.call_args
    assert args == ffmpeg.compile(stream)
    assert kwargs == dict(
        stdin=expected_stdin,
        stdout=expected_stdout,
        stderr=expected_stderr,
        cwd=cwd,
    )
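Neither _get_simple_example nor the pipe_*/cwd parameters are defined in this excerpt; the parameters are presumably parametrized over True/False (plus a working directory), and the helper could be as small as this hedged placeholder:

import ffmpeg


def _get_simple_example():
    # Placeholder stream spec; any input -> output chain satisfies the
    # ffmpeg.compile(stream) assertion in the test above.
    return ffmpeg.input('dummy.mp4').output('dummy_out.mp4')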
Example no. 7
    def run(self, quiet=True):
        """
        Runs an endless loop of consuming images from the source and sending them to the Device

        Syntax:
            webcam = WebcamSource()
            fvd.initInput(webcam)
            fvd.initOutput(69, 1920, 1080)
            fvd.run()
        """

        if self.output is None:
            raise Exception("Specify output first")

        self.running = True

        self.ffmpeg_proc = ffmpeg.run_async(self.output,
                                            pipe_stdin=True,
                                            quiet=quiet)
        img_gen = self.vid_source.generator()

        while self.running:
            image = next(img_gen)

            if image is None:
                self.ffmpeg_proc.terminate()
                self.running = False
                break

            # convert only after the None check; calling .astype on None would crash
            self.ffmpeg_proc.stdin.write(image.astype(np.uint8).tobytes())

            self.__delay_til_next_frame()
Example no. 8
    def __read_buffer(self):
        self.stream = ffmpeg.run_async(self.source_stream, pipe_stdout=True)
        while self.stream.poll() is None:
            buffer = self.stream.stdout.read(self.read_byte_size)

            if self.auto_stop and time.time() - self.end_time > self.auto_stop:
                break

            if not buffer:
                continue

            try:
                self.pool_frames.put(buffer, timeout=self.auto_stop)
            except Full:
                break

        # Stop record
        try:
            self.pool_frames.get_nowait()
        except IndexError:
            pass
        except Empty:
            pass

        self.pool_frames.put(self.__STOP_BYTES)
        self.stream.kill()
        if self.stream.stdout:
            self.stream.stdout.close()
        self.stream = None
        self.thread = None
Example no. 9
    def __init__(self, stream, size, pixel_format=PIXEL_RGB_FORMAT, get_latest=False, cache_frames=30):
        self.stream = ffmpeg.run_async(stream, pipe_stdout=True)
        self.size = size
        self.pix_fmt = pixel_format

        self.pool_frames = self.Box() if get_latest else Queue(maxsize=cache_frames)
        self.thread = Thread(target=self.read_buffer)
        self.read_byte_size = self.size[0] * self.size[1] * 3
Example no. 10
def record_stream(stream, recording_time):
    # Launch video recording
    popen = ffmpeg.run_async(stream, pipe_stdin=True)
    time.sleep(recording_time * 60)
    # Stop video recording
    popen.communicate(str.encode("q"))  # equivalent to pressing "q" on ffmpeg's stdin
    # To be sure the process ends, wait 3 seconds and then terminate it (sends SIGTERM)
    time.sleep(3)
    popen.terminate()
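A hedged usage sketch for record_stream, assuming a Linux/X11 machine; the screen-grab source and output name are illustrative placeholders:

import ffmpeg

stream = ffmpeg.input(':0.0', f='x11grab', s='1280x720', framerate=25)
stream = ffmpeg.output(stream, 'screen.mp4', vcodec='libx264', pix_fmt='yuv420p')
record_stream(stream, recording_time=1)  # record for 1 minute, then send "q"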
Example no. 11
def create_popen(path_file_input: Path) -> Popen[bytes]:
    stream = ffmpeg.input(path_file_input)
    stream = ffmpeg.filter(stream, "scale", 768, -1)
    stream_spec = ffmpeg.output(stream, "pipe:",
                                f="rawvideo").global_args("-n")
    # Reason: requires an update on the ffmpeg-python side.
    return ffmpeg.run_async(stream_spec,
                            pipe_stdin=True,
                            pipe_stdout=True,
                            pipe_stderr=True)  # type: ignore
Example no. 12
def save_video(fname, images, output_fps=30, vcodec='libx264', filters=''):
    assert isinstance(images, np.ndarray), "images should be np.array: NHWC"
    num_frames, height, width, channels = images.shape
    stream = ffmpeg.input('pipe:', format='rawvideo', 
                          pix_fmt='rgb24', s='{}x{}'.format(width, height))
    stream = ffmpeg.filter(stream, 'setpts', '2*PTS')  # 2*PTS is for slower playback
    stream = ffmpeg.output(stream, fname, pix_fmt='yuv420p', vcodec=vcodec, r=output_fps)
    stream = ffmpeg.overwrite_output(stream)
    process = ffmpeg.run_async(stream, pipe_stdin=True)
    for frame in tqdm(images, desc='writing video to %s' % fname):
        process.stdin.write(frame.astype(np.uint8).tobytes())
    process.stdin.close()
    process.wait()
Example no. 13
    def start_slink_only_process(self):
        print('Starting Streamlink Only Process...')
        self.thread_slk.start()
        while True:
            self.dec_process = ffmpeg.input('pipe:', f='mpegts')
            self.dec_process = ffmpeg.output(self.dec_process.video,
                                             f"udp://{self.target_url}",
                                             f='mpegts')
            self.dec_process = ffmpeg.run_async(self.dec_process,
                                                quiet=True,
                                                pipe_stdin=True)
            time.sleep(60)
            self.dec_process.terminate()
Example no. 14
def main() -> None:
    global flag
    global sub_process
    filming = False
    # init ros node
    rospy.init_node("image_recorder", anonymous=True, disable_signals=True)
    signal.signal(signal.SIGTERM, handler_stop)
    signal.signal(signal.SIGINT, handler_stop)

    # create camera object and Configure streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, WIDTH, HEIGHT, rs.format.bgr8, 30)
    rospy.loginfo("node is up")

    # Start streaming
    pipeline.start(config)

    # start-stop Subscriber
    ss_sub = rospy.Subscriber("/film", String, callback, queue_size=1)
    try:
        while not rospy.is_shutdown():
            frames = pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()
            if not color_frame:
                continue

            if flag == "start":  # start filming a video
                process = subprocess_create()
                rospy.loginfo("start filming")
                sub_process = ffmpeg.run_async(process, pipe_stdin=True)
                rospy.sleep(2)
                flag = " "
                filming = True

            if flag == "stop":  # stop filming a video
                filming = False
                sub_process.stdin.close()
                sub_process.wait()
                rospy.loginfo("stop filming")
                flag = " "
                rospy.sleep(5)

            if filming:
                color_image = np.asanyarray(color_frame.get_data())
                sub_process.stdin.write(color_image[:, :, ::-1].astype(np.uint8).tobytes())

    finally:
        # Stop streaming
        pipeline.stop()
        rospy.signal_shutdown("error!")
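subprocess_create() is not shown in this example. A hedged guess at what it builds, given that the loop pipes raw RGB frames of WIDTH x HEIGHT into stdin; the output filename, frame rate and codec are assumptions:

def subprocess_create():
    # Hypothetical reconstruction: a rawvideo stdin pipe matching the frames
    # written above, encoded to an H.264 mp4. Filename and fps are guesses.
    stream = ffmpeg.input('pipe:',
                          format='rawvideo',
                          pix_fmt='rgb24',
                          s='{}x{}'.format(WIDTH, HEIGHT),
                          framerate=30)
    stream = ffmpeg.output(stream, 'recording.mp4',
                           pix_fmt='yuv420p', vcodec='libx264')
    return ffmpeg.overwrite_output(stream)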
Example no. 15
    def start_stream_decoder(self):
        print('Starting Stream Decoder...')
        self.dec_process = ffmpeg.input(
            'pipe:',
            #blocksize=BSIZE,
            f='mpegts',
            vsync='drop')
        self.dec_process = ffmpeg.output(
            self.dec_process.video,
            'pipe:',
            #blocksize=BSIZE,
            f='rawvideo',
            pix_fmt='bgr24')
        self.dec_process = ffmpeg.run_async(self.dec_process,
                                            quiet=True,
                                            pipe_stdin=True,
                                            pipe_stdout=True)
        self.thread_slk.start()
Example no. 16
def save_video(fname, images, output_fps=30, vcodec="libx264", filters=""):
    assert isinstance(images, np.ndarray), "images should be np.array: NHWC"
    num_frames, height, width, channels = images.shape
    stream = ffmpeg.input("pipe:",
                          format="rawvideo",
                          pix_fmt="rgb24",
                          s="{}x{}".format(width, height))
    stream = ffmpeg.filter(stream, "setpts",
                           "2*PTS")  # 2*PTS is for slower playback
    stream = ffmpeg.output(stream,
                           fname,
                           pix_fmt="yuv420p",
                           vcodec=vcodec,
                           r=output_fps)
    stream = ffmpeg.overwrite_output(stream)
    process = ffmpeg.run_async(stream, pipe_stdin=True)
    for frame in tqdm(images, desc="writing video to %s" % fname):
        process.stdin.write(frame.astype(np.uint8).tobytes())
    process.stdin.close()
    process.wait()
Example no. 17
    def start_stream_encoder(self):
        print('Starting Stream Encoder...')
        self.enc_process = ffmpeg.input(
            'pipe:',
            #blocksize=BSIZE,
            f='rawvideo',
            pix_fmt='bgr24',
            s=f'{self.width}x{self.height}')
        self.enc_process = ffmpeg.output(
            self.enc_process.video,
            f"udp://{self.target_url}",
            f='mpegts',
            #framerate=30,
            #maxrate='1M', bufsize='4M' )
            maxrate='2M',
            bufsize='4M')
        self.enc_process = ffmpeg.overwrite_output(self.enc_process)
        self.enc_process = ffmpeg.run_async(self.enc_process,
                                            quiet=False,
                                            pipe_stdin=True)
Example no. 18
    def run(self, quiet=True):
        """
        Runs an endless loop of consuming images from the source and sending them to the Device

        Syntax:
            webcam = WebcamSource()
            fvd.initInput(webcam)
            fvd.initOutput(69, 1920, 1080)
            fvd.run()
        """

        if self.output is None:
            raise Exception("Specify output first")

        self.running = True

        self.ffmpeg_proc = ffmpeg.run_async(self.output,
                                            pipe_stdin=True,
                                            quiet=quiet)
        img_gen = self.vid_source.generator()

        while self.running:
            image = next(img_gen)

            if image is None:
                self.ffmpeg_proc.terminate()
                self.running = False
                break
            try:
                # convert only after the None check; calling .astype on None would crash
                self.ffmpeg_proc.stdin.write(image.astype(np.uint8).tobytes())
            except BrokenPipeError as error:
                # ffmpeg probably crashed, so print ffmpeg's error output
                print(
                    "Couldn't write image to ffmpeg, error output of ffmpeg:",
                    file=sys.stderr)
                print(b"\n".join(
                    self.ffmpeg_proc.stderr.readlines()).decode("utf-8"),
                      file=sys.stderr)
                raise error

            self.__delay_til_next_frame()
Example no. 19
def save_video(args,
               fname,
               images,
               output_fps=30,
               vcodec='libx264',
               filters=''):
    assert isinstance(images, np.ndarray), "images should be np.array: NHWC"
    num_frames, height, width, channels = images.shape

    if args.img_datatype in (cv2.CV_8UC1, cv2.CV_8UC3):
        target_datatype = np.uint8
        pixel_format = 'rgb24'
    elif args.img_datatype == cv2.CV_16UC1:
        target_datatype = np.uint16
        pixel_format = 'rgb48'
    else:
        raise ValueError('Unknown image data type.')

    stream = ffmpeg.input('pipe:',
                          format='rawvideo',
                          pix_fmt=pixel_format,
                          s='{}x{}'.format(width, height))
    stream = ffmpeg.filter(stream, 'setpts',
                           '2*PTS')  # 2*PTS is for slower playback
    stream = ffmpeg.output(stream,
                           fname,
                           pix_fmt='yuv420p',
                           vcodec=vcodec,
                           r=output_fps)
    stream = ffmpeg.overwrite_output(stream)
    process = ffmpeg.run_async(stream, pipe_stdin=True)

    for frame in tqdm(images, desc='writing video to %s' % fname):
        process.stdin.write(frame.astype(target_datatype).tobytes())
    process.stdin.close()
    process.wait()
Example no. 20
    def read_buffer(self):
        self.stream = ffmpeg.run_async(self.source_stream, pipe_stdout=True)
        while self.stream.poll() is None:
            buffer = self.stream.stdout.read(self.chunk_size)
            self.pool_frames.put(buffer)
Example no. 21
    def start_recording(self):
        """
        Start recording the stream defined on self.stream_uri
        :return: boolean indicating whether the recording started successfully
        """

        # USB cameras and RTSP streams are treated differently: for example, the
        # socket TCP I/O timeout (stimeout) only applies to RTSP sources
        parsed_uri = urlparse(self.stream_uri)
        if parsed_uri.scheme == "rtsp":
            stream_input = ffmpeg.input(
                self.stream_uri,
                nostdin=None,
                use_wallclock_as_timestamps=1,
                stimeout=self._def_stimeout,
                fflags="+genpts",
                rtsp_transport='tcp')  # stimeout is in microseconds
        else:
            stream_input = ffmpeg.input(self.stream_uri)

        # store the files in segments to prevent corruption
        segment_fpath = self.prepare_filepath_for_segment(
            self.output_filepath, self._def_segment_fname_size)

        # ffmpeg -use_wallclock_as_timestamps 1 -fflags +genpts -rtsp_transport tcp -stimeout 3000000
        # -i rtsp://admin:<password>@192.168.3.22:554/stream0 -f segment -b:v 900k -an -flags +global_header -map 0
        # -map_metadata -1 -movflags +frag_keyframe+separate_moof+omit_tfhd_offset+empty_moov -reset_timestamps 1
        # -segment_format matroska
        # -segment_list /tmp/stored_streams/2.stream_192_168_3_22_554.2019-08-18.14.02.53_video_list.txt
        # -segment_list_type ffconcat -segment_time 20 -strict 2 -vcodec copy -use_wallclock_as_timestamps 1
        # -fflags +genpts /tmp/stored_streams/2.stream_192_168_3_22_554.2019-08-18.14.02.53-%03d.mkv -y

        output_arguments = [
            ("strict", 2),
            ("f", "segment"),
            ("map", 0),
            ("segment_time", self._def_segment_time),
            ("segment_format", self._def_segment_format),
            ("segment_list", self.segment_filelist),
            ("segment_list_type", "ffconcat"),
            ("vcodec", self._def_vcodec),
            ("video_bitrate", self._def_bitrate),
            ("flags", "+global_header"),
            ("reset_timestamps", 1),
            ("map_metadata", -1),
            ("use_wallclock_as_timestamps", 1),
            ("fflags", "+genpts"),
            ("movflags",
             "+frag_keyframe+separate_moof+omit_tfhd_offset+empty_moov"),
        ]

        # if there is no audio codec configured, use the -an
        # flag to strip audio from the recorded stream
        if self._def_acodec != "":
            output_arguments.append(("acodec", self._def_acodec))
        else:
            output_arguments.append(("an", None))

        ffmpeg_output_streams = [
            ffmpeg.output(stream_input, segment_fpath,
                          **OrderedDict(output_arguments))
        ]

        output_streams = ffmpeg.merge_outputs(*ffmpeg_output_streams)
        output_streams = ffmpeg.overwrite_output(output_streams)

        debug_command = ffmpeg.compile(output_streams)
        print("ffmpeg command: {}".format(' '.join(debug_command)))

        self.start_time = time.time()
        self.proc = (
            # ALERT: https://stackoverflow.com/questions/16523746/ffmpeg-hangs-when-run-in-background
            # clean the stderr / stdout regularly to prevent this process for freezing
            ffmpeg.run_async(output_streams,
                             pipe_stdout=True,
                             pipe_stderr=True,
                             overwrite_output=True))

        start_timeout = time.time()
        is_reached_size = False
        while (self.proc.poll() is None
               and time.time() - start_timeout < self._def_timeout_secs
               and not is_reached_size):
            try:
                segment_size = os.path.getsize(
                    self.get_last_segment_output_filepath())
                if segment_size > self._def_min_video_size_bytes:
                    is_reached_size = True
            except OSError:
                pass
            time.sleep(0.1)

        if self.proc.poll() is None:
            return True
        else:
            self.start_time = 0
            self.out, self.err = self.proc.communicate()
            return False
Example no. 22
    def create_popen(self) -> Popen[bytes]:
        # Reason: requires an update on the ffmpeg-python side.
        return ffmpeg.run_async(self.stream_spec,
                                pipe_stdin=True,
                                pipe_stdout=True,
                                pipe_stderr=True)  # type: ignore
Example no. 23
    def run_ffmpeg(self):
        ffcmd = ffmpeg.input(self.device, format='v4l2', input_format='mjpeg',
                             s='svga', r=7.5)
        ffcmd = ffmpeg.output(ffcmd, 'pipe:', format='mjpeg', vcodec='copy')
        self.ffproc = ffmpeg.run_async(ffcmd, pipe_stdout=True, pipe_stdin=True)
        self.running = True
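A hedged companion sketch (not from the original project): because the output above is a copied MJPEG stream on stdout, one naive way to split it back into individual JPEG frames is to scan for the JPEG start/end markers. This would be an extra method on the same class; the buffer size is arbitrary:

    def read_jpeg_frames(self):
        # Hypothetical helper: yields one JPEG frame at a time from self.ffproc.stdout.
        buf = b''
        while self.running:
            chunk = self.ffproc.stdout.read(4096)
            if not chunk:
                break
            buf += chunk
            start = buf.find(b'\xff\xd8')            # JPEG start-of-image marker
            end = buf.find(b'\xff\xd9', start + 2)   # JPEG end-of-image marker
            if start != -1 and end != -1:
                yield buf[start:end + 2]
                buf = buf[end + 2:]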