Example #1
def test__merge_outputs():
    in_ = ffmpeg.input('in.mp4')
    out1 = in_.output('out1.mp4')
    out2 = in_.output('out2.mp4')
    assert ffmpeg.merge_outputs(out1, out2).get_args() == [
        '-i', 'in.mp4', 'out1.mp4', 'out2.mp4'
    ]
    assert ffmpeg.get_args([out1, out2]) == [
        '-i', 'in.mp4', 'out2.mp4', 'out1.mp4'
    ]
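For comparison, the same merged graph can also be turned into the full command line. A minimal sketch using ffmpeg-python's compile() method (which prepends the executable name, 'ffmpeg' by default, to the args shown in the test above):

import ffmpeg

in_ = ffmpeg.input('in.mp4')
merged = ffmpeg.merge_outputs(in_.output('out1.mp4'), in_.output('out2.mp4'))
assert merged.compile() == ['ffmpeg', '-i', 'in.mp4', 'out1.mp4', 'out2.mp4']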
Example #2
def convert_to_hls(input_file,
                   segment_format='%03d.ts',
                   output_dir='cambridge-HLS'):

    ffmpeg_input_stream = ffmpeg.input(input_file)
    ffmpeg_output_streams = []

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # NOTE: `renditions` and `generate_master_m3u8` are expected to be defined
    # elsewhere in the module; each rendition needs 'resolution', 'bitrate'
    # and 'audiorate' keys.
    for rendition in renditions:
        width, height = rendition['resolution'].split('x')
        ffmpeg_params = {
            'vf': f'scale=w={width}:h={height}:force_original_aspect_ratio=decrease',
            'c:a': 'aac',
            'ar': '48000',
            'c:v': 'h264',
            'profile:v': 'main',
            'crf': '20',
            'sc_threshold': '0',
            'g': '48',
            'keyint_min': '48',
            'hls_time': '4',
            'hls_playlist_type': 'vod',
            'b:v': rendition['bitrate'],
            'maxrate': '856k',
            'bufsize': '1200k',
            'b:a': rendition['audiorate'],
            'hls_segment_filename': f'{output_dir}/{height}p_{segment_format}',
        }

        ffmpeg_output_streams.append(
            ffmpeg.output(
                ffmpeg_input_stream,
                f'{output_dir}/{height}p.m3u8',
                **ffmpeg_params))

    # Merge and run once, after the loop: merging inside the loop would
    # re-run every earlier rendition on each iteration.
    output_streams = ffmpeg.merge_outputs(*ffmpeg_output_streams)
    ffmpeg.run(output_streams)
    generate_master_m3u8(output_dir)
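A hypothetical invocation of the helper above. The renditions list shown here and generate_master_m3u8 are assumptions about the surrounding module; they are referenced but not defined in this snippet:

renditions = [
    {'resolution': '640x360', 'bitrate': '800k', 'audiorate': '96k'},
    {'resolution': '1280x720', 'bitrate': '2800k', 'audiorate': '128k'},
]
convert_to_hls('input.mp4')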
Example #3
def test_multi_passthrough():
    out1 = ffmpeg.input('in1.mp4').output('out1.mp4')
    out2 = ffmpeg.input('in2.mp4').output('out2.mp4')
    out = ffmpeg.merge_outputs(out1, out2)
    assert ffmpeg.get_args(out) == [
        '-i', 'in1.mp4', '-i', 'in2.mp4', 'out1.mp4', '-map', '1', 'out2.mp4'
    ]
    assert ffmpeg.get_args([out1, out2]) == [
        '-i', 'in2.mp4', '-i', 'in1.mp4', 'out2.mp4', '-map', '1', 'out1.mp4'
    ]
Example #4
def test__multi_output_edge_label_order():
    scale2ref = ffmpeg.filter_multi_output(
        [ffmpeg.input('x'), ffmpeg.input('y')], 'scale2ref')
    out = ffmpeg.merge_outputs(
        scale2ref[1].filter('scale').output('a'),
        scale2ref[10000].filter('hflip').output('b'),
    )

    args = out.get_args()
    flt_cmpl = args[args.index('-filter_complex') + 1]
    out1, out2 = get_filter_complex_outputs(flt_cmpl, 'scale2ref')
    assert out1 == get_filter_complex_input(flt_cmpl, 'scale')
    assert out2 == get_filter_complex_input(flt_cmpl, 'hflip')
Example #5
def test_multi_passthrough():
    out1 = ffmpeg.input('in1.mp4').output('out1.mp4')
    out2 = ffmpeg.input('in2.mp4').output('out2.mp4')
    out = ffmpeg.merge_outputs(out1, out2)
    assert ffmpeg.get_args(out) == [
        '-i', 'in1.mp4',
        '-i', 'in2.mp4',
        'out1.mp4',
        '-map', '[1]',  # FIXME: this should not be here (see #23)
        'out2.mp4'
    ]
    assert ffmpeg.get_args([out1, out2]) == [
        '-i', 'in2.mp4',
        '-i', 'in1.mp4',
        'out2.mp4',
        '-map', '[1]',  # FIXME: this should not be here (see #23)
        'out1.mp4'
    ]
Example #6
    def __enter__(self):
        global_args = []
        ffinput = ffmpeg.input('pipe:',
                               framerate=self.fps,
                               format='rawvideo',
                               pix_fmt='rgb32',
                               hwaccel='auto',
                               s='{}x{}'.format(self.width, self.height))

        if self.quiet:
            global_args = ['-hide_banner', '-nostats', '-loglevel', 'fatal']

        if self.multi_output:
            split = ffinput.filter_multi_output('split')
            raw_alpha = ffmpeg.filter_(
                split.stream(1), 'lutrgb',
                **{'r': 'maxval', 'g': 'maxval', 'b': 'maxval'})
            key_video = ffmpeg.overlay(split.stream(2), raw_alpha)
            out_key = key_video.output(self.key_out_filename)
            out_main = split.stream(3).output(self.out_filename,
                                              **self.options)

            self.proc = (
                ffmpeg.merge_outputs(out_main, out_key)
                .global_args(*global_args)
                .run_async(pipe_stdin=True))

        else:
            self.proc = (
                ffinput.output(self.out_filename, **self.options)
                .overwrite_output()
                .global_args(*global_args)
                .run_async(pipe_stdin=True))
        return self.proc
Example #7
# %%
print("Parsing video file...")
video = TrimVideo(args.video, time_range=(he_range[0][0], he_range[-1][1]))

files, fast_trims, slow_trims = [], [], []

for i, current_he_range in enumerate(he_range):
    current_files, current_fast_trims, current_slow_trims \
        = video.generate_trim(current_he_range[0], current_he_range[1], prefix=str(i))
    files += current_files
    fast_trims += current_fast_trims
    slow_trims += current_slow_trims
print("Trimming video file...")
if len(fast_trims) > 0:
    print(ffmpeg.merge_outputs(*fast_trims).compile())
    ffmpeg.merge_outputs(*fast_trims).run(overwrite_output=True)
if len(slow_trims) > 0:
    print(ffmpeg.merge_outputs(*slow_trims).compile())
    ffmpeg.merge_outputs(*slow_trims).run(overwrite_output=True)
temp_merge_path = os.path.join(video.temp_dir, "merged.mp4")
merge_cmd = video.generate_merge(files, temp_merge_path)
print(merge_cmd.compile())
print("Merging video file...")
merge_cmd.run(overwrite_output=True)
merged_input = ffmpeg.input(temp_merge_path)
copy_cmd = ffmpeg.output(merged_input, args.output, c='copy')
print("Copying video to destination...")
print(copy_cmd.compile())
copy_cmd.run(overwrite_output=True)
video.clean_temp()
Example #8
def handle_stream(camera_id):
    try:
        camera = Camera.objects.get(pk=camera_id)
    except Camera.DoesNotExist:
        raise CommandError(f'Camera "{camera_id}" does not exist')

    if not camera.enabled:
        print(f"'{camera.name}' is disabled.")
        return

    stream_url = camera.urls()[0]

    print(f"{camera_id}: Probing camera...")
    probe = ffmpeg.probe(stream_url)
    print(f"{camera_id}: Probe completed.")

    video_stream = next(
        (stream
         for stream in probe["streams"] if stream["codec_type"] == "video"),
        None,
    )
    if video_stream is None:
        raise CommandError(f"{camera_id}: No video stream found during probe.")

    codec_name = video_stream["codec_name"]
    width = int(video_stream["width"])
    height = int(video_stream["height"])
    r_frame_rate_parts = video_stream["r_frame_rate"].split("/")
    r_frame_rate = int(r_frame_rate_parts[0]) / int(r_frame_rate_parts[1])

    print()
    print(f"{camera_id}: Stream configuration:")
    print(f"{camera_id}: - Codec:       {codec_name}")
    print(f"{camera_id}: - Size:        {width}x{height}")
    print(f"{camera_id}: - Frame rate:  {r_frame_rate}")

    ds = get_detection_settings(camera)

    drawtext_enabled = camera.overlays.count() > 0
    drawbox_enabled = (ds["md_visualize"] or ds["od_visualize"]
                       or ds["fd_visualize"] or ds["ld_visualize"])
    detect_enabled = (drawbox_enabled or ds["md_enabled"] or ds["od_enabled"]
                      or ds["fd_enabled"] or ds["ld_enabled"])

    print()
    print(f"{camera_id}: Feature configuration:")
    print(f"{camera_id}: - Detection:       {detect_enabled}")
    print(f"{camera_id}:   - Visualization: {drawbox_enabled}")
    print(f"{camera_id}: - Text overlay:    {drawtext_enabled}")

    decode_enabled = detect_enabled
    overlay_enabled = drawtext_enabled and drawbox_enabled
    encode_enabled = drawtext_enabled or drawbox_enabled
    transcode_enabled = not encode_enabled
    copy_enabled = transcode_enabled and codec_name in ("h264", "hevc")

    print()
    print(f"{camera_id}: Pipeline configuration:")
    print(f"{camera_id}: - Decode:  {decode_enabled}")
    print(f"{camera_id}: - Merge:   {overlay_enabled}")
    print(f"""{camera_id}: - Output:  {'Encode' if encode_enabled
        else ('Copy' if copy_enabled else 'Transcode')}""")

    # TODO: Add appropriate hardware acceleration
    # Intel/AMD:
    #   -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 -hwaccel_output_format vaapi
    #   -i <input> -c:v h264_vaapi <output>
    # Nvidia:
    #   -hwaccel cuda -hwaccel_output_format cuda
    #   -i <input> -c:v h264_nvenc <output>

    rawvideo_params = {
        "format": "rawvideo",
        "pix_fmt": "rgb24",
        "s": f"{width}x{height}",
    }

    hls_params = {
        "flags": "+cgop",
        "g": r_frame_rate,
        "hls_time": 1,
        "hls_list_size": 900,
        "hls_flags": "delete_segments",
    }

    mp4_params = {
        "movflags": "+faststart",
    }

    stream_dir = f"{settings.STORAGE_DIR}/stream/{camera.id}"
    record_dir = f"{settings.STORAGE_DIR}/record/{camera.id}"
    makedirs(stream_dir, exist_ok=True)
    makedirs(record_dir, exist_ok=True)

    fifo_path = mkfifotemp("h264")

    # `output` starts as the camera input stream and is progressively
    # replaced by the filter steps below.
    output = ffmpeg.input(
        stream_url,
        **({"rtsp_transport": "tcp"}
           if camera.camera_type.streams.all()[0].force_tcp else {}),
    )
    outputs = []

    drawtext = None
    drawbox = None

    if decode_enabled:
        outputs.append(output.output("pipe:", **rawvideo_params))
    if drawtext_enabled:
        drawtext = output.drawtext("Hello, world!")
        output = drawtext
    if drawbox_enabled:
        drawbox = ffmpeg.input("pipe:", **rawvideo_params)
        output = drawbox
    if overlay_enabled:
        output = ffmpeg.overlay(drawtext, drawbox)

    if encode_enabled:
        split = output.filter_multi_output("split")
        inputs = [split.stream(0), split.stream(1)]
    else:
        inputs = [output, output]

    outputs.append(inputs[0].output(
        f"{stream_dir}/out.m3u8",
        vcodec="copy" if copy_enabled else "h264_vaapi",
        **hls_params,
    ))
    outputs.append(inputs[1].output(
        fifo_path,
        vcodec="copy" if copy_enabled else "h264").overwrite_output())

    main_cmd = ffmpeg.merge_outputs(*outputs)
    main_cmd = main_cmd.global_args("-hide_banner", "-loglevel", "error")

    print()
    print(f"{camera_id}: Starting stream...")
    main_process = main_cmd.run_async(pipe_stdin=True, pipe_stdout=True)
    print(f"{camera_id}: Started stream.")

    print()
    print(f"{camera_id}: Starting segmented recorder...")
    record_process = Process(
        target=segment_h264,
        args=(
            camera,
            fifo_path,
            f"{record_dir}/VID_%Y%m%d_%H%M%S.mp4",
            mp4_params,
        ),
        name=f"'{camera.name}'-record",
    )
    record_process.start()
    print(f"{camera_id}: Started segmented recorder.")

    manual_exit = False

    try:
        if decode_enabled:
            print()
            print(f"{camera_id}: Starting overlay loop...")

            while main_process.poll() is None:
                in_bytes = main_process.stdout.read(width * height * 3)
                if not in_bytes:
                    break
                frame = np.frombuffer(in_bytes,
                                      np.uint8).reshape([height, width, 3])

                if detect_enabled:
                    # TODO: Do detection on frame
                    pass

                if drawbox_enabled:
                    # TODO: Do drawbox on frame
                    pass
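                    # Illustrative only (an assumption, not the project's
                    # detector output): this TODO could, for example, paint
                    # a red border onto the frame:
                    #   frame = frame.copy()  # frombuffer views are read-only
                    #   frame[:2] = frame[-2:] = (255, 0, 0)
                    #   frame[:, :2] = frame[:, -2:] = (255, 0, 0)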

                    # Write the processed frame back into the drawbox
                    # "pipe:" input created above.
                    main_process.stdin.write(frame.astype(np.uint8).tobytes())

        else:
            print()
            print(f"{camera_id}: Waiting for end of stream...")
            main_process.wait()

        print(f"{camera_id}: Stream ended. Status: {main_process.returncode})")

    except KeyboardInterrupt:
        manual_exit = True

    print()
    print(f"{camera_id}: Stopping stream and segmented recorder...")

    main_process.terminate()
    kill(record_process.pid, SIGINT)
    record_process.join()

    camera.last_ping = None
    camera.save()

    print(f"{camera_id}: All done.")

    if not manual_exit:
        exit(2)
Example #9
File: video.py  Project: TinDang97/DPS_Util
    def write(self,
              output,
              over_write=False,
              encoder=H265_ENCODER,
              pix_fmt=YUV420P,
              output_size=None,
              keep_ratio=True,
              fps=-1,
              duration=0,
              preview=False,
              preview_size=SD_RESOLUTION,
              log_level=LOG_ERROR):
        """
        Capture input stream to file. With preview options

        Preview mode can slow processing down by 1.5x to 2x.

        Parameters
        ----------
        output: str
            Support: file's path | pipeline ("pipe:") | URL

        over_write: bool
            Overwrite the existing file if True.

        encoder: str
            (Default: H265_ENCODER) Encode codec.

        pix_fmt: str
            (Default: YUV420P) Pixel format

        output_size: tuple[int, int]
            Output size of the stream.

        keep_ratio: bool
            If True, the width is rescaled to preserve the aspect ratio: w *= h_new / h_old

        fps: int
            (Default: -1 - autoset) FPS of output video

        duration: int
            (Default: 0 - unlimited) Limits the recording time if set.

        preview: bool
            Show frames during processing if True.

        preview_size: tuple[int, int]
            Preview window size.

        log_level: LogLevel
            Log level of ffmpeg
        """

        if output.startswith("pipe") and os.path.isfile(
                output) and not over_write:
            raise FileExistsError

        if not output_size:
            output_size = self.size

        if keep_ratio:
            output_size = (
                int(round(self.__meta.width * (output_size[1] / self.__meta.height))),
                output_size[1])

        # file output settings
        output_options = {
            "pix_fmt": pix_fmt,
            "loglevel": log_level,
            "c:v": encoder,
            "crf": 27,
            'preset': 'veryfast',
            's': f'{output_size[0]}x{output_size[1]}'
        }

        if duration:
            output_options['t'] = duration

        if encoder == H265_ENCODER:
            output_options["x265-params"] = (
                f"log-level={log_level if log_level != 'quiet' else -1}")

        # handle FPS
        if fps > 0:
            # manual set
            output_options["r"] = fps
        elif self.is_stream:
            # sync with time
            output_options["vsync"] = "vfr"
        else:
            # same source file
            output_options["r"] = self.fps

        capture_output = self.__input_stream \
            .output(output, **output_options) \
            .overwrite_output()

        # pipe output settings if preview.
        if preview:
            preview_output_size = THUMBNAIL_RESOLUTION

            if keep_ratio:
                preview_output_size = (
                    int(round(self.__meta.width *
                              (preview_output_size[1] / self.__meta.height))),
                    preview_output_size[1])

            pipe_output_opts = {
                "format": "rawvideo",
                "pix_fmt": RGB24,
                "loglevel": log_level,
                's': f'{preview_output_size[0]}x{preview_output_size[1]}'
            }

            if fps > 0:
                # manual set
                pipe_output_opts["r"] = fps
            elif self.is_stream:
                # sync with time
                pipe_output_opts["vsync"] = "vfr"
            else:
                # same source file
                pipe_output_opts["r"] = self.fps

            if duration:
                pipe_output_opts['t'] = duration

            pipe_output = self.__input_stream.output('pipe:',
                                                     **pipe_output_opts)
            capture_output = ffmpeg.merge_outputs(pipe_output, capture_output)

            capture_output = VideoIterator(capture_output,
                                           preview_output_size,
                                           is_stream=self.is_stream)
            window_name = f"Preview - {self.__meta.src}"

            for frame in capture_output:
                if not show_image(frame.decode(),
                                  window_name,
                                  windows_size=preview_size):
                    break

            destroy_windows(window_name)
            capture_output.stop()
        else:
            capture_output.run()
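A hypothetical usage sketch for the write() API documented above. The class name and constructor are assumptions for illustration, not taken from DPS_Util:

video = VideoCapture('input.mp4')  # hypothetical class exposing write()
video.write('output.mp4', over_write=True, fps=30, preview=False)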
Example #10
def create_outputs(
    source_input,
    name,
    framerate=None,
    watermark=True,
    verbose=False,
    dryrun=False,
):
    """Create output at multiple sizes (FHD and qHD)

    :param source_input: ffmpeg input node ready for scaling and conversion.
    :param name: name of the output.
    :param watermark: if True, the default watermark is added to the movie;
        if a tuple is provided, its strings are used for the main and sub lines.
    :param verbose: if True output the ffmpeg CLI command which will be used.
    :param dryrun: if True the command will not be run.

    """

    fhd_input = source_input.filter_('scale',
                                     size='hd1080',
                                     force_original_aspect_ratio='increase')

    if watermark:
        if watermark is True:
            text = 'Arne de Laat'
            subtext = '153957 Photography'
        else:
            text, subtext = watermark
        watermarked_input = add_watermark(fhd_input,
                                          text,
                                          subtext,
                                          fontsize=32)
        split_input = watermarked_input.split()
    else:
        split_input = fhd_input.split()

    if framerate:
        output_options = {'r': framerate, **OUTPUT_OPTIONS}
    else:
        output_options = OUTPUT_OPTIONS

    output = ffmpeg.merge_outputs(
        # 1920x1080 (1920x1280)
        split_input[0].output(f'{name}.mp4', **output_options),
        # 960x540 (960x640)
        split_input[1]
        .filter_('scale', size='qhd', force_original_aspect_ratio='increase')
        .output(f'{name}_960.mp4', **output_options),
    )

    if verbose:
        print('ffmpeg ' + ' '.join(output.get_args()))
        try:
            output.view(filename=f'{name}')  # Automatically suffixed with .pdf
        except ImportError:
            print('Install graphviz to generate the signal graph')

    if not dryrun:
        output.global_args('-hide_banner').run(overwrite_output=True)

    return output
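A minimal usage sketch for create_outputs. The input pattern is illustrative, and OUTPUT_OPTIONS plus add_watermark are assumed to be defined elsewhere in the module:

source = ffmpeg.input('frames/%04d.jpg', framerate=25)
create_outputs(source, 'timelapse', framerate=25,
               watermark=('Main line', 'Sub line'), verbose=True, dryrun=True)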
Example #11
    def start_recording(self):
        """
        Start recording the stream defined on self.stream_uri
        :return: True if the recording started successfully
        """

        # USB cameras and RTSP streams are treated differently; for example,
        # the socket TCP I/O timeout (stimeout) only applies to RTSP.
        parsed_uri = urlparse(self.stream_uri)
        if parsed_uri.scheme == "rtsp":
            stream_input = ffmpeg.input(
                self.stream_uri,
                nostdin=None,
                use_wallclock_as_timestamps=1,
                stimeout=self._def_stimeout,
                fflags="+genpts",
                rtsp_transport='tcp')  # stimeout is in microseconds
        else:
            stream_input = ffmpeg.input(self.stream_uri)

        # store the files in segments to prevent corruption
        segment_fpath = self.prepare_filepath_for_segment(
            self.output_filepath, self._def_segment_fname_size)

        # ffmpeg -use_wallclock_as_timestamps 1 -fflags +genpts -rtsp_transport tcp -stimeout 3000000
        # -i rtsp://admin:[email protected]:554/stream0 -f segment -b:v 900k -an -flags +global_header -map 0
        # -map_metadata -1 -movflags +frag_keyframe+separate_moof+omit_tfhd_offset+empty_moov -reset_timestamps 1
        # -segment_format matroska
        # -segment_list /tmp/stored_streams/2.stream_192_168_3_22_554.2019-08-18.14.02.53_video_list.txt
        # -segment_list_type ffconcat -segment_time 20 -strict 2 -vcodec copy -use_wallclock_as_timestamps 1
        # -fflags +genpts /tmp/stored_streams/2.stream_192_168_3_22_554.2019-08-18.14.02.53-%03d.mkv -y

        output_arguments = [
            ("strict", 2),
            ("f", "segment"),
            ("map", 0),
            ("segment_time", self._def_segment_time),
            ("segment_format", self._def_segment_format),
            ("segment_list", self.segment_filelist),
            ("segment_list_type", "ffconcat"),
            ("vcodec", self._def_vcodec),
            ("video_bitrate", self._def_bitrate),
            ("flags", "+global_header"),
            ("reset_timestamps", 1),
            ("map_metadata", -1),
            ("use_wallclock_as_timestamps", 1),
            ("fflags", "+genpts"),
            ("movflags",
             "+frag_keyframe+separate_moof+omit_tfhd_offset+empty_moov"),
        ]

        # if no audio codec is configured, use the -an flag
        # to strip audio from the recorded stream
        if self._def_acodec != "":
            output_arguments.append(("acodec", self._def_acodec))
        else:
            output_arguments.append(("an", None))

        ffmpeg_output_streams = [
            ffmpeg.output(stream_input, segment_fpath,
                          **OrderedDict(output_arguments))
        ]

        output_streams = ffmpeg.merge_outputs(*ffmpeg_output_streams)
        output_streams = ffmpeg.overwrite_output(output_streams)

        debug_command = ffmpeg.compile(output_streams)
        print("ffmpeg command: {}".format(' '.join(debug_command)))

        self.start_time = time.time()
        self.proc = (
            # ALERT: https://stackoverflow.com/questions/16523746/ffmpeg-hangs-when-run-in-background
            # drain stderr / stdout regularly to prevent this process from freezing
            ffmpeg.run_async(output_streams,
                             pipe_stdout=True,
                             pipe_stderr=True,
                             overwrite_output=True))
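        # A hedged sketch of the draining the ALERT above calls for
        # (illustrative, not part of the original class): a daemon thread
        # that discards stderr so the pipe never fills up and blocks ffmpeg.
        #   import threading
        #   def _drain(pipe):
        #       for _ in iter(pipe.readline, b""):
        #           pass
        #   threading.Thread(target=_drain, args=(self.proc.stderr,),
        #                    daemon=True).start()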

        start_timeout = time.time()
        is_reached_size = False
        while (self.proc.poll() is None
               and time.time() - start_timeout < self._def_timeout_secs
               and not is_reached_size):
            try:
                segment_path = self.get_last_segment_output_filepath()
                if os.path.getsize(segment_path) > self._def_min_video_size_bytes:
                    is_reached_size = True
            except OSError:
                pass
            time.sleep(0.1)

        if self.proc.poll() is None:
            return True
        else:
            self.start_time = 0
            self.out, self.err = self.proc.communicate()
            return False
Example #12
    def start_streams(self):
        # Base video source from the camera. Video used to arrive later than
        # the audio; 'nobuffer' makes it earlier: https://stackoverflow.com/a/49273163
        source = ffmpeg.input(
            'pipe:',
            format='h264',
            **{
                'r': self.camera_configuration.get_framerate(),
                'thread_queue_size': 20480,
                'fflags': 'nobuffer',
            })

        audio = None
        if self.feature_states.get_STREAMING()[1]:
            # The mic should be active. Use 'nobuffer' for the same low latency
            # as the video, then sync audio to video with 'async' as in
            # https://lzone.de/blog/Easily-fix-async-video-with-ffmpeg
            audio = ffmpeg.input(
                self.camera_configuration.get_mic_input(),
                format='alsa',
                **{
                    'ac': 1,
                    'c:a': 'pcm_s24le',
                    'sample_rate': 192000,
                    'thread_queue_size': 20480,
                    'fflags': 'nobuffer',
                    # 'nobuffer' reduces latency, 'async' syncs to video;
                    # 'itsoffset': MIC_OFFSET proved unneeded and inaccurate
                    'async': 1,
                })
        elif self.feature_states.get_STREAMING()[2]:
            # radio should be active
            audio = ffmpeg.input(
                self.camera_configuration.get_radio_url(), **{
                    'ac': 2,
                    'itsoffset': RADIO_OFFSET,
                    'thread_queue_size': 20480
                })

        # only video no audio to ml stream
        if audio is not None:
            ml_output_stream = ffmpeg.output(
                source,
                audio,
                self.camera_configuration.get_object_recognition_stream_url(),
                format='rtsp',
                **{
                    'fflags': 'nobuffer',
                    'threads': 2,
                    # copy of normal streaming audio output
                    'acodec': 'aac',
                    'ac': 1,
                    'ar': 44100,
                    'ab': '128k',
                    'af': 'highpass=200,lowpass=2100,volume=4,loudnorm',
                    'vcodec': 'copy',
                    'reconnect': 1,
                    'reconnect_at_eof': 1,
                    'reconnect_streamed': 1,
                    'reconnect_delay_max': 30
                })
        else:
            ml_output_stream = ffmpeg.output(
                source,
                self.camera_configuration.get_object_recognition_stream_url(),
                format='rtsp',
                **{
                    'fflags': 'nobuffer',
                    'threads': 2,
                    'vcodec': 'copy',
                    'reconnect': 1,
                    'reconnect_at_eof': 1,
                    'reconnect_streamed': 1,
                    'reconnect_delay_max': 30
                })

        stream_url = self.camera_configuration.get_stream_url()

        if audio is not None:
            # see these for audio implementation details since docs suck
            # https://github.com/kkroening/ffmpeg-python/issues/26
            # https://github.com/kkroening/ffmpeg-python/pull/45#issue-159702983
            streaming_output = ffmpeg.output(
                source,
                audio,
                stream_url,
                format='flv',
                **{
                    'threads': 3,
                    # high/low-pass filters reduce the USB mic noise floor, then
                    # boost and normalize; the afftdn noise reduction filter made
                    # the stream unstable, so it is left out
                    'acodec': 'aac',
                    'ac': 1,
                    'ar': 44100,
                    'ab': '128k',
                    'af': 'highpass=200,lowpass=2100,volume=4,loudnorm',
                    'vcodec': 'copy',
                    'reconnect': 1,
                    'reconnect_at_eof': 1,
                    'reconnect_streamed': 1,
                    'reconnect_delay_max': 30
                })
        else:
            streaming_output = ffmpeg.output(
                source,
                stream_url,
                format='flv',
                **{
                    'threads': 3,
                    # -acodec aac -ac 2 -ar 44100 -ab 128k
                    'vcodec': 'copy',
                    'reconnect': 1,
                    'reconnect_at_eof': 1,
                    'reconnect_streamed': 1,
                    'reconnect_delay_max': 30
                })

        stream = None
        if (self.feature_states.get_OBJECT_DETECTION_STREAMING()
                and self.feature_states.get_STREAMING()[0]):
            # use roughly the recommended approach, but instead of splitting
            # and re-encoding, merge the outputs and copy the video directly
            stream = ffmpeg.merge_outputs(streaming_output, ml_output_stream)
        elif self.feature_states.get_STREAMING()[0]:
            stream = streaming_output
        elif self.feature_states.get_OBJECT_DETECTION_STREAMING():
            stream = ml_output_stream

        if stream is not None:
            self.any_video_running = True
            self.stream_runner_p = stream.run_async(pipe_stdin=True)
            print("Starting to push frames from the camera into the ffmpeg stream stdin...")
            self.camera.start_recording(
                self.stream_runner_p.stdin,
                format='h264',
                bitrate=self.camera_configuration.get_bitrate(),
                splitter_port=2,  # splitter port allows circular recording at the same time
            )
        else:
            pass