Example #1
    def overlay_layers(self, parent_split, overlay_split):
        output_splits = {'video': None, 'audio': None}
        self.logger.debug("overlay_layers")

        video_count = 0
        if parent_split['video'] is not None:
            video_count += 1
            output_splits['video'] = [
                parent_split['video'][0], parent_split['video'][1]
            ]
        if overlay_split['video'] is not None:
            video_count += 1
            output_splits['video'] = [
                overlay_split['video'][0], overlay_split['video'][1]
            ]
        if video_count > 1:
            try:
                self.logger.debug("overlay video")
                top_overlay_0 = ffmpeg.overlay(parent_split['video'][0],
                                               overlay_split['video'][0],
                                               eof_action='pass')
                bottom_overlay_0 = ffmpeg.overlay(parent_split['video'][1],
                                                  overlay_split['video'][1],
                                                  eof_action='repeat')
                output_splits['video'] = [top_overlay_0, bottom_overlay_0]
            except Exception as e:
                self.logger.exception(e)
                raise

        audio_count = 0
        if parent_split['audio'] is not None:
            audio_count += 1
            output_splits['audio'] = parent_split['audio']
        if overlay_split['audio'] is not None:
            audio_count += 1
            output_splits['audio'] = overlay_split['audio']
        if audio_count > 1:
            try:
                self.logger.debug("overlay audio")
                combined_audio = ffmpeg.filter(
                    [parent_split['audio'], overlay_split['audio']],
                    'amix',
                    inputs=2,
                    duration='longest')
                output_splits['audio'] = combined_audio
            except Exception as e:
                self.logger.exception(e)
                raise
        return output_splits
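
A minimal standalone sketch of the same pattern this helper builds, assuming two placeholder input files: overlay the two video streams and amix the two audio streams into a single output.

import ffmpeg

base = ffmpeg.input('base.mp4')      # placeholder input
top = ffmpeg.input('overlay.mp4')    # placeholder input

merged_video = ffmpeg.overlay(base.video, top.video, eof_action='pass')
merged_audio = ffmpeg.filter([base.audio, top.audio], 'amix', inputs=2, duration='longest')

ffmpeg.output(merged_video, merged_audio, 'combined.mp4').run()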
Example #2
def ffmpegtask():

    VIDEO_URL = 'https://www.youtube.com/watch?v=dp8PhLsUcFE'
    RTMP_SERVER = 'rtmp://publish.dailymotion.com/publish-dm/x745s0j?auth=eIVE_72b5fc183ed5507672cdff8537d47885aead794f'

    stream_map = None
    stream1 = ffmpeg.input(get_manifest(VIDEO_URL), re=None)
    stream2 = ffmpeg.input('mosca_66.png')
    stream_ol = ffmpeg.overlay(stream1,
                               stream2,
                               x='main_w-overlay_w-50',
                               y='50')
    a1 = stream1.audio
    stream = ffmpeg.output(stream_ol,
                           a1,
                           RTMP_SERVER,
                           format='flv',
                           vcodec='libx264',
                           acodec='aac',
                           preset='medium',
                           g='120',
                           crf='23',
                           maxrate='4M',
                           bufsize='5M',
                           channel_layout='stereo')
    subp = ffmpeg.run(stream)
Example #3
def restream(origin, server, stream_key):
    if 'youtu' in origin:
        try:
            origin = get_manifest(origin)
        except Exception:
            logging.error("Error parsing YouTube URL " + origin)

    stream_server = generate_url(server, stream_key)
    try:
        stream_map = None
        probe_decoded = probe(origin)
        video_stream = get_video_stream(probe_decoded)
        audio_stream = get_audio_stream(probe_decoded)
        stream1 = ffmpeg.input(origin)
        stream2 = ffmpeg.input('mosca_76.png')
        stream2 = ffmpeg.filter(stream2, 'scale', w='-1', h=video_stream[2])
        stream_ol = ffmpeg.overlay(stream1[str(video_stream[0])], stream2, x='main_w-overlay_w')
        stream_ol = ffmpeg.filter(stream_ol, 'fps', fps=25, round='up')
        a1 = stream1.audio
        stream1_audio = stream1[str(audio_stream)]
        if 'dailymotion' in server:
            stream = ffmpeg.output(stream_ol, stream1_audio, stream_server, format='flv', vcodec='libx264', acodec='aac', preset='veryfast', g='50', threads='2', s='1920x1080', crf='23', maxrate='4M', bufsize='5M', channel_layout='stereo')
        else:
            stream = ffmpeg.output(stream_ol, stream1_audio, stream_server, format='flv', vcodec='libx264', acodec='aac', preset='veryfast', g='50', threads='2', s='1280x720', crf='23', maxrate='4M', bufsize='5M', channel_layout='stereo')
        ffmpeg.run(stream, capture_stdout=True, capture_stderr=True)
        set_complete()
    except ffmpeg.Error as e:
        e_decoded = e.stderr.decode('utf8')
        logging.error("Error de FFMPEG durante el streaming: " + e_decoded)
        set_complete() 
Example #4
    def to_stream(self, start_timestamp: float, end_timestamp: float):
        main = self.main.to_stream(start_timestamp, end_timestamp)
        inside = self.inside.to_stream(start_timestamp, end_timestamp)
        crop_height = self.crop_height
        if crop_height is None:
            crop_height = self.crop_width * 9 // 16
        crop = inside.crop(x=self.crop_x,
                           y=self.crop_y,
                           width=self.crop_width,
                           height=crop_height)

        overlay_w = self.width
        overlay_h = crop_height * self.width // self.crop_width
        scaled = crop.filter('scale', overlay_w, -1)
        if self.opacity == 1:
            translucent = scaled
        else:
            translucent = scaled.filter('format',
                                        'rgba').filter('colorchannelmixer',
                                                       aa=self.opacity)
        overlay_margin = self.margin
        if self.location == Location.MIDDLE_RIGHT:
            overlay_x = 1920 - overlay_margin - overlay_w
            overlay_y = (1080 - overlay_h) // 2
        elif self.location == Location.TOP_CENTER:
            overlay_x = 1920 // 2 - overlay_w // 2
            overlay_y = overlay_margin
        elif self.location == Location.TOP_RIGHT:
            overlay_x = 1920 - overlay_margin - overlay_w
            overlay_y = overlay_margin
        else:
            raise ValueError(f'bad location: {self.location}')
        overlay = ffmpeg.overlay(main, translucent, x=overlay_x, y=overlay_y)
        return overlay
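
A hedged side note: instead of hard-coding the 1920x1080 frame size, the overlay position can use ffmpeg's built-in overlay expressions (as Example #2 does with main_w/overlay_w). A sketch using the same stream names as above; the margin value is a placeholder:

overlay = ffmpeg.overlay(main, translucent,
                         x='main_w-overlay_w-16',    # right edge minus a margin
                         y='(main_h-overlay_h)/2')   # vertically centered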
Example #5
    def watermark(self,
                  image: FileDesc,
                  alpha: float = 1.0,
                  x: Expression = 0,
                  y: Expression = 0,
                  scale: float = 1.0,
                  angle: float = 0.0):
        """
        Add a watermark/subtitle image to the video or image.

        :param image: watermark/subtitle image
        :param alpha: opacity
        :param x: starting position on the horizontal axis
        :param y: starting position on the vertical axis
        :param scale: scaling factor
        :param angle: clockwise rotation angle, in degrees
        :returns: self
        """
        self.stream = ffmpeg.overlay(
            self.stream,
            Transform(image).scale(scale).rotate(angle).alpha(alpha).stream,
            x=x,
            y=y,
        )
        return self
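
For reference, a rough standalone equivalent of the overlay this helper composes, written with plain ffmpeg-python calls. The Transform helper is not shown in the source, so the scale/rotate/opacity chain below is an assumption based on standard ffmpeg filters; paths and values are placeholders.

import ffmpeg

main = ffmpeg.input('clip.mp4')
mark = ffmpeg.input('logo.png')

mark = mark.filter('scale', 'iw*0.5', '-1')                                # scale=0.5
mark = mark.filter('rotate', '30*PI/180')                                  # angle=30 degrees, clockwise
mark = mark.filter('format', 'rgba').filter('colorchannelmixer', aa=0.6)   # alpha=0.6

out = ffmpeg.overlay(main, mark, x=10, y=10)
ffmpeg.output(out, 'watermarked.mp4').run()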
Example #6
def insert_vid(video, audio, video1, audio1, x, y, aud):
    video = ffmpeg.overlay(video, video1, x=x, y=y)
    if aud == 'main':
        return video, audio
    elif aud == 'added':
        return video, audio1
    elif aud == 'both':
        au = ffmpeg.filter([audio, audio1], 'amix')
        return video, au
Example #7
File: videos.py, Project: samikhailov/music
 def create_cut_video(self):
     self.update_chorus_start()
     if not os.path.exists(self.cut_video_path):
         stream = ffmpeg.input(self.full_video_path, ss=self.global_track.chorus_start, t=CHORUS_LENGTH)
         stream = self._set_requirements(stream, CHORUS_LENGTH)
         titles = self._draw_titles(self.global_track.artist, self.global_track.title).setpts("PTS-STARTPTS+40")
         stream = ffmpeg.overlay(stream, titles, eof_action="pass").setpts("PTS-STARTPTS")
         output_file = ffmpeg.output(stream, self.cut_video_path)
         output_file.run()
Example #8
 async def _watermark(self, ctx, vstream, astream, kwargs):
     x = kwargs['x']
     y = kwargs['y']
     w = kwargs['w']
     h = kwargs['h']
     watermark_stream = ffmpeg.input(kwargs['watermark_filepath'])
     if (w is not None and h is not None):
         watermark_stream = watermark_stream.filter('scale', w=w, h=h)
     vstream = vstream.filter('scale', h=320, w=-2).filter('setsar',
                                                           r='1:1')
     vstream = ffmpeg.overlay(vstream, watermark_stream, x=x, y=y)
     return (vstream, astream, {})
Example #9
 def to_stream(self, start_timestamp, end_timestamp):
     dummy = self.video_1.to_stream(start_timestamp, end_timestamp)
     video_1 = self.video_1.to_stream(start_timestamp, end_timestamp)
     video_2 = self.video_2.to_stream(start_timestamp, end_timestamp)
     video_3 = self.video_3.to_stream(start_timestamp, end_timestamp)
     video_4 = self.video_4.to_stream(start_timestamp, end_timestamp)
     overlay = ffmpeg.overlay(dummy,
                              video_1.filter('scale', 960, -1),
                              x=0,
                              y=0)
     overlay = ffmpeg.overlay(overlay,
                              video_2.filter('scale', 960, -1),
                              x=960,
                              y=0)
     overlay = ffmpeg.overlay(overlay,
                              video_3.filter('scale', 960, -1),
                              x=0,
                              y=540)
     overlay = ffmpeg.overlay(overlay,
                              video_4.filter('scale', 960, -1),
                              x=960,
                              y=540)
     return overlay
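
The same 2x2 grid can also be built with a single xstack filter instead of four chained overlays. A sketch using the streams from this example; the layout cells reference the first tile's width/height:

tiles = [s.filter('scale', 960, -1) for s in (video_1, video_2, video_3, video_4)]
grid = ffmpeg.filter(tiles, 'xstack', inputs=4, layout='0_0|w0_0|0_h0|w0_h0')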
Example #10
    def start(self):
        Logger.LOGGER.log(Logger.TYPE_INFO,
                          'Starting Server, output to: {}'.format(self.output))

        in1 = ffmpeg.input('pipe:')
        v1 = ffmpeg.drawtext(in1['v'], '%{localtime:%R}',
                             x=c.SERV_DRAWTEXT_X,
                             y=c.SERV_DRAWTEXT_Y,
                             escape_text=False,
                             shadowcolor=c.SERV_DRAWTEXT_SHADOW_COLOR,
                             shadowx=c.SERV_DRAWTEXT_SHADOW_X,
                             shadowy=c.SERV_DRAWTEXT_SHADOW_Y,
                             fontsize=c.SERV_DRAWTEXT_FONT_SIZE,
                             fontfile=c.SERV_DRAWTEXT_FONT_FILE,
                             fontcolor=c.SERV_DRAWTEXT_FONT_COLOR
                             )
        v1 = ffmpeg.overlay(v1, self.overlay_file, x=c.OVERLAY_X, y=c.OVERLAY_Y)
        v1 = ffmpeg.overlay(v1, self.overlay_file_outline, x=c.OVERLAY_X, y=c.OVERLAY_Y)

        a1 = in1['a']
        joined = ffmpeg.concat(v1, a1, v=1, a=1)

        self.ff = ffmpeg.output(joined, self.output, vcodec='h264',
                                aspect=c.SERV_OUTPUT_ASPECT,
                                acodec=c.SERV_OUTPUT_ACODEC,
                                crf=c.SERV_OUTPUT_CRF,
                                preset=c.SERV_OUTPUT_PRESET,
                                format='flv',
                                pix_fmt='yuv420p'
                                )

        self.cmd = ['ffmpeg', '-re']+ffmpeg.get_args(self.ff)
        self.process = subprocess.Popen(self.cmd, stdin=subprocess.PIPE, stdout=devnull, stderr=(
            None if SERVER_DEBUG else devnull))
        Logger.LOGGER.log(Logger.TYPE_INFO,
                    'Server Process Created')
        return self.process
Example #11
def render(videofilename, bpm):

    looping = ffmpeg.input(
        "cat.mp4",
        stream_loop=-1)  # Import tom as a looping stream, tom is 426x240
    looping = ffmpeg.filter(
        looping, "colorkey", color="0x2bd71c", similarity=0.3,
        blend=0)  # green key value sampled from the overlay clip

    stream = ffmpeg.input(videofilename, ss=90,
                          t=25)  # Start 90 s in and make the clip 25 s long
    video = stream.video
    audio = stream.audio

    looping = ffmpeg.filter(looping, "setpts", "{}*PTS".format(118 / bpm))
    video = ffmpeg.filter(video, 'scale', 1280, 720)  # Resize to 720p
    video = ffmpeg.overlay(video, looping, shortest=1, y="0")  # overlay onto the scaled video
    stream = ffmpeg.output(video, audio, './output.mp4').overwrite_output()
    stream.run()
Example #12
    def offset_layer(self, layer_data, i, resolution=None):
        resolution = resolution or self.streamInfo.resolution
        try:
            assert ('sourceFile'
                    in layer_data), "missing sourceFile in layer_data"
            assert ('timelineStart'
                    in layer_data), "missing timelineStart in layer_data"
        except Exception as e:
            self.logger.exception(e)
            raise
        source = layer_data['sourceFile']
        offset = layer_data['timelineStart']
        offset_seconds = float(offset) / 1000
        self.logger.debug("offset_layer - {}".format(offset_seconds))
        layer = ffmpeg.input(source)
        # fully transparent lavfi color source at the target resolution
        box = ffmpeg.input(
            'color=black@0.0:size={},format=rgba'.format(resolution),
            f='lavfi',
            t=str(offset_seconds + 2))
        splits = {'video': None, 'audio': None}
        self.streamInfo.set_file_name(source)
        if self.streamInfo.has_stream():
            video = layer.video.setpts(
                'PTS-STARTPTS+{}/TB'.format(offset_seconds))
            transparent_offset = ffmpeg.overlay(box,
                                                video,
                                                eof_action='repeat')
            video_split = transparent_offset.filter_multi_output('split')
            splits['video'] = [video_split[0], video_split[1]]

        self.streamInfo.set_file_name(source)
        self.streamInfo.set_stream('a')
        if self.streamInfo.has_stream():
            audio = layer.audio.filter('adelay',
                                       delays='{}:all=1'.format(offset))
            audio_split = audio.filter_multi_output('asplit')[0]
            splits['audio'] = audio_split
        self.total_time += offset / 1000
        return splits
Example #13
    async def _greenscreen(self, ctx, vstream, astream, kwargs):
        green_stream = ffmpeg.input(kwargs['first_vid_filepath'])
        vgreen = green_stream.video
        agreen = green_stream.audio

        vgreen = (
            vgreen
            .filter('scale', w=480, h=320)
            .filter('setsar', r='1:1')
            .filter('colorkey', color=kwargs['color'], similarity=kwargs['similarity'])
        )
        vstream = (
            vstream
            .filter('scale', w=480, h=320)
            .filter('setsar', r='1:1')
        )
        vstream = ffmpeg.overlay(vstream, vgreen, x=0, y=0)
        astream = (
            ffmpeg
            .filter([astream, agreen], 'amix', dropout_transition=4000)
            .filter('volume', volume=2, precision='fixed')
        )

        return vstream, astream, {}
Example #14
    def __enter__(self):
        global_args = []
        ffinput = ffmpeg.input('pipe:',
                               framerate=self.fps,
                               format='rawvideo',
                               pix_fmt='rgb32',
                               hwaccel='auto',
                               s='{}x{}'.format(self.width, self.height))

        if self.quiet:
            global_args = ['-hide_banner', '-nostats', '-loglevel', 'fatal']

        if self.multi_output:
            split = ffinput.filter_multi_output('split')
            raw_alpha = ffmpeg.filter_(
                split.stream(1), 'lutrgb', **{
                    'r': 'maxval',
                    'g': 'maxval',
                    'b': 'maxval'
                })
            key_video = ffmpeg.overlay(split.stream(2), raw_alpha)
            out_key = key_video.output(self.key_out_filename)
            out_main = split.stream(3).output(self.out_filename,
                                              **self.options)

            self.proc = (ffmpeg.merge_outputs(
                out_main,
                out_key).global_args(*global_args).run_async(pipe_stdin=True))

        else:

            self.proc = (ffinput.output(
                self.out_filename,
                **self.options).overwrite_output().global_args(
                    *global_args).run_async(pipe_stdin=True))
        return self.proc
Example #15
    def render(self, speedrun=False):  # NOQA
        """This is how we generate a file directly:
            Before the for loop we setup and read inputps
            The first for loop generates the ffmpeg options to
            The second for loop layers the videos
            Where we run ffmpeg_out, we start the real rendering
        """
        # FIXME: this could be done in the constructor
        _, outFile, self.frame_rate, layers, resolution = self.jsonReader.get_data(
            self.json_file)

        if resolution:
            self.streamInfo.resolution = resolution
        frame_rate = str(self.streamInfo.frame_rate)
        # FIXME: classwide outfile does not exist in export
        self.outFile = outFile

        flat_layers = [item for sublist in layers for item in sublist]
        operation_data = []
        self.total_layers = len(flat_layers) + 1

        try:
            assert (self.total_layers >
                    1), 'cannot run operation with 0 layers'
        except Exception as e:
            self.logger.exception(e)
            raise

        for i, layer in enumerate(flat_layers):
            self.sounded_speed = 1
            self.silent_speed = None
            opped_layer = self.apply_operation(layer, i, frame_rate,
                                               resolution, speedrun)
            self.tmp_files.append(opped_layer['sourceFile'])
            operation_data.append(opped_layer)

        output = []
        parent = self.offset_layer(operation_data[-1], i, resolution)
        reversed_operations = reversed(operation_data)
        if len(operation_data) > 1:
            for i, op in enumerate(reversed_operations):
                if i > 0:
                    overlay = self.offset_layer(op, i, resolution)
                    overlayed = self.overlay_layers(parent, overlay)
                    parent = overlayed
                if self.streamInfo.total_time > self.longest_time:
                    self.longest_time = self.streamInfo.total_time
            try:
                self.logger.debug("final overlay -\n{}\n{}".format(
                    parent['video'][1], parent['video'][0]))
                parent['video'][0] = ffmpeg.overlay(parent['video'][1],
                                                    parent['video'][0],
                                                    eof_action='pass')
            except Exception as e:
                self.logger.exception(e)
                raise
            self.streamInfo.total_time = self.longest_time

        try:
            self.logger.debug("creating final output")
            if parent['video'] and len(parent['video']) > 0:
                output_video = parent['video'][0].filter('fps',
                                                         fps=str(frame_rate))
                output.append(output_video)
            if parent['audio'] is not None:
                output.append(parent['audio'])

            self.ffmpeg_out(self.total_layers - 1,
                            *output,
                            outFile,
                            override_nb_frames=True)

        except Exception as e:
            self.logger.exception(e)
            raise

        if self.clean_tmp:
            try:
                self.logger.debug("cleaning -\n{}".format(
                    json.dumps(self.tmp_files)))
                self.clean_tmp_files(self.tmp_files)
            except Exception as e:
                self.logger.exception(e)
                raise
        self.logger.debug("COMPLETE\n")
        self.logger.info("")
Example #16
def buildpreroll(stream, filelocation):
    titleoffset = ((len(name) * 33) / 2) - 7
    if titleoffset > 716:
        title = textwrap.fill(name, width=40, break_long_words=False)
        titlenl = title.find("\n")
        titleoffset = ((titlenl * 33) / 2) - 7
    description = textwrap.fill(summary, width=22, break_long_words=False)
    num_of_lines = description.count("\n")
    if num_of_lines > 22:
        descriptionSize = 580 / num_of_lines
    else:
        descriptionSize = 26
    sidebar = ffmpeg.input(
        "{}overlays/prerolloverlay.mov".format(container_folder))
    poster = ffmpeg.input("{}poster.jpg".format(container_folder))
    fadeout = ffmpeg.input("{}overlays/fadeout.mov".format(container_folder))
    titlefont = "{}fonts/Bebas-Regular.ttf".format(container_folder)
    descriptionfont = "{}fonts/Roboto-Light.ttf".format(container_folder)
    poster = ffmpeg.filter(poster, "scale", 200, -1)
    preroll = ffmpeg.input("{}".format(filelocation), ss=10, t=Preroll_length)
    preroll = ffmpeg.filter(preroll, "scale", 1600, -1)
    prerollaudio = ffmpeg.input("{}prerollaudio.mp3".format(container_folder))
    preroll = ffmpeg.overlay(sidebar, preroll, x=300, y=125)
    preroll = ffmpeg.overlay(preroll, poster, x=40, y=195, enable="gte(t,1)")
    if CriticRating == "":
        preroll = ffmpeg.drawtext(
            preroll,
            text="Audiance Rating: {}%".format(AudienceRating),
            fontfile=titlefont,
            x=3,
            y=150,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=36,
            enable="gte(t,1)",
        )
    elif AudienceRating == "":
        preroll = ffmpeg.drawtext(
            preroll,
            text="Audiance Rating: {}%".format(CriticRating),
            fontfile=titlefont,
            x=3,
            y=150,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=36,
            enable="gte(t,1)",
        )
    elif CriticRating == "" and AudienceRating == "":
        print("we have no ratings available")
    else:
        preroll = ffmpeg.drawtext(
            preroll,
            text="Audiance Rating: {}%".format(AudienceRating),
            fontfile=titlefont,
            x=3,
            y=165,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=32,
            enable="gte(t,1)",
        )

        preroll = ffmpeg.drawtext(
            preroll,
            text="Critic Rating: {}%".format(CriticRating),
            fontfile=titlefont,
            x=3,
            y=135,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=32,
            enable="gte(t,1)",
        )
    preroll = ffmpeg.drawtext(
        preroll,
        text=name,
        fontfile=titlefont,
        x=(1106 - titleoffset),
        y=20,
        escape_text=True,
        fontcolor="0xFFFFFF@0xff",
        fontsize=76,
        enable="gte(t,1)",
    )
    preroll = ffmpeg.drawtext(
        preroll,
        text=description,
        fontfile=descriptionfont,
        x=3,
        y=500,
        escape_text=True,
        fontcolor="0xFFFFFF@0xff",
        fontsize=descriptionSize,
        enable="gte(t,1)",
    )
    preroll = ffmpeg.overlay(preroll, fadeout)
    preroll = ffmpeg.output(
        prerollaudio, preroll,
        ("{}prerolls/{} Preroll.mp4".format(container_folder, name)))
    ffmpeg.run(preroll)
    dirListing = os.listdir("{}prerolls/".format(container_folder))
    full_path = ["Prerolls/{0}".format(x) for x in dirListing]
    if len(dirListing) > 26:
        oldest_file = min(full_path, key=os.path.getctime)
        os.remove(oldest_file)
        plexsetting = re.sub("{}{}".format(container_folder, oldest_file), "",
                             plexsetting)
    preroll_list = (';{}prerolls/'.format(folder)).join(
        os.listdir("{}prerolls/".format(container_folder)))
    preroll_list = (";{}{}".format(folder, preroll_list))
    print(preroll_list)
    plex.settings.get("cinemaTrailersPrerollID").set(preroll_list)
    plex.settings.save()
    os.remove("{}poster.jpg".format(container_folder))
    os.remove("{}prerollaudio.mp3".format(container_folder))
    os.remove("{}".format(filelocation))
    print("done!")
Example #17
def handle_stream(camera_id):
    try:
        camera = Camera.objects.get(pk=camera_id)
    except Camera.DoesNotExist:
        raise CommandError(f'Camera "{camera_id}" does not exist')

    if not camera.enabled:
        print(f"'{camera.name}' is disabled.")
        return

    stream_url = camera.urls()[0]

    print(f"{camera_id}: Probing camera...")
    probe = ffmpeg.probe(stream_url)
    print(f"{camera_id}: Probe completed.")

    video_stream = next(
        (stream
         for stream in probe["streams"] if stream["codec_type"] == "video"),
        None,
    )
    if video_stream is None:
        raise CommandError(f"{camera_id}: No video stream found during probe.")

    codec_name = video_stream["codec_name"]
    width = int(video_stream["width"])
    height = int(video_stream["height"])
    r_frame_rate_parts = video_stream["r_frame_rate"].split("/")
    r_frame_rate = int(r_frame_rate_parts[0]) / int(r_frame_rate_parts[1])

    print()
    print(f"{camera_id}: Stream configuration:")
    print(f"{camera_id}: - Codec:       {codec_name}")
    print(f"{camera_id}: - Size:        {width}x{height}")
    print(f"{camera_id}: - Frame rate:  {r_frame_rate}")

    ds = get_detection_settings(camera)

    drawtext_enabled = camera.overlays.count() > 0
    drawbox_enabled = (ds["md_visualize"] or ds["od_visualize"]
                       or ds["fd_visualize"] or ds["ld_visualize"])
    detect_enabled = (drawbox_enabled or ds["md_enabled"] or ds["od_enabled"]
                      or ds["fd_enabled"] or ds["ld_enabled"])

    print()
    print(f"{camera_id}: Feature configuration:")
    print(f"{camera_id}: - Detection:       {detect_enabled}")
    print(f"{camera_id}:   - Visualization: {drawbox_enabled}")
    print(f"{camera_id}: - Text overlay:    {drawtext_enabled}")

    decode_enabled = detect_enabled
    overlay_enabled = drawtext_enabled and drawbox_enabled
    encode_enabled = drawtext_enabled or drawbox_enabled
    transcode_enabled = not encode_enabled
    copy_enabled = transcode_enabled and (codec_name == "h264"
                                          or codec_name == "hevc")

    print()
    print(f"{camera_id}: Pipeline configuration:")
    print(f"{camera_id}: - Decode:  {decode_enabled}")
    print(f"{camera_id}: - Merge:   {overlay_enabled}")
    print(f"""{camera_id}: - Output:  {'Encode' if encode_enabled
        else ('Copy' if copy_enabled else 'Transcode')}""")

    # TODO: Add appropriate hardware acceleration
    # Intel/AMD:
    #   -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 -hwaccel_output_format vaapi
    #   -i <input> -c:v h264_vaapi <output>
    # Nvidia:
    #   -hwaccel cuda -hwaccel_output_format cuda
    #   -i <input> -c:v h264_nvenc <output>
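    # A hedged sketch of how the flags above could be passed through ffmpeg-python
    # (keyword arguments become CLI options; the VAAPI device path is an assumption):
    #
    #   src = ffmpeg.input(stream_url,
    #                      hwaccel="vaapi",
    #                      hwaccel_device="/dev/dri/renderD128",
    #                      hwaccel_output_format="vaapi")
    #   out = src.output("out.m3u8", vcodec="h264_vaapi")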

    rawvideo_params = {
        "format": "rawvideo",
        "pix_fmt": "rgb24",
        "s": f"{width}x{height}",
    }

    hls_params = {
        "flags": "+cgop",
        "g": r_frame_rate,
        "hls_time": 1,
        "hls_list_size": 900,
        "hls_flags": "delete_segments",
    }

    mp4_params = {
        "movflags": "+faststart",
    }

    stream_dir = f"{settings.STORAGE_DIR}/stream/{camera.id}"
    record_dir = f"{settings.STORAGE_DIR}/record/{camera.id}"
    makedirs(stream_dir, exist_ok=True)
    makedirs(record_dir, exist_ok=True)

    fifo_path = mkfifotemp("h264")

    output = ffmpeg.input(
        stream_url,
        **({
            "rtsp_transport": "tcp"
        } if camera.camera_type.streams.all()[0].force_tcp else {}),
    )
    outputs = []

    drawtext = None
    drawbox = None

    if decode_enabled:
        outputs.append(output.output("pipe:", **rawvideo_params))
    if drawtext_enabled:
        drawtext = output.drawtext("Hello, world!")
        output = drawtext
    if drawbox_enabled:
        drawbox = ffmpeg.input("pipe:", **rawvideo_params)
        output = drawbox
    if overlay_enabled:
        output = ffmpeg.overlay(drawtext, drawbox)

    if encode_enabled:
        split = output.filter_multi_output("split")
        inputs = [split.stream(0), split.stream(1)]
    else:
        inputs = [output, output]

    outputs.append(inputs[0].output(
        f"{stream_dir}/out.m3u8",
        vcodec="copy" if copy_enabled else "h264_vaapi",
        **hls_params,
    ))
    outputs.append(inputs[1].output(
        fifo_path,
        vcodec="copy" if copy_enabled else "h264").overwrite_output())

    main_cmd = ffmpeg.merge_outputs(*outputs)
    main_cmd = main_cmd.global_args("-hide_banner", "-loglevel", "error")

    print()
    print(f"{camera_id}: Starting stream...")
    main_process = main_cmd.run_async(pipe_stdin=True, pipe_stdout=True)
    print(f"{camera_id}: Started stream.")

    print()
    print(f"{camera_id}: Starting segmented recorder...")
    record_process = Process(
        target=segment_h264,
        args=(
            camera,
            fifo_path,
            f"{record_dir}/VID_%Y%m%d_%H%M%S.mp4",
            mp4_params,
        ),
        name=f"'{camera.name}'-record",
    )
    record_process.start()
    print(f"{camera_id}: Started segmented recorder.")

    manual_exit = False

    try:
        if decode_enabled:
            print()
            print(f"{camera_id}: Starting overlay loop...")

            while main_process.poll() is None:
                in_bytes = main_process.stdout.read(width * height * 3)
                if not in_bytes:
                    break
                frame = np.frombuffer(in_bytes,
                                      np.uint8).reshape([height, width, 3])

                if detect_enabled:
                    # TODO: Do detection on frame
                    pass

                if drawbox_enabled:
                    # TODO: Do drawbox on frame
                    pass

                    main_process.stdin.write(frame.astype(np.uint8).tobytes())

        else:
            print()
            print(f"{camera_id}: Waiting for end of stream...")
            main_process.wait()

        print(f"{camera_id}: Stream ended. Status: {main_process.returncode})")

    except KeyboardInterrupt:
        manual_exit = True

    print()
    print(f"{camera_id}: Stopping stream and segmented recorder...")

    main_process.terminate()
    kill(record_process.pid, SIGINT)
    record_process.join()

    camera.last_ping = None
    camera.save()

    print(f"{camera_id}: All done.")

    if not manual_exit:
        exit(2)
Example #18
File: test2.py, Project: sqr/restream_flask
    ydl_opts = {'format': 'best'}
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        peine = ydl.extract_info(video_url, download=False)

    return peine.get('url')


VIDEO_URL = 'https://www.youtube.com/watch?v=dp8PhLsUcFE'
RTMP_SERVER = 'rtmp://a.rtmp.youtube.com/live2/r1mr-8fra-czhb-agqe'

try:
    stream_map = None
    stream1 = ffmpeg.input(get_manifest(VIDEO_URL), re=None)
    stream2 = ffmpeg.input('mosca_65.png')
    stream_ol = ffmpeg.overlay(stream1,
                               stream2,
                               x='main_w-overlay_w-50',
                               y='50')
    a1 = stream1.audio
    stream = ffmpeg.output(stream_ol,
                           a1,
                           RTMP_SERVER,
                           format='flv',
                           vcodec='libx264',
                           acodec='aac',
                           preset='medium',
                           g='120',
                           crf='23',
                           maxrate='4M',
                           bufsize='5M',
                           channel_layout='stereo')
    print(stream.get_args())
Example #19
def create_single_tweet(pos, tweet, unique_code):
    stream = ffmpeg.input(f'{parpath}/img/white.jpg',
                          pattern_type='glob',
                          framerate=1)

    stream = ffmpeg.overlay(stream,
                            ffmpeg.input(tweet.profile_pic),
                            x=100,
                            y=75)

    stream = ffmpeg.drawtext(stream,
                             text=tweet.name,
                             font=f"{parpath}/fonts/OpenSansEmoji.ttf",
                             fontsize=25,
                             box=1,
                             boxborderw=15,
                             escape_text=True,
                             x=200,
                             y=50)

    stream = ffmpeg.drawtext(stream,
                             text=tweet.username,
                             font=f"{parpath}/fonts/OpenSansEmoji.ttf",
                             fontsize=25,
                             box=1,
                             boxborderw=15,
                             escape_text=True,
                             x=200,
                             y=100)

    stream = ffmpeg.drawtext(stream,
                             text=tweet.time_stamp,
                             font=f"{parpath}/fonts/OpenSansEmoji.ttf",
                             fontsize=25,
                             box=1,
                             boxborderw=15,
                             escape_text=True,
                             x=1200,
                             y=50)

    wrapped_tweet = wrap(tweet.text, 90)

    # The y value where the text begins
    vertical_y = 200

    for i, line in enumerate(wrapped_tweet):
        stream = ffmpeg.drawtext(stream,
                                 text=line,
                                 fontfile=f"{parpath}/fonts/OpenSansEmoji.ttf",
                                 fontsize=28,
                                 box=1,
                                 boxborderw=15,
                                 escape_text=True,
                                 x=200,
                                 y=200 + (50 * i))
        # Remember the offset for each new line of text
        vertical_y = vertical_y + 50

    num_images = len(tweet.images)

    if num_images != 0:
        for position in range(0, num_images):
            # resize the image and return the location
            # The order of images depends on the number of images
            url = resize_image(tweet.images[position], unique_code, position,
                               "link")

            if position < 2:
                stream = ffmpeg.overlay(
                    stream,
                    ffmpeg.input(url),
                    x=200 + (position * 400),
                    # Incorporate the offset and start below the final
                    # line of text
                    y=vertical_y)
            else:
                stream = ffmpeg.overlay(
                    stream,
                    ffmpeg.input(url),
                    x=200 + ((position - 2) * 400),
                    # Start another row of pictures
                    y=vertical_y + 300)

    stream = ffmpeg.output(stream,
                           f'{parpath}/videos/{unique_code}-{pos}.mp4',
                           loglevel='panic')
    ffmpeg.run(stream)
Example #20
    stream2 = ffmpeg.setpts(stream2, 'PTS+%s/TB' % (start_offset - 1))
    for i in range(0, len(dive_profile)):
        string = next(annotator)
        if (i + 1) < len(dive_profile):
            enable_str = 'between(t,%s,%s)' % (i + start_offset,
                                               i + 1 + start_offset)
        else:
            enable_str = 'gte(t,%s)' % (i + start_offset)
        stream = ffmpeg.drawtext(stream,
                                 string,
                                 x=50,
                                 y=50,
                                 fontfile=fontfile,
                                 fontsize=70,
                                 escape_text=False,
                                 shadowcolor='Black',
                                 shadowx=3,
                                 shadowy=3,
                                 start_number=100,
                                 enable=enable_str,
                                 fontcolor='WhiteSmoke')
    stream = ffmpeg.overlay(stream,
                            stream2,
                            x=50,
                            y=500,
                            enable='gte(t,%s)' % (start_offset - 1))
    stream = ffmpeg.output(stream, audio, output)
    stream = ffmpeg.overwrite_output(stream)
    ffmpeg.run(stream)
Example #21
def insert_pic(video, audio, path, x, y):
    f = ffmpeg.input(str(pathlib.Path(path).absolute()))
    pic_vid = f.video
    video = ffmpeg.overlay(video, pic_vid, x=x, y=y)
    return video, audio
Example #22
File: fmpg.py, Project: Arnav-Ajay/ffmpeg
        output = ffmpeg.output(input_audios[0], output_filename, **{
            'b:a': '48k'
        }).run()

    else:
        output = ffmpeg.output(input_videos[0], input_audios[0],
                               output_filename, **{
                                   'b:v': '48k',
                                   'b:a': '48k'
                               }).run()

#    for more than one input file
else:
    overlay = ffmpeg.overlay(base_image,
                             input_videos[0],
                             x=0,
                             y=0,
                             eof_action='pass')
    n = len(input_videos)
    n -= 1
    for i in range(1, len(input_videos)):

        cord = r.get_cord(i - 1, n)
        overlay = ffmpeg.overlay(overlay,
                                 input_videos[i],
                                 x=cord[0],
                                 y=cord[1],
                                 eof_action='pass')

    #    for fewer than three input audios; returns audio
    audio = p.check_num_audios(input_audios)
Example #23
def render_video(event, context):


    # Get the document in firestore that has been updated
    path_parts = context.resource.split('/documents/')[1].split('/')
    collection_path = path_parts[0]
    document_path = '/'.join(path_parts[1:])

    affected_doc = client.collection(collection_path).document(document_path)
    
    id = event['value']['fields']['id']['stringValue']

    encoding_started = event['value']['fields']['encoding_started']['booleanValue']
    download_finished = event['value']['fields']['download_finished']['booleanValue']
    bpm_finished = event['value']['fields']['bpm_finished']['booleanValue']
    

    if (not encoding_started) and download_finished and bpm_finished:

        affected_doc.update({
            u'encoding_started': True
        })

        temp_local_cat_filename = "/tmp/cat.mp4"
        temp_local_input_filename = "/tmp/input.mp4"
        temp_local_output_filename = "/tmp/" + id + ".mp4"
        
        # Get the cat mp4
        cat_mp4_blob = storage_client.bucket(ASSETS_BUCKET_NAME).get_blob("cat.mp4")
        cat_mp4_blob.download_to_filename(temp_local_cat_filename)

        print(f"downloaded cat mp4 to { temp_local_cat_filename }")

        # Get the input mp4
        input_mp4_blob = storage_client.bucket(DOWNLOAD_BUCKET_NAME).get_blob(id + ".mp4")
        input_mp4_blob.download_to_filename(temp_local_input_filename)

        print(f"downloaded input mp4 to { temp_local_input_filename }")

        # Get the bpm
        bpm = float(event['value']['fields']['bpm']['doubleValue'])
        print(f"bpm is { bpm }")


        # Apply ffmpeg
        print("starting render")
        
        
        
        looping = ffmpeg.input(temp_local_cat_filename)#, stream_loop = -1) # Import tom as a looping stream, tom is 426x240
        looping = ffmpeg.filter(looping, "colorkey", color="0x52f21f", similarity=0.5, blend=0.1) # This green I got myself from the tom video
        looping = ffmpeg.filter(looping, "loop", loop=10)
        def get_length(filename):
            result = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
                                     "format=duration", "-of",
                                     "default=noprint_wrappers=1:nokey=1", filename],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            return float(result.stdout)
        duration = get_length(temp_local_input_filename)
        if 30 < duration < 50:
            start = duration-30
            length = 25
        elif duration < 30:
            start = 2
            length = duration - 4
        else:
            start = (duration/2)-15
            length = 30
        stream = ffmpeg.input(temp_local_input_filename, ss=start, t=length)  # Start at the computed offset and keep the computed length
        video = stream.video
        audio = stream.audio

        looping = ffmpeg.filter(looping, "setpts", "{}*PTS".format(118 /bpm))


        video = ffmpeg.filter(video, 'scale', 1280, 720)  # Resize to 720p
        video = ffmpeg.overlay(video, looping, shortest=1, y="0")  # overlay onto the scaled video
        stream = ffmpeg.output(video, audio, temp_local_output_filename).overwrite_output()
        stream.run()

        print("render finished")

        

        print("uploading result")

        distribution_bucket = storage_client.bucket(DISTRIBUTION_BUCKET_NAME)
        upload_blob = distribution_bucket.blob(id + ".mp4")
        upload_blob.upload_from_filename(temp_local_output_filename)

        print("result uploaded")
        
        # Get the download url

        # Make the blob public

        upload_blob.make_public()
        download_url = upload_blob.public_url

        affected_doc.update({
            u'encoding_finished': True,
            u'url' : download_url
        })

        # Clean up by deleting all the temp files

        os.remove(temp_local_cat_filename)
        os.remove(temp_local_input_filename)
        os.remove(temp_local_output_filename)

        print("cleaned up")
Example #24
def segment_video_mutliprocess(segment_video_infos):
    segment_video_start_time = time.time()

    segment = segment_video_infos['segment']
    start_timestamp = segment_video_infos['start_timestamp']
    segment_video_path = "tmp/" + start_timestamp + "/video_" + str(
        segment).zfill(3) + ".mp4"
    segment_video_input_file = config.background_video
    segment_video = ffmpeg.input(segment_video_input_file)
    counter = 0
    for segment_video_info in segment_video_infos['data']:
        counter += 1

        segment_start = segment_video_info['segment_start']
        step_note = segment_video_info['step_note']
        note_instrument = segment_video_info['note_instrument']
        duration = segment_video_info['duration']
        octave = segment_video_info['octave']

        if config.one_video_only['enabled']:
            note_file = note_instrument + '/video.mp4'
        else:
            note_file = note_to_file(octave, step_note, note_instrument)

        segment_video_output_file = segment_video_path
        # if os.path.exists(segment_video_path):
        #     os.rename(segment_video_path, segment_video_path + '.old')
        #     segment_video_input_file = segment_video_path + '.old'
        # else:
        #     segment_video_input_file = config.background_video

        split_preventing_offset = 1 / float(counter + 1000)
        itsoffset = segment_start / float(1000) + split_preventing_offset
        x = random.randint(config.subvideo['x_min'], config.subvideo['x_max'])
        y = random.randint(config.subvideo['y_min'], config.subvideo['y_max'])
        end = config.segment_duration / float(1000)
        trim_end = itsoffset + duration

        temp_video = ffmpeg.input(note_file, itsoffset=itsoffset)
        temp_video = ffmpeg.trim(temp_video, start=0, end=trim_end)

        if config.green_screen['enabled'] == True:
            gs_color = config.green_screen['color']
            gs_similarity = config.green_screen['similarity']
            gs_blend = config.green_screen['blend']
            temp_video = ffmpeg.filter(temp_video,
                                       'colorkey',
                                       color=gs_color,
                                       similarity=gs_similarity,
                                       blend=gs_blend)
            #output sliced video to temp
        segment_video = ffmpeg.overlay(segment_video,
                                       temp_video,
                                       x=x,
                                       y=y,
                                       eof_action='pass')
        segment_video = ffmpeg.trim(segment_video, start=0, end=end)
        # Process a batch of video
        if counter > config.video_batch['limit']:
            print(
                'Warning: the number of videos to process exceeds the limit ('
                + str(config.video_batch['limit']) +
                '); consider adjusting speed_control')
            continue
        #print ('segment_video_info', segment_video_info)
    output_segment_video(segment_video, segment_video_path)
    # segment_video = ffmpeg.output(segment_video, segment_video_path, preset='ultrafast',
    #     loglevel=config.ffmpeg['error_level'], threads=config.ffmpeg['threads'])
    # ffmpeg.run(segment_video)

    #os.system("ffmpeg -i "+segment_video_input_file+" -itsoffset "+str(segment_start/float(1000))+" -i "+temp_mp4+" -loglevel panic -preset ultrafast -filter_complex '[1:v]colorkey=0x"+green_screen_color+":"+green_screen_similarity+":"+gs_blend+"[ckout];[0:v][ckout]overlay[out]' -map '[out]' " + segment_video_output_file)

    segment_video_end_time = time.time()
    segment_video_elapsed = segment_video_end_time - segment_video_start_time

    print('Video process segment: ' + str(segment) +
          ' done, total duration: ' +
          str(datetime.timedelta(seconds=segment_video_elapsed)))
    return segment_video_path
Example #25
def blur_softly(matrix: List[Dict[str, Any]], video_in, video_out=""):
    """ Запускает обработку видео через ffmpeg

    Обработываемых областей может быть несколько, они могут пересекаться по координатам и времени.
    Для непересекающихся областей порядок их следования в массиве не имеет значения.
    Параметры разных областей друг на друга не влияют, массив и словари не модифицируются при работе.

    Если путь к результату не задан, то он помещается рядом с исходным файлом, с припиской к имени "_blurred".

    Обрабатываемая область описывается тремя диапазонами: ширина, высота и продолжительносью, и двумя параметрами обработки: радиусом и степенью.
    Ширина может быть задана любыми двумя параметрами из списка: 'left', 'right', 'width'
    Высота может быть задана любыми двумя параметрами из списка: 'top', 'bottom', 'height'
    Продолжительность тоже задаётся двумя параметрами, но их имена и значения зависят от желаемого спасоба измерения времени.

    Для задания продолжительности можно использовать разные единицы измерения: секунды или кадры.
    Секунды могут быть дробными, задаются в виде чисел с плавающей точкой.
    Продолжительность в секундах задаётся любыми двумя параметрами из списка: 'timestart', 'timeend', 'length'
    Кадры могут быть только целыми, начальный кадр имеет номер 0.
    Продолжительность в кадрах задаётся любыми двумя параметрами из списка: 'framestart', 'frameend', 'length'

    Радиус размытия по умолчанию ставится как четверть от меньшего из размеров области.
    Задать его явно можно через параметр 'radius'.
    При превышении допустимого значения ffmpeg откажется от работы и выведет соответствующее сообщение.

    Степень размытия задаётся через параметр 'power'.
    По умолчанию его значение 5.

    :param matrix: список областей обработки
    :param video_in: путь к исходному видео
    :param video_out: путь к результату
    """
    input_file = ffmpeg.input(video_in)

    source_stream = input_file.video
    st0 = source_stream

    for d in matrix:
        top, height = _calc_size(d, 'top', 'bottom', 'height')
        left, width = _calc_size(d, 'left', 'right', 'width')
        if 'timestart' in d or 'timeend' in d:
            start, length = _calc_size(d, 'timestart', 'timeend', 'length')
            time_unit = 't'
        else:
            start, length = _calc_size(d, 'framestart', 'frameend', 'length')
            time_unit = 'n'

        radius = d.get('radius')
        if radius is None:
            radius = min(width, height) / 4
        power = d.get('power')
        if power is None:
            power = 5

        enable = f'between({time_unit},{start},{start + length})'

        st1 = ffmpeg.crop(source_stream, left, top, width, height)
        st2 = ffmpeg.filter_(st1, 'boxblur', lr=radius, lp=power, enable=enable)
        st0 = ffmpeg.overlay(st0, st2, x=left, y=top, enable=enable)

    if video_out == "":
        video_in = PurePath(video_in)
        video_out = str(video_in.parent.joinpath(video_in.stem + "_blurred" + video_in.suffix))

    output = ffmpeg.output(st0, input_file.audio, video_out,
                           vcodec='libx264',
                           acodec='copy',
                           crf=17,
                           preset='fast',
                           tune='stillimage')

    ffmpeg.run(output, overwrite_output=True)
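
A usage sketch based on the parameter scheme described in the docstring (paths and numbers are placeholders): blur a 200x150 region in the top-left corner, starting at t=5s for 7 seconds.

blur_softly(
    [{'left': 0, 'width': 200, 'top': 0, 'height': 150,
      'timestart': 5.0, 'length': 7.0, 'power': 10}],
    'input.mp4', 'output.mp4')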
Example #26
    "r": 24,
    "crf": 21,
    "b:v": "800k",
    "ac": 1,  # Mono
    # "b:a": "128k",
    "acodec": "aac",  # copy
}

preFileStream = ffmpeg.input(PRE_FILE, **preFile_input_args)
inputStream = ffmpeg.input(INPUT_FILE_PATH, **input_args)
a1 = preFileStream.audio
a2 = inputStream.audio

inputStream = ffmpeg.filter(inputStream, 'scale', size='1920x1080', force_original_aspect_ratio='decrease')
inputStream = ffmpeg.filter(inputStream, 'pad', '1920', '1080', '(ow-iw)/2', '(oh-ih)/2')
inputStream = ffmpeg.overlay(inputStream, OVERLAY_FILE)
inputStream = ffmpeg.filter(inputStream, "fade", type='in', start_time=0, duration=1)
# inputStream = ffmpeg.filter(inputStream, "fade", type='out', duration=1)

stream = ffmpeg.concat(preFileStream, a1, inputStream, a2, v=1, a=1)

OUTPUT_PATH = "../output/" + "[" + Utils.sToTimeFormat(TRIM_START, "%H:%M:%S.%f") + "-" + Utils.sToTimeFormat(TRIM_END,
                                                                                                              "%H:%M:%S.%f") + "]" + INPUT_FILE_NAME
if not os.path.exists("../output/"):
    os.makedirs("../output/")
# if os.path.exists(OUTPUT_PATH):
#  os.remove(OUTPUT_PATH)

stream = ffmpeg.output(stream, OUTPUT_PATH, **output_args)
stream = ffmpeg.overwrite_output(stream)
ffmpeg.run(stream)
Example #27
 def merge(self, outMerged):  # ffmpeg merge function; vidF and subF are streams defined elsewhere
     ffmpeg.overlay(vidF, subF).output("C://YT_Downloads/" +
                                       self.outMerged).run()
     print("Successfully merged!")