Code example #1
def test__filter__custom():
    stream = ffmpeg.input('dummy.mp4')
    stream = ffmpeg.filter(stream, 'custom_filter', 'a', 'b', kwarg1='c')
    stream = ffmpeg.output(stream, 'dummy2.mp4')
    assert stream.get_args() == [
        '-i',
        'dummy.mp4',
        '-filter_complex',
        '[0]custom_filter=a:b:kwarg1=c[s0]',
        '-map',
        '[s0]',
        'dummy2.mp4',
    ]
Code example #2
def save_video(fname, images, output_fps=30, vcodec='libx264', filters=''):
    assert isinstance(images, np.ndarray), "images should be np.array: NHWC"
    num_frames, height, width, channels = images.shape
    stream = ffmpeg.input('pipe:', format='rawvideo', 
                          pix_fmt='rgb24', s='{}x{}'.format(width, height))
    stream = ffmpeg.filter(stream, 'setpts', '2*PTS')  # 2*PTS is for slower playback
    stream = ffmpeg.output(stream, fname, pix_fmt='yuv420p', vcodec=vcodec, r=output_fps)
    stream = ffmpeg.overwrite_output(stream)
    process = ffmpeg.run_async(stream, pipe_stdin=True)
    for frame in tqdm(images, desc='writing video to %s' % fname):
        process.stdin.write(frame.astype(np.uint8).tobytes())
    process.stdin.close()
    process.wait()
Code example #3
def generate_video_cover(path, file_id):
    name, output_path = get_filename(path, file_id)
    file = File.objects.filter(pk=file_id).first()
    if not exists(join(settings.MEDIA_ROOT, "covers")):
        os.mkdir(join(settings.MEDIA_ROOT, "covers"))
    if not exists(output_path):
        stream = ffmpeg.input(path)
        stream = ffmpeg.filter(stream, 'scale', 1920, -1)
        stream = ffmpeg.output(stream, output_path, vframes=1)
        ffmpeg.run(stream)
        file.cover.name = join("covers", name)
        file.save()
    return output_path
Code example #4
File: destroyer.py  Project: tatey12/videoEditBot
 def music(SOXCMD, AUDPRE):
     try:
         if notNone(d['musicdelay']):
             d['musicdelay'] = constrain(d['musicdelay'], 0, DURATION)
         if download(f"{pat}/BG{e0}.mp3", d['music'], skip = d['musicskip'], delay = d['musicdelay'], duration = 120, video = False):
             if len(SOXCMD) > 0:
                 exportSox(AUDPRE, "PRE_MUSIC")
                 AUDPRE = "PRE_MUSIC"
             qui(ffmpeg.filter([ffmpeg.input(f"{pat}/{AUDPRE}{e0}.wav"), ffmpeg.input(f"{pat}/BG{e0}.mp3")], "amix", duration = "first").output(f"{pat}/MUSIC{e0}.wav")).run()
             return "MUSIC"
     except Exception as ex:
         fixPrint("music error.", ex)
         #printEx(ex)
         return AUDPRE
Code example #5
File: panopto_dl.py  Project: mbikovitsky/panopto-dl
def process_audio(inputs: Iterable[VideoFile]) -> Optional[Tuple[Any, Dict[str, Any]]]:
    audio_streams = [
        video.ffmpeg_input.audio for video in inputs if video.contains_audio
    ]
    if not audio_streams:
        return None

    if len(audio_streams) == 1:
        return audio_streams[0], {"acodec": "copy"}
    else:
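        # More than one audio track: stream copy is no longer possible, so mix them with amix and re-encode (libopus here)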
        return (
            ffmpeg.filter(audio_streams, "amix", inputs=len(audio_streams)),
            {"acodec": "libopus"},
        )
Code example #6
def build_video_stream(infile, outfile, rates):
    # Get stream details.
    audio_streams, video_streams, audio, video = split_into_streams(infile)

    # Determine frame rate of input video.
    fps = rates[2]  # default value
    if video_streams and type(video_streams[0]) != str:
        # Determine maximum frame rate from first video stream in input file.
        avg_fps_str = video_streams[0][
            'avg_frame_rate']  # picks 1st video stream in list
        avg_fps = round(
            int(avg_fps_str.split('/')[0]) / int(avg_fps_str.split('/')[1]))
        fps = min([avg_fps, fps])

    # Define video max height to 720p (nominal HD).
    video = ffmpeg.filter(video, 'scale', -1, 'min(720, ih)')
    # Define max framerate.
    video = ffmpeg.filter(video, 'fps', fps)

    # Output correct stream.
    stream = generate_output_stream(video_streams, audio_streams, video, audio,
                                    rates, outfile)
    return stream
Code example #7
    def overlay_layers(self, parent_split, overlay_split):
        output_splits = {'video': None, 'audio': None}
        self.logger.debug("overlay_layers")

        video_count = 0
        if parent_split['video'] is not None:
            video_count += 1
            output_splits['video'] = [
                parent_split['video'][0], parent_split['video'][1]
            ]
        if overlay_split['video'] is not None:
            video_count += 1
            output_splits['video'] = [
                overlay_split['video'][0], overlay_split['video'][1]
            ]
        if video_count > 1:
            try:
                self.logger.debug("overlay video")
                top_overlay_0 = ffmpeg.overlay(parent_split['video'][0],
                                               overlay_split['video'][0],
                                               eof_action='pass')
                bottom_overlay_0 = ffmpeg.overlay(parent_split['video'][1],
                                                  overlay_split['video'][1],
                                                  eof_action='repeat')
                output_splits['video'] = [top_overlay_0, bottom_overlay_0]
            except Exception as e:
                self.logger.exception(e)
                raise

        audio_count = 0
        if parent_split['audio'] is not None:
            audio_count += 1
            output_splits['audio'] = parent_split['audio']
        if overlay_split['audio'] is not None:
            audio_count += 1
            output_splits['audio'] = overlay_split['audio']
        if audio_count > 1:
            try:
                self.logger.debug("overlay audio")
                combined_audio = ffmpeg.filter(
                    [parent_split['audio'], overlay_split['audio']],
                    'amix',
                    inputs=2,
                    duration='longest')
                output_splits['audio'] = combined_audio
            except Exception as e:
                self.logger.exception(e)
                raise
        return output_splits
Code example #8
def concat_ranges(filename, out_filename, ranges, config: VideoMontageConfig):
    """ ranges are in seconds """

    assert os.path.isfile(filename)

    input_vid = ffmpeg.input(filename)

    total_duration = sum([x[1] - x[0] for x in ranges])
    print(f'Processing {out_filename} ({len(ranges)} ranges -> {total_duration:.0f} seconds)')

    streams = []

    for r in ranges:
        start = int(r[0])
        end = math.floor(r[1])

        vid = (
            input_vid
                .trim(start=start, end=end)
                .setpts('PTS-STARTPTS')
        )
        aud = (
            input_vid['a:0']
                .filter_('atrim', start=start, end=end)
                .filter_('asetpts', 'PTS-STARTPTS')
        )

        if config.mix_mic_audio_track:
            mic = (
                input_vid['a:1']
                    .filter_('atrim', start=start, end=end)
                    .filter_('asetpts', 'PTS-STARTPTS')
            )
            aud = ffmpeg.filter([aud, mic], 'amix', duration='shortest', weights=f'1 {config.mic_volume_multiplier}')

        streams.append(vid)
        streams.append(aud)

    joined = ffmpeg.concat(*streams, v=1, a=1)
    output = ffmpeg.output(joined, out_filename, vcodec='hevc_nvenc', video_bitrate=config.video_bitrate)
    output = output.global_args('-loglevel', 'error')
    output = ffmpeg.overwrite_output(output)

    start_time = time.time()

    custom_ffmpeg_run(output, ffmpeg_cmd)

    elapsed = time.time() - start_time
    print(f'Elapsed {elapsed:.2f} seconds\n')
Code example #9
File: preprocess.py  Project: Arnav-Ajay/ffmpeg
def create_output(input_audios, audio, overlay, session_num, name):

    overlay = overlay.filter("scale", 640, 480)

    if(len(input_audios)<3 and audio!=None):
        output = ffmpeg.output(overlay, audio, name, **{'b:v':'48k', 'b:a':'48k'}).run()

    elif(audio==None):
        output = ffmpeg.output(overlay, name, **{'b:v':'48k'}).run()

    else:
        for i in range(2, len(input_audios)):
            audio = ffmpeg.filter([audio, input_audios[i]], 'amix')

        output = ffmpeg.output(overlay, audio, name, **{'b:v':'48k', 'b:a':'48k'}).run()
Code example #10
File: ffmpeg_funcs.py  Project: Naawww/ffmpeg_gnu
def make_gif_from_video(video_file_path, start=0, duration=2.5, gif_name=None):
    gif_name = gif_name or drop_extension(video_file_path)
    gif_file_path = gif_name + ".gif"

    # get the file, and make the palette
    in_file = ffmpeg.input(video_file_path, ss=start, t=duration)
    palette = in_file.filter("palettegen")

    # scale and lower framerate:
    smaller = in_file.filter('scale', 480, -1).filter('fps', 12)
    # apply the palette to the video clip, and make into a gif
    output_stream = ffmpeg.filter([smaller, palette],
                                  "paletteuse").output(gif_file_path)
    ffmpeg.overwrite_output(output_stream).run()
    return
Code example #11
    def process_file(self, track: Track):
        """
        Attempts to transcode the file in download/temp into the audio file
        """

        temp_file = Path("download/temp")
        if not temp_file.is_file():
            raise Exception("temp file doesn't exist")

        input_ffmpeg = ffmpeg.input("download/temp")
        input_audio = input_ffmpeg['a'] # We will not be referencing the video stream, only the main audio stream

        # Normalise loudness
        input_audio = ffmpeg.filter(input_audio, "loudnorm", I=-16, TP=-1.5, LRA=11)


        # https://superuser.com/questions/1362176/how-to-trim-silence-only-from-beginning-and-end-of-mp3-files-using-ffmpeg/1364824
        # Trim silence from start
        input_audio = ffmpeg.filter(input_audio, "silenceremove", start_periods=1, start_duration=1, start_threshold="-60dB", detection="peak")

        # Convert the samples to double-precision planar format (dblp)
        input_audio = ffmpeg.filter(input_audio, "aformat", "dblp")

        # Reverse the audio
        input_audio = ffmpeg.filter(input_audio, "areverse")

        # Trim silence from start (but this track is reversed now, so this is trimming from the end)
        input_audio = ffmpeg.filter(input_audio, "silenceremove", start_periods=1, start_duration=1, start_threshold="-60dB", detection="peak")

        # Convert the samples to double-precision planar format (dblp)
        input_audio = ffmpeg.filter(input_audio, "aformat", "dblp")

        # Reverse the audio
        input_audio = ffmpeg.filter(input_audio, "areverse")

        output_ffmpeg = ffmpeg.output(input_audio, filename=f"download/{track.info.source_id}.mp3")
        output_ffmpeg = ffmpeg.overwrite_output(output_ffmpeg) # overwrite if needed


        def run_ffmpeg(output):
            return ffmpeg.run(output, quiet=True)

        # Force monkey patching to run this in the background in this supposed background task...
        tpool.execute(run_ffmpeg, output_ffmpeg)

        try:
            os.remove("download/temp")
        except:
            raise Exception("failed to delete temp file")
Code example #12
def main(args):
    if args.video is not None:
        stream = ffmpeg.input(args.video)
        stream = ffmpeg.filter(stream, 'fps', fps=40, round='up')
        stream = ffmpeg.output(stream, "/home/cristina/Documentos/Videos/Frame-%d.png",
                               video_bitrate='5000k', sws_flags='bilinear', start_number=0)
        ffmpeg.run(stream)
    if args.videoFrames is not None:
        pathFrames=args.videoFrames 
        if args.json is not None:
            jsonPath=args.json
            cutVideo(pathFrames, jsonPath)
    if args.pathH is not None:
        clusterHOG(args.pathH)
    if args.guardarImH is not None:
        saveHOG(args.guardarImH)
    if args.colorDominante is not None:
        calcularKmeansColorDominante(args.colorDominante)
Code example #13
def interpolate(stream, mode='blend'):
    """create  interpolation function that  blends between frames"""
    options = dict(fps=24)
    if mode == 'blend':
        options.update(dict(
            mi_mode='blend'
        ))
    elif mode == 'flow':
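        # mci = motion-compensated interpolation; aobmc and bilat select the motion-compensation and motion-estimation methods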
        options.update(dict(
            mi_mode='mci',
            mc_mode='aobmc',
            me_mode='bilat',
            vsbmc=0,
            mb_size=8
        ))
    filter_ = ffmpeg.filter(stream, 'minterpolate', **options)
    return filter_
Code example #14
File: fun.py  Project: digitalcircuits/g_man
    async def _harmonize(self, ctx, vstream, astream, kwargs):
        pitches = kwargs['pitches']
        astreams = []
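        # Repeatedly asplit the audio: one branch is pitch-shifted with the rubberband filter, the other feeds the next split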
        for i in range(len(pitches)):
            pitch = pitches[i]
            astream = astream.asplit()
            astreams.append(astream[1].filter('rubberband', pitch=pitch))
            astream = astream[0]
        astreams.append(astream)

        astream = astreams[0]
        for i in range(1, len(astreams)):
            astream = ffmpeg.filter([astream, astreams[i]],
                                    'amix').filter('volume',
                                                   volume=2,
                                                   precision='fixed')

        return vstream, astream, {}
Code example #15
def palettegen(args):
    """
    This function generates a palette image.

    :type args: class
    :param args:

    :rtype: str
    :return: Path that saved palette.png
    """
    stream = ffmpeg.input(args.input)
    stream = ffmpeg.filter(stream, 'palettegen')
    stream = ffmpeg.output(stream, args.path + "/" + "palette.png")
    try:
        ffmpeg.run(stream, overwrite_output=args.yes)
    except Exception as e:
        print("Error: %s" % e)

    return args.path + "/" + "palette.png"
Code example #16
def save_video(fname, images, output_fps=30, vcodec="libx264", filters=""):
    assert isinstance(images, np.ndarray), "images should be np.array: NHWC"
    num_frames, height, width, channels = images.shape
    stream = ffmpeg.input("pipe:",
                          format="rawvideo",
                          pix_fmt="rgb24",
                          s="{}x{}".format(width, height))
    stream = ffmpeg.filter(stream, "setpts",
                           "2*PTS")  # 2*PTS is for slower playback
    stream = ffmpeg.output(stream,
                           fname,
                           pix_fmt="yuv420p",
                           vcodec=vcodec,
                           r=output_fps)
    stream = ffmpeg.overwrite_output(stream)
    process = ffmpeg.run_async(stream, pipe_stdin=True)
    for frame in tqdm(images, desc="writing video to %s" % fname):
        process.stdin.write(frame.astype(np.uint8).tobytes())
    process.stdin.close()
    process.wait()
Code example #17
File: main.py  Project: DavidDriessen/VRAS_pre-roll
def gen_current_session_poster(session, duration=10):
    current_session_posters = glob.glob(glob.escape(session) + '/*')
    if len(current_session_posters) == 0:
        sys.exit("Please put posters in session folder '" + session + "'")
    current_session_posters = list(
        map(
            lambda poster: ffmpeg.
            input(poster, framerate=25, t=duration, loop=1).filter(
                'scale', min(
                    [320, floor(920 / len(current_session_posters))]), 450),
            current_session_posters))
    if len(current_session_posters) > 1:
        session_frame = ffmpeg.filter(current_session_posters,
                                      'hstack',
                                      inputs=len(current_session_posters))
    else:
        session_frame = current_session_posters[0]
    return session_frame \
        .filter('pad', width=920, height=540, x='(ow-iw)/2', y='(oh-ih)', color='black') \
        .drawtext("This session:", fontsize=38, fontcolor='white', x='(w-tw)/2') \
        .drawtext(gen_session_name(session), fontsize=37, fontcolor='white', x='(w-tw)/2', y=48)
Code example #18
def stack_2x2(input_dir, zoom, col, row, frame_pattern='%05d', **kwargs):
    """create an ffmpeg path that stacks four streams of images of 256x256 and generates a video of 512x512"""
    assert zoom >= 1, 'zoom level 0 does not have 4 images'
    assert col % 2 == 0,  'col should  be even'
    assert row % 2 == 0,  'row should  be even'
    # define 4  streams
    input_dir = pathlib.Path(input_dir)
    inputs = [
        ffmpeg.input(str(input_dir / frame_pattern / '{zoom}/{col}/{row}.png'.format(zoom=zoom, col=col, row=row)), **kwargs),
        ffmpeg.input(str(input_dir / frame_pattern / '{zoom}/{col}/{row}.png'.format(zoom=zoom, col=col+1, row=row)), **kwargs),
        ffmpeg.input(str(input_dir / frame_pattern / '{zoom}/{col}/{row}.png'.format(zoom=zoom, col=col, row=row+1)), **kwargs),
        ffmpeg.input(str(input_dir / frame_pattern / '{zoom}/{col}/{row}.png'.format(zoom=zoom, col=col+1, row=row+1)), **kwargs),
    ]
    # stack
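    # layout '0_0|w0_0|0_h0|w0_h0' places the four inputs in a 2x2 grid (top-left, top-right, bottom-left, bottom-right)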
    stacked = ffmpeg.filter(
        inputs,
        'xstack',
        inputs=4,
        layout='0_0|w0_0|0_h0|w0_h0'
    )
    return stacked
Code example #19
 def __init__(self, in_src, out_dir, channel_id):
     super().__init__(in_src, out_dir, channel_id)
     self.ff_segment_out = (ffmpeg.filter(
         self.ff_input.video,
         'scale',
         size='320x240',
         force_original_aspect_ratio='decrease').output(
             self.out_dir + '/playlist.m3u8',
             format='hls',
             preset='ultrafast',
             threads=1,
             vcodec='libx264',
             **{
                 'profile:v': 'main'
             },
             x264opts='keyint=25:min-keyint=25:no-scenecut',
             flags='+cgop',
             hls_flags='delete_segments',
             hls_segment_type='mpegts',
             hls_init_time=0,
             hls_list_size=5,
             hls_allow_cache=0,
             hls_time=1).overwrite_output())
Code example #20
def toFrames(path, video):
    stream = ffmpeg.input(path + video)
    stream = ffmpeg.filter(stream, 'fps', fps=40, round='up')
    try:
        #os.stat("/home/pazagra/Cris/TFG-EndoscopySegementation/Results/"+video+"/")
        os.stat(
            "/home/cristina/Documentos/TFG/TFG-EndoscopySegementation/Results/"
            + video + "/")

    except:
        #os.mkdir("/home/pazagra/Cris/TFG-EndoscopySegementation/Results/"+video+"/")
        os.mkdir(
            "/home/cristina/Documentos/TFG/TFG-EndoscopySegementation/Results/"
            + video + "/")
    #stream=ffmpeg.output(stream, "/home/pazagra/Cris/TFG-EndoscopySegementation/Results/"+video+"/%d.png", video_bitrate='5000k',sws_flags='bilinear',start_number=0)
    stream = ffmpeg.output(
        stream,
        "/home/cristina/Documentos/TFG/TFG-EndoscopySegementation/Results/" +
        video + "/%d.png",
        video_bitrate='5000k',
        sws_flags='bilinear',
        start_number=0)
    ffmpeg.run(stream)
Code example #21
def mix(audio1, audio2, sr):
    """
    Function to mix audios with a normalised loudness
    :param audio1: Audio vector to normalize
    :param audio2: Audio vector to normalize
    :param sr: Sample rate of the final mix
    :return: Audio vector of the normalised mix
    """
    if audio1.ndim > 1:
        audio1 = std.MonoMixer()(audio1, audio1.shape[1])
    if audio2.ndim > 1:
        audio2 = std.MonoMixer()(audio2, audio2.shape[1])
    std.MonoWriter(filename='temporal1.wav', sampleRate=sr)(audio1)
    std.MonoWriter(filename='temporal2.wav', sampleRate=sr)(audio2)

    stream1 = (ffmpeg.input('temporal1.wav').filter('loudnorm'))

    stream2 = (ffmpeg.input('temporal2.wav').filter('loudnorm'))
    merged_audio = ffmpeg.filter([stream1, stream2], 'amix')
    ffmpeg.output(merged_audio, 'temporal_o.wav').overwrite_output().run()

    audio_numpy = std.MonoLoader(filename='temporal_o.wav')()
    return audio_numpy
Code example #22
    def write(filename=None, fps=30):
        """
        Write sequence of frames to a MPEG video file.

        :param filename: Output filename for video \\
        :param fps: Frame rate of output video
        """

        # Input sequence of images
        stream = ffmpeg.input(
            os.path.join(os.getcwd(), 'motion_detection', 'out',
                         'out_%3d.png'))

        # Set frame rate (default is 30 frames per second)
        stream = ffmpeg.filter(stream, 'fps', fps=fps)

        # Output video sequence
        stream = ffmpeg.output(
            stream,
            os.path.join(os.getcwd(), 'motion_detection', 'video', filename))

        # Generate output video
        ffmpeg.run(stream)
Code example #23
File: utils.py  Project: kail85/stargan-v2
def save_video(args,
               fname,
               images,
               output_fps=30,
               vcodec='libx264',
               filters=''):
    assert isinstance(images, np.ndarray), "images should be np.array: NHWC"
    num_frames, height, width, channels = images.shape

    if (args.img_datatype == cv2.CV_8UC1) or (args.img_datatype
                                              == cv2.CV_8UC3):
        target_datatype = np.uint8
        pixel_format = 'rgb24'
    elif args.img_datatype == cv2.CV_16UC1:
        target_datatype = np.uint16
        pixel_format = 'rgb48'
    else:
        raise ValueError('Unknown image data type.')

    stream = ffmpeg.input('pipe:',
                          format='rawvideo',
                          pix_fmt=pixel_format,
                          s='{}x{}'.format(width, height))
    stream = ffmpeg.filter(stream, 'setpts',
                           '2*PTS')  # 2*PTS is for slower playback
    stream = ffmpeg.output(stream,
                           fname,
                           pix_fmt='yuv420p',
                           vcodec=vcodec,
                           r=output_fps)
    stream = ffmpeg.overwrite_output(stream)
    process = ffmpeg.run_async(stream, pipe_stdin=True)

    for frame in tqdm(images, desc='writing video to %s' % fname):
        process.stdin.write(frame.astype(target_datatype).tobytes())
    process.stdin.close()
    process.wait()
Code example #24
def gen():
    """Video streaming generator function."""
    #while True:

    #Failed option 1
    cap = ffmpeg.input('video_fragments/hls_outputs_480p_0000.ts')
    cap = ffmpeg.filter(cap, 'scale', 500, 500)
    #ffmpeg.run(cap)

    #Failed option 2
    # cap = acapture.open('video_fragments/hls_outputs_480p_0000.ts')

    while True:
        # Capture frame-by-frame
        ret, img = cap.read()
        if ret == True:
            #img = cv2.resize(img, (0,0), fx=1.5, fy=1.5)
            #img = ffmpeg.filter(img, 'scale', 500, 500)
            frame = cv2.imencode('.jpg', img)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
            time.sleep(0.1)
        else:
            break
Code example #25
File: video_merger.py  Project: Ezra-Bernstein/musync
def merge_all(user_inst, maxw, maxh, npc, class_code):
    curv = []
    curinst = user_inst[0][1]

    insts = [curinst]

    for i in user_inst:
        if (i[1] != curinst):
            merge_inst(curinst, curv, maxw, maxh, npc, class_code)
            curinst = i[1]
            curv = []
            insts.append(curinst)
        curv.append("/tmp/new_" + str(class_code) + "/" + i[0] + delim + i[1] +
                    "m.mp4")

    merge_inst(curinst, curv, maxw, maxh, npc, class_code)

    print(insts)

    inp_insts = []

    for i in range(len(insts)):
        inp_insts.append(
            ffmpeg.input("/tmp/new_" + str(class_code) + "/" +
                         '{}fin.mp4'.format(insts[i])))

    if (len(insts) > 1):
        #final video
        (ffmpeg.filter(
            inp_insts, 'vstack',
            len(insts)).output("/tmp/new_" + str(class_code) + "/" +
                               "combined.mp4").global_args('-y').run())
    else:
        #we need to rename the file
        os.system("mv /tmp/new_{}/{}fin.mp4 /tmp/new_{}/combined.mp4".format(
            class_code, insts[0], class_code))
Code example #26
 async def _backwards(self, ctx, vstream, astream, kwargs):
     vstream = ffmpeg.filter(vstream, 'reverse')
     astream = ffmpeg.filter(astream, 'areverse')
     return (vstream, astream, {})
Code example #27
File: post_production.py  Project: krook/renderer
def main(args):
    cos = create_cos_client(args)

    if not cos:
        raise ValueError(f"could not create COS instance")

    notification = args.get('notification', {})
    key = args.get('key', notification.get('object_name', ''))

    # parse the key
    choir_id, song_id, def_id = Path(key).stem.split('-')[0].split('+')

    src_bucket = args['preprod_bucket']
    dst_bucket = args['preview_bucket']
    misc_bucket = args['misc_bucket']
    definition_bucket = args['definition_bucket']

    # Download the definition file for this job
    definition_key = f'{choir_id}+{song_id}+{def_id}.json'
    definition_object = cos.get_object(
        Bucket=definition_bucket,
        Key=definition_key,
    )
    definition = json.load(definition_object['Body'])
    output_spec = definition['output']

    geo = args['geo']
    host = args.get('endpoint', args['ENDPOINT'])
    cos_hmac_keys = args['__bx_creds']['cloud-object-storage']['cos_hmac_keys']
    cos_api_key = cos_hmac_keys['access_key_id']
    cos_api_secret = cos_hmac_keys['secret_access_key']

    get_input_url = partial(create_signed_url,
                            host,
                            'GET',
                            cos_api_key,
                            cos_api_secret,
                            geo,
                            src_bucket)
    
    get_misc_url = partial(create_signed_url,
                            host,
                            'GET',
                            cos_api_key,
                            cos_api_secret,
                            geo,
                            misc_bucket)

    ###
    ### Combine video and audio
    ###
    
    # Create a temp dir for our files to use
    with tempfile.TemporaryDirectory() as tmpdir:

        print("Doing first pass")
        stream = ffmpeg.input(get_input_url(key),
                              seekable=0)
        audio = stream.audio
        audio = audio.filter('volumedetect')
        pipeline = ffmpeg.output(audio,
                                 "-",
                                 format='null')

        cmd = pipeline.compile()
        print("ffmpeg command to run: ", cmd)

        stdout, stderr = pipeline.run(capture_stdout=True,
                                      capture_stderr=True)
        output = stdout + stderr
        output_lines = [line.strip() for line in output.decode().split('\n')]

        mute = False

        # Volume detect
        vol_threshold = int(args.get('vol_threshold', 22))
        vol_pct = float(args.get('vol_pct', 0.05))

        total_samples = 0
        high_samples = 0
        max_volume = 0
        hist_re = re.compile(r'histogram_(\d+)db: (\d+)')
        maxvol_re = re.compile(r'max_volume: (-?\d+\.?\d*) dB')
        for line in output_lines:
            # Search for histogram
            mo = hist_re.search(line)
            if mo:
                level, samples = mo.groups()
                total_samples += int(samples)
                if int(level) < vol_threshold:
                    high_samples += int(samples)

            # Search for max volume
            mo = maxvol_re.search(line)
            if mo:
                max_volume = float(mo.groups()[0])

        if high_samples/total_samples < vol_pct:
            print(f"Input volume is so low, we are muting it {high_samples/total_samples:.2f} above {vol_threshold}")
            mute = True

        target_peak = 0
        volume_gain = target_peak - max_volume
        volume_gain = f"{volume_gain:.2f} dB"

        # Second pass, apply normalisation
        print("Doing second pass loudnorm")
        stream = ffmpeg.input(get_input_url(key),
                              seekable=0)

        video = stream.video
        audio = stream.audio

        # Pad the video to final size, place video in center
        output_width, output_height = output_spec['size']
        video = video.filter('pad',
                             x=-1,
                             y=-1,
                             width=output_width,
                             height=output_height)

        # Overlay the watermark if present
        watermark_file = output_spec.get('watermark')
        if watermark_file:
            watermark_url = get_misc_url(watermark_file)
            watermark = ffmpeg.input(watermark_url,
                                     seekable=0)
            video = video.overlay(watermark,
                                  x='W-w-20',
                                  y='H-h-20')

        print("Volume gain to apply:", volume_gain)
        audio = audio.filter('volume',
                             volume_gain)

        # Add in audio compression
#        audio = audio.filter('acompressor')
        
        # Add reverb in if present
        reverb_type = output_spec.get('reverb_type')
        if reverb_type:
            reverb_url = get_misc_url(f'{reverb_type}.wav')
            reverb_pct = float(output_spec.get('reverb', 0.1))
            if reverb_pct > 0:
                reverb_part = ffmpeg.input(reverb_url,
                                           seekable=0)
                split_audio = audio.filter_multi_output('asplit')
                reverb = ffmpeg.filter([split_audio[1], reverb_part],
                                       'afir',
                                       dry=10, wet=10)
                audio = ffmpeg.filter([split_audio[0], reverb],
                                      'amix',
                                      dropout_transition=180,
                                      inputs=2,
                                      weights=f'{1-reverb_pct} {reverb_pct}')

        # Output
        output_key = f'{choir_id}+{song_id}+{def_id}-final.mp4'
        output_path = str(Path(tmpdir, output_key))

        kwargs = {}
        if 'duration' in args:
            kwargs['t'] = int(args['duration'])

        if 'loglevel' in args:
            kwargs['v'] = args['loglevel']

        pipeline = ffmpeg.output(audio,
                                 video,
                                 output_path,
                                 pix_fmt='yuv420p',
                                 vcodec='libx264',
                                 preset='veryfast',
                                 movflags='+faststart',
                                 **kwargs
        ) 
        cmd = pipeline.compile()
        print("ffmpeg command to run: ", cmd)
        t1 = time.time()
        pipeline.run()
        t2 = time.time()

        # Upload the final file
        cos.upload_file(output_path, dst_bucket, output_key)
        
        ret = {'dst_key': output_key,
               'def_id': def_id,
               'render_time': int(t2-t1),
               'choir_id': choir_id,
               'song_id': song_id,
               'status': 'done'}

        return ret
Code example #28
File: 281.py  Project: spirulence/ffmpeg-issues
import ffmpeg

input_video = ffmpeg.input("../resources/video_with_audio.mp4")
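# adelay "1500|1500" delays the left and right audio channels by 1500 ms each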
added_audio = ffmpeg.input("../resources/dance_beat.ogg").audio.filter(
    'adelay', "1500|1500")

merged_audio = ffmpeg.filter([input_video.audio, added_audio], 'amix')

(ffmpeg.concat(input_video, merged_audio, v=1,
               a=1).output("mix_delayed_audio.mp4").run(overwrite_output=True))
Code example #29
def buildpreroll(stream, filelocation):
    titleoffset = ((len(name) * 33) / 2) - 7
    if titleoffset > 716:
        title = textwrap.fill(name, width=40, break_long_words=False)
        titlenl = title.find("\n")
        titleoffset = ((titlenl * 33) / 2) - 7
    description = textwrap.fill(summary, width=22, break_long_words=False)
    num_of_lines = description.count("\n")
    if num_of_lines > 22:
        descriptionSize = 580 / num_of_lines
    else:
        descriptionSize = 26
    sidebar = ffmpeg.input(
        "{}overlays/prerolloverlay.mov".format(container_folder))
    poster = ffmpeg.input("{}poster.jpg".format(container_folder))
    fadeout = ffmpeg.input("{}overlays/fadeout.mov".format(container_folder))
    titlefont = "{}fonts/Bebas-Regular.ttf".format(container_folder)
    descriptionfont = "{}fonts/Roboto-Light.ttf".format(container_folder)
    poster = ffmpeg.filter(poster, "scale", 200, -1)
    preroll = ffmpeg.input("{}".format(filelocation), ss=10, t=Preroll_length)
    preroll = ffmpeg.filter(preroll, "scale", 1600, -1)
    prerollaudio = ffmpeg.input("{}prerollaudio.mp3".format(container_folder))
    preroll = ffmpeg.overlay(sidebar, preroll, x=300, y=125)
    preroll = ffmpeg.overlay(preroll, poster, x=40, y=195, enable="gte(t,1)")
    if CriticRating == "":
        preroll = ffmpeg.drawtext(
            preroll,
            text="Audiance Rating: {}%".format(AudienceRating),
            fontfile=titlefont,
            x=3,
            y=150,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=36,
            enable="gte(t,1)",
        )
    elif AudienceRating == "":
        preroll = ffmpeg.drawtext(
            preroll,
            text="Audiance Rating: {}%".format(CriticRating),
            fontfile=titlefont,
            x=3,
            y=150,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=36,
            enable="gte(t,1)",
        )
    elif CriticRating == "" and AudienceRating == "":
        print("we have no ratings available")
    else:
        preroll = ffmpeg.drawtext(
            preroll,
            text="Audiance Rating: {}%".format(AudienceRating),
            fontfile=titlefont,
            x=3,
            y=165,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=32,
            enable="gte(t,1)",
        )

        preroll = ffmpeg.drawtext(
            preroll,
            text="Critic Rating: {}%".format(CriticRating),
            fontfile=titlefont,
            x=3,
            y=135,
            escape_text=True,
            fontcolor="0xFFFFFF@0xff",
            fontsize=32,
            enable="gte(t,1)",
        )
    preroll = ffmpeg.drawtext(
        preroll,
        text=name,
        fontfile=titlefont,
        x=(1106 - titleoffset),
        y=20,
        escape_text=True,
        fontcolor="0xFFFFFF@0xff",
        fontsize=76,
        enable="gte(t,1)",
    )
    preroll = ffmpeg.drawtext(
        preroll,
        text=description,
        fontfile=descriptionfont,
        x=3,
        y=500,
        escape_text=True,
        fontcolor="0xFFFFFF@0xff",
        fontsize=descriptionSize,
        enable="gte(t,1)",
    )
    preroll = ffmpeg.overlay(preroll, fadeout)
    preroll = ffmpeg.output(
        prerollaudio, preroll,
        ("{}prerolls/{} Preroll.mp4".format(container_folder, name)))
    ffmpeg.run(preroll)
    dirListing = os.listdir("{}prerolls/".format(container_folder))
    full_path = ["Prerolls/{0}".format(x) for x in dirListing]
    if len(dirListing) > 26:
        oldest_file = min(full_path, key=os.path.getctime)
        os.remove(oldest_file)
        plexsetting = re.sub("{}{}".format(container_folder, oldest_file), "",
                             plexsetting)
    preroll_list = (';{}prerolls/'.format(folder)).join(
        os.listdir("{}prerolls/".format(container_folder)))
    preroll_list = (";{}{}".format(folder, preroll_list))
    print(preroll_list)
    plex.settings.get("cinemaTrailersPrerollID").set(preroll_list)
    plex.settings.save()
    os.remove("{}poster.jpg".format(container_folder))
    os.remove("{}prerollaudio.mp3".format(container_folder))
    os.remove("{}".format(filelocation))
    print("done!")
Code example #30
        if not (self.img and self.background and self.mask):
            return None
        
        img_video = img_to_video(self.img, self.duration, 'zoom_out')
        if not img_video:
            return None
    
        complete_path = os.path.join(UPLOAD_DIR, gen_filename(".mp4"))
        
        img_video_input = ffmpeg.input(img_video)
        bg_input = ffmpeg.input(self.background)
        mask_input = ffmpeg.input(self.mask)
        if self.foreground:
            fg_input = ffmpeg.input(self.foreground)

        mask_inverted = ffmpeg.filter(mask_input, 'negate')
        bg_merged = ffmpeg.filter([bg_input, mask_inverted], 'alphamerge')

        img_video_overlay = ffmpeg.filter(
            [img_video_input, bg_merged], 'overlay', format='auto')
        if self.foreground:
            fg_input_inverted = ffmpeg.filter(
                fg_input, 'mergeplanes', '0x00010200', format='yuva420p')
            img_video_overlay = ffmpeg.filter(
                [img_video_overlay, fg_input_inverted], 'overlay')
        img_video_overlay = ffmpeg.output(img_video_overlay, complete_path)
        try:
            ffmpeg.run(img_video_overlay)
        except Exception as e:
            error('errors', f'scene_error: {e}')
            return None