示例#1
0
def test_filter_concat__wrong_stream_count():
    """concat with v=1, a=1 must reject a stream count not divisible by 2."""
    first = ffmpeg.input('in1.mp4')
    second = ffmpeg.input('in2.mp4')
    with pytest.raises(ValueError) as excinfo:
        ffmpeg.concat(first.video, first.audio, second.hflip(), v=1, a=1).node
    expected = (
        'Expected concat input streams to have length multiple of 2 (v=1, a=1); got 3'
    )
    assert str(excinfo.value) == expected
示例#2
0
    def download_reddit_video(self, submission, output_path):
        """Download a v.redd.it video (and its audio track, if any) from a
        Reddit submission into output_path, muxing the two with ffmpeg.

        :param submission: praw-style submission object (may be a crosspost)
        :param output_path: directory to write the downloaded files into
        """
        media = getattr(submission, "media", None)
        media_id = submission.url.split("v.redd.it/")[-1]

        self.logger.spam(self.indent_2 + "Looking for submission.media")

        if media is None:
            # Link might be a crosspost: fall back to the original post's media.
            crosspost_parent_list = getattr(submission,
                                            "crosspost_parent_list", None)
            if crosspost_parent_list is not None:
                self.logger.spam(self.indent_2 +
                                 "This is a crosspost to a reddit video")
                first_parent = crosspost_parent_list[0]
                media = first_parent["media"]

        if media is not None:
            self.logger.spam(self.indent_2 + "Downloading video component")
            url = media["reddit_video"]["fallback_url"]
            video_save_path = os.path.join(output_path,
                                           media_id + "_video.mp4")
            try:
                urllib.request.urlretrieve(url, video_save_path)
            except Exception as e:
                self.print_formatted_error(e)

            # Download the audio component; not every reddit video has one,
            # so a failure here is expected and non-fatal.
            self.logger.spam(self.indent_2 + "Downloading audio component")
            audio_downloaded = False
            audio_save_path = os.path.join(output_path,
                                           media_id + "_audio.mp4")
            try:
                urllib.request.urlretrieve(submission.url + "/DASH_audio.mp4",
                                           audio_save_path)
                audio_downloaded = True
            except Exception:
                # Best-effort: no DASH audio just means a silent video.
                pass

            if audio_downloaded:
                # Merge the separately downloaded video and audio tracks.
                self.logger.spam(
                    self.indent_2 +
                    "Merging video & audio components with ffmpeg")
                output_save_path = os.path.join(output_path, media_id + ".mp4")
                input_video = ffmpeg.input(video_save_path)
                input_audio = ffmpeg.input(audio_save_path)
                ffmpeg.concat(input_video, input_audio, v=1, a=1)\
                    .output(output_save_path)\
                    .global_args('-loglevel', 'error')\
                    .global_args('-y')\
                    .run()
                self.logger.spam(self.indent_2 + "Done merging with ffmpeg")
            else:
                self.logger.spam(self.indent_2 +
                                 "This video does not have an audio component")

            self.logger.spam(self.indent_2 + "Successfully saved video")
示例#3
0
def changemavoice(file_path,inputlanguage,outputlanguage):
    """Re-voice a video into another language and lip-sync the result.

    Pipeline: preprocess audio -> speech-to-text -> text translation ->
    text-to-speech -> optional male voice conversion -> LipGAN rendering ->
    ffmpeg mux of the new audio onto the rendered video.

    :param file_path: video filename relative to ./media/
    :param inputlanguage: key into the STT / translation option tables
    :param outputlanguage: key into the TTS / translation option tables
    :return: tuple (detected_gender, original_basename)
    """

    file_path="./media/"+str(file_path)
    orgfile=os.path.basename(str(file_path))
    filenaam='translated'+os.path.basename(str(file_path))
    data_preprocess(file_path)
    # myRecognizeCallback = MyRecognizeCallback()
    lang=inputlanguage
    lang1=outputlanguage

    # Resolve model identifiers from the module-level option tables.
    languagemodel=optionsSTT[lang]
    languagemodel1=optionTTS[lang1]
    speechtotext(languagemodel)
    # with open('./output/testvideo/vocals.wav','rb') as audio_fill:
    #     audio_sauce = AudioSource(audio_fill)
    #     speech_to_text.recognize_using_websocket(audio=audio_sauce,content_type='audio/wav',recognize_callback=myRecognizeCallback,model=languagemodel,keywords=['colorado', 'tornado', 'tornadoes'],keywords_threshold=0.5,max_alternatives=1)
    print("-------------------------")
    print("SPEECH TO TEXT DONE")

    # NOTE(review): `d` is presumably a module-level transcript produced by
    # speechtotext() — confirm against the rest of the module.
    langa= texttotext(d,optionTTsl[lang],optionTTTtl[lang1])
    print("-------------------------")
    print("TEXT TO TEXT DONE")
    texttospeech(langa,languagemodel1)
    print("-------------------------")
    print("TEXT TO SPEECH DONE")

    gen= vidgender(file_path)
    if gen=="Male":
        if outputlanguage in malevoicesavailable:
            texttospeechbluemix(langa,voicesmale[outputlanguage])
            shutil.copy('./result/audio/welcome.wav','./result/gan/welcome1.wav')
        xcv='python3 convert.py'
        subprocess.call(xcv,shell=True)
        #male()

    os.chdir('./result/')

    # Render the lip-synced face video, then transcode the avi outputs to mp4.
    lipGAN='python3 batch_inference.py --checkpoint_path logs/lipgan_residual_mel.h5 --model residual --face "testvideo.mp4" --fps 24 --audio ./gan/welcome1.wav --results_dir ./video'
    subprocess.call(lipGAN,shell=True)
    commander="ffmpeg -i './video/result_voice.avi' -ac 2 -b:v 2000k -c:a aac -c:v libx264 -b:a 160k -vprofile high -bf 0 -strict experimental -f mp4 './video/result_voice.mp4'"
    subprocess.call(commander,shell=True)
    commander2="ffmpeg -i './video/result.avi' -ac 2 -b:v 2000k -c:a aac -c:v libx264 -b:a 160k -vprofile high -bf 0 -strict experimental -f mp4 './video/result.mp4'"
    subprocess.call(commander2,shell=True)
    os.chdir('..')
    input_video = ffmpeg.input('./result/video/result.mp4')
    # if gen=="Male":
    #     input_audio1 = ffmpeg.input('./result/audio/welcome.wav')
    # else:
    #     input_audio1 = ffmpeg.input('./result/gan/welcome.wav')
    input_audio1 = ffmpeg.input('./result/audio/welcome.wav')
    # Mux the translated audio onto the rendered video.
    ffmpeg.concat(input_video, input_audio1, v=1, a=1).output('./media/Videos/LQ'+orgfile).run()

    transfilenaam='./media/Videos/'+filenaam
    shutil.copy('./result/video/result_voice.mp4',transfilenaam)

    finalpath=orgfile
    return gen,finalpath
def video_file_FFMPEG(temp_stored_file: Path, optimize: bool):
    """Save and/or optimize an uploaded video with ffmpeg.

    :param temp_stored_file: path to the uploaded temporary file
    :param optimize: when True, also produce a scaled/padded optimized copy
    :return: dict with 'original' and 'optimized' file names (None when skipped)
    :raises HTTPException: 503 when saving originals is disabled, the desired
        format is unsupported, or any ffmpeg step fails
    """
    try:
        if not optimize and not os.environ.get('SAVE_ORIGINAL') == 'True':
            raise HTTPException(
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                detail='Save original is disabled, contact admin')

        local_savings(videos=True)
        origin, optimized = generate_unique_name(
            os.environ.get('VIDEO_AllOWED_FILE_FORMAT'),
            os.environ.get('VIDEO_DESIRED_FILE_FORMAT'))
        # Save the original; the default transcode also decreases size for
        # mp4/mov sources.
        if os.environ.get('SAVE_ORIGINAL') == 'True':
            (ffmpeg.input(temp_stored_file).output(
                os.environ.get('VIDEO_ORIGINAL_LOCAL_PATH') + origin,
                vcodec='h264',
                acodec='aac').run(quiet=True))
        else:
            origin = None
        if optimize:
            audio = ffmpeg.input(temp_stored_file).audio
            # Shrink to fit inside 640x1136 keeping aspect ratio, then pad
            # to the exact target size with centered bars.
            video = ffmpeg.input(temp_stored_file).video.filter(
                'scale',
                size='640x1136',
                force_original_aspect_ratio='decrease').filter(
                    'pad', '640', '1136', '(ow-iw)/2', '(oh-ih)/2')
            desired_format = os.environ.get('VIDEO_DESIRED_FILE_FORMAT')
            # ffmpeg config for webm.
            # libvpx-vp9 is also possible but sometimes increases size.
            # Check docs https://trac.ffmpeg.org/wiki/Encode/VP9
            if desired_format == 'webm':
                out = ffmpeg.output(
                    video,
                    audio,
                    os.environ.get('VIDEO_OPTIMIZED_LOCAL_PATH') + optimized,
                    crf='10',
                    qmin='0',
                    qmax='50',
                    video_bitrate='1M',
                    vcodec='libvpx',
                    acodec='libvorbis')
            # ffmpeg config for mp4.
            elif desired_format == 'mp4':
                out = ffmpeg.output(
                    video,
                    audio,
                    os.environ.get('VIDEO_OPTIMIZED_LOCAL_PATH') + optimized,
                    vcodec='h264',
                    acodec='aac')
            else:
                # Previously this fell through to a NameError on `out`; fail
                # with an explicit message instead.
                raise HTTPException(
                    status_code=503,
                    detail='Unsupported VIDEO_DESIRED_FILE_FORMAT')
            out.run(quiet=True)
        else:
            optimized = None
        return {'original': origin, 'optimized': optimized}
    except HTTPException:
        # Preserve intentional HTTP errors instead of masking them below.
        raise
    except Exception:
        raise HTTPException(status_code=503,
                            detail="Video manipulation failed using FFMPEG")
示例#5
0
def exportvideo():
    """Cut each imported video to its clip range, then repeatedly concat
    pairs of exported clips until a single 'exports/export.mp4' remains.

    Relies on module-level state: `videos`, `vidavg_list` (start frames),
    `finframe_list` (end frames), `width`, `height` and `args.frames`.
    """
    i = 0
    global vidavg_list
    global finframe_list
    print(vidavg_list)
    print(finframe_list)

    # Cut up videos into their clip form
    for video in videos:
        (ffmpeg.input(f'imports/{video}').trim(
            start_frame=vidavg_list[i], end_frame=finframe_list[i]).filter(
                'scale', width,
                height).output(f'exports/video{i+1}.mp4',
                               framerate=args.frames).overwrite_output().run())
        i += 1

    # Combine all those new videos, two at a time.
    exports = os.listdir('exports/')
    for video in exports:
        # Re-list on every pass: the directory shrinks as clips get merged.
        exports = os.listdir('exports/')
        if not (len(exports)
                == 1):  # Checks if the final output is the only one left
            if exports[0] == 'export.mp4':
                # rename to allow code to continue as it cannot replace it with the same name
                os.rename(f'exports/{exports[0]}', f'exports/vid0.mp4')
                exports = os.listdir('exports/')

            if not exports[
                    0] == 'vid0.mp4':  # If two clips have NOT been combined yet
                vid0 = ffmpeg.input(f'exports/{exports[0]}')
                vid1 = ffmpeg.input(f'exports/{exports[1]}')
                (ffmpeg.concat(
                    vid0,
                    vid1,
                ).output('exports/export.mp4', framerate=args.frames).run())
                vidavg_list.pop(1)  # as videos get combined,
                vidavg_list.pop(
                    0)  # remove their first and last frame positions
                finframe_list.pop(1)
                finframe_list.pop(0)
            else:  # If two clips HAVE been combined
                vid0 = ffmpeg.input(f'exports/{exports[0]}')
                vid1 = ffmpeg.input(f'exports/{exports[1]}')
                if len(vidavg_list) > 1 and len(finframe_list) > 1:
                    vidavg_list.pop(0)
                    finframe_list.pop(0)
                (ffmpeg.concat(
                    vid0,
                    vid1,
                ).output('exports/export.mp4', framerate=args.frames).run())
            # The newly created 'export.mp4' does not get counted yet in the exports list.
            os.remove(f'exports/{exports[1]}')
            os.remove(f'exports/{exports[0]}'
                      )  # Delete [1] first so it can delete [0]
        else:
            print('All videos edited together!')
示例#6
0
    def mergeAudioandVideo(self):
        """Mux the test video and audio tracks into a single mp4 file."""

        import ffmpeg

        video_stream = ffmpeg.input('./test/test_video.webm')
        audio_stream = ffmpeg.input('./test/test_audio.webm')

        merged = ffmpeg.concat(video_stream, audio_stream, v=1, a=1)
        merged.output('./processed_folder/finished_video.mp4').run()
示例#7
0
def test_fluent_concat():
    """Identical concat graphs compare equal; operand order matters."""
    source = ffmpeg.input('dummy.mp4')
    clip_a = source.trim(start_frame=10, end_frame=20)
    clip_b = source.trim(start_frame=30, end_frame=40)
    clip_c = source.trim(start_frame=50, end_frame=60)
    ordered = ffmpeg.concat(clip_a, clip_b, clip_c)
    ordered_again = ffmpeg.concat(clip_a, clip_b, clip_c)
    reordered = ffmpeg.concat(clip_a, clip_c, clip_b)
    assert ordered == ordered_again
    assert ordered != reordered
示例#8
0
def merge_audio_video(video_path, audio_path, version=2):
    """Mux an audio file onto a video file with ffmpeg.

    The output is written next to the video as
    '<audio-stem>-video-audio-version<version>.mp4'.

    :param video_path: path of the source video
    :param audio_path: path of the audio track to mux in
    :param version: suffix number embedded in the output file name
    :return: path of the merged output file
    """
    # splitext keeps the whole stem; .split(".")[0] would truncate basenames
    # that themselves contain dots (e.g. "take.1.wav" -> "take").
    audio_name = os.path.splitext(os.path.basename(audio_path))[0]
    output_path = os.path.join(
        os.path.dirname(video_path),
        audio_name + "-video-audio-version{}.mp4".format(version))
    input_video = ffmpeg.input(video_path)
    input_audio = ffmpeg.input(audio_path)
    ffmpeg.concat(input_video, input_audio,
                  v=1, a=1).output(output_path).global_args(
                      '-loglevel', 'error').global_args('-y').run()
    return output_path
def concat_videos(input_filepaths, output_filepath, **kwargs):
    """Concatenate the given video files (video + audio) into one output."""
    inputs = [ffmpeg.input(path) for path in input_filepaths]

    # Interleave each input's video and audio streams, as concat expects.
    streams = []
    for clip in inputs:
        streams.append(clip.video)
        streams.append(clip.audio)

    joined = ffmpeg.concat(*streams, a=1, v=1)
    joined.output(output_filepath, **kwargs).run()
    def download_video(video, title, description, path, video_itag, audio_itag, lang):
        """Download the selected video/audio streams, captions and description
        into a per-title directory, then mux video+audio with ffmpeg.

        :param video: pytube-style video object with .streams and .captions
        :param title: directory / file base name
        :param description: description text ('' to skip)
        :param path: parent directory for the download folder
        :param video_itag: itag of the video stream ('' to skip)
        :param audio_itag: itag of the audio stream ('' to skip)
        :param lang: caption language code ('' to skip)
        """
        try:
            os.mkdir(os.path.join(path, title))
            path = os.path.join(path, title)
        except OSError:
            # Directory already exists: disambiguate with a random word.
            word = RandomWords().get_random_word()
            os.mkdir(os.path.join(path, title + word))
            path = os.path.join(path, title + word)

        if video_itag != '':
            print('video --', end=' ')
            video.streams.get_by_itag(video_itag).download(path)
            print('done')

        if audio_itag != '':
            print('audio --', end=' ')
            video.streams.get_by_itag(audio_itag).download(path)
            print('done')

        if lang != '':
            print('caption --', end=' ')
            caption = video.captions.get_by_language_code(lang)
            caption = caption.generate_srt_captions()

            # "with" guarantees the handle is closed even on write errors.
            with open(os.path.join(path, title + '.srt'), 'w') as srt_file:
                srt_file.write(caption)
            print('done')

        if description != '':
            print('description --', end=' ')
            with open(os.path.join(path, title + '.txt'), 'w') as txt_file:
                txt_file.write(description)
            print('done')

        video_ext = ['mp4', 'mkv']
        audio_ext = ['m4a', 'webm']

        # Find the downloaded media files by extension (skip hidden files).
        video_file = [
            name for name in os.listdir(path)
            if os.path.isfile(os.path.join(path, name)) and name[0] != '.' and name.split('.')[-1] in video_ext
        ]

        audio_file = [
            name for name in os.listdir(path)
            if os.path.isfile(os.path.join(path, name)) and name[0] != '.' and name.split('.')[-1] in audio_ext
        ]

        video_file = ffmpeg.input(os.path.join(path, video_file[0]))
        audio_file = ffmpeg.input(os.path.join(path, audio_file[0]))

        ffmpeg.concat(video_file, audio_file, v=1, a=1).output(os.path.join(path, '_' + title + '.mp4')).run()
        print('Done!')
示例#11
0
    def download(self, mime_type_ext: str):
        """Download the selected video/audio streams into tmp/ and mux them
        into '<title>.mp4' with ffmpeg."""
        self.video_stream.download(output_path='tmp', filename='video')
        self.audio_stream.download(output_path='tmp', filename='audio')

        video_file = BASE_DIR.joinpath('tmp', f'video.{mime_type_ext}')
        audio_file = BASE_DIR.joinpath('tmp', f'audio.{mime_type_ext}')

        video_in = ffmpeg.input(video_file)
        audio_in = ffmpeg.input(audio_file)

        merged = ffmpeg.concat(video_in, audio_in, v=1, a=1)
        merged.output(f'{self.youtube.title}.mp4').run()
示例#12
0
def concatVideos(files, path):
    """
    Purpose: concatenate the weba files into a mp4 file (downloads the file on computer)
    Parameters: a list of files to concatenate
    Return val: None
    """
    name = findID(files[0]) + '.mp4'
    # Build one ffmpeg input per file sharing the same ID.
    inputs = [ffmpeg.input(item) for item in files]
    # v=0, a=1: audio-only concatenation of every segment.
    ffmpeg.concat(*inputs, v=0, a=1).output(name).run()
示例#13
0
def test_fluent_concat():
    """Concat equality: same operands in the same order compare equal."""
    source = ffmpeg.file_input('dummy.mp4')
    clip1 = source.trim(10, 20)
    clip2 = source.trim(30, 40)
    clip3 = source.trim(50, 60)
    combo_a = ffmpeg.concat(clip1, clip2, clip3)
    combo_b = ffmpeg.concat(clip1, clip2, clip3)
    combo_c = ffmpeg.concat(clip1, clip3, clip2)
    empty_a = ffmpeg.concat()
    empty_b = ffmpeg.concat()
    assert combo_a == combo_b
    assert combo_a != combo_c
    assert empty_a == empty_b
示例#14
0
    async def _sequencer(self, ctx, vstream, astream, kwargs):
        """Build a video/audio 'song' by sequencing notes onto the streams.

        Each entry of kwargs['notes'] is a (tone, length_index) pair; '.' as
        the tone means a rest (black/silent segment), any other tone is fed
        to the 'rubberband' pitch filter.  length_index selects a playback
        speed from len_to_speed.

        :return: (video_stream, audio_stream, extra_output_kwargs)
        """
        len_to_speed = (1, 2, 1, 0.5)

        # Make vstream/astream for rests: blacked-out video, silenced audio.
        vstream = vstream.split()
        vrest = vstream[1].filter('eq', brightness=-1)
        vstream = vstream[0]
        astream = astream.asplit()
        arest = astream[1].filter('volume', volume=-100, precision='fixed')
        astream = astream[0]

        # Make vstream/astream for final result
        vstream = vstream.split()
        vfinale = vstream[1]
        vstream = vstream[0]
        astream = astream.asplit()
        afinale = astream[1]
        astream = astream[0]

        # Making the song: append one (video, audio) segment per note.
        notes = kwargs['notes']
        for note in notes:
            note_tone, note_len = note[0], len_to_speed[note[1]]
            vnote = None
            anote = None

            if (note_tone == '.'):
                # Rest: take a fresh split branch of the silent/black streams.
                vrest = vrest.split()
                vnote = vrest[1]
                arest = arest.asplit()
                anote = arest[1]
            else:
                vstream = vstream.split()
                vnote = vstream[1]
                astream = astream.asplit()
                anote = astream[1].filter('rubberband', pitch=note_tone)

            if (note_len != 1):
                # Change playback speed: video via setpts, audio via atempo.
                vnote = vnote.filter('setpts', f'{1.0/note_len}*PTS')
                anote = anote.filter('atempo', note_len)
            vfinale = ffmpeg.concat(vfinale, vnote, v=1, a=0)
            afinale = ffmpeg.concat(afinale, anote, v=0, a=1)

            if (note_tone == '.'):
                # Rebind to the unconsumed split branch for the next note.
                vrest = vrest[0]
                arest = arest[0]
            else:
                vstream = vstream[0]
                astream = astream[0]

        return vfinale, afinale, {}
示例#15
0
def changemavoicever2(file_path,inputlanguage,outputlanguage):
    """Re-voice a video into another language and lip-sync it (version 2).

    Same pipeline as changemavoice, but the lip-synced high-quality render
    is only kept for male-voiced videos, and the muxed output goes to
    './media/Videos/translated<name>'.

    :param file_path: video filename relative to ./media/
    :param inputlanguage: key into the STT / translation option tables
    :param outputlanguage: key into the TTS / translation option tables
    :return: tuple (detected_gender, original_basename)
    """
    file_path="./media/"+str(file_path)
    orgfile=os.path.basename(str(file_path))
    filenaam='LQ'+os.path.basename(str(file_path))
    data_preprocess(file_path)
    lang=inputlanguage
    lang1=outputlanguage

    # Resolve model identifiers from the module-level option tables.
    languagemodel=optionsSTT[lang]
    languagemodel1=optionTTS[lang1]
    speechtotext(languagemodel)

    print("-------------------------")
    print("SPEECH TO TEXT DONE")

    # NOTE(review): `d` is presumably a module-level transcript produced by
    # speechtotext() — confirm against the rest of the module.
    langa= texttotext(d,optionTTsl[lang],optionTTTtl[lang1])
    print("-------------------------")
    print("TEXT TO TEXT DONE")
    texttospeech(langa,languagemodel1)
    print("-------------------------")
    print("TEXT TO SPEECH DONE")

    gen= vidgender(file_path)
    if gen=="Male":
        xcv='python3 convert.py'
        subprocess.call(xcv,shell=True)
        male()

    os.chdir('./result/')

    # Render the lip-synced face video, then transcode the avi outputs to mp4.
    lipGAN='python3 batch_inference.py --checkpoint_path logs/lipgan_residual_mel.h5 --model residual --face "testvideo.mp4" --fps 24 --audio ./gan/welcome.wav --results_dir ./video'
    subprocess.call(lipGAN,shell=True)
    commander="ffmpeg -i './video/result_voice.avi' -ac 2 -b:v 2000k -c:a aac -c:v libx264 -b:a 160k -vprofile high -bf 0 -strict experimental -f mp4 './video/result_voice.mp4'"
    subprocess.call(commander,shell=True)
    commander2="ffmpeg -i './video/result.avi' -ac 2 -b:v 2000k -c:a aac -c:v libx264 -b:a 160k -vprofile high -bf 0 -strict experimental -f mp4 './video/result.mp4'"
    subprocess.call(commander2,shell=True)
    os.chdir('..')
    input_video = ffmpeg.input('./result/video/result.mp4')
    # if gen=="Male":
    #     input_audio1 = ffmpeg.input('./result/audio/welcome.wav')
    # else:
    #     input_audio1 = ffmpeg.input('./result/gan/welcome.wav')
    input_audio1 = ffmpeg.input('./result/audio/welcome.wav')
    # Mux the translated audio onto the rendered video.
    ffmpeg.concat(input_video, input_audio1, v=1, a=1).output('./media/Videos/translated'+orgfile).run()

    transfilenaam='./media/Videos/'+filenaam
    if gen=="Male":
        shutil.copy('./result/video/result_voice.mp4',transfilenaam)

    finalpath=orgfile
    return gen,finalpath
示例#16
0
    def download(self, output_path):
        """Download the video (and audio, if present) and write the merged
        result to output_path, forcing a .mp4 extension.

        :param output_path: desired output file path
        :return: the (possibly extension-adjusted) output path
        :raises DownloadException: on a non-200 response or ffmpeg failure
        """
        # Check video extension; os.path.splitext returns the suffix WITH the
        # leading dot, so compare against ".mp4" (the original compared
        # against "mp4", which never matched).
        base, ext = os.path.splitext(output_path)
        if ext != ".mp4":
            output_path = "{}.mp4".format(base)

        # Download video and audio to temporary files.
        temp_video_path = get_random_path(_TEMP_DOWNLOAD_DIR, "mp4")
        # TODO: Download video and audio asynchronously
        with open(temp_video_path, "wb") as f:
            req = requests.get(self.video_url)
            if req.status_code != 200:
                raise DownloadException(
                    "Failed to download video from {}: {} response".format(
                        self.video_url, req.status_code
                    )
                )
            f.write(req.content)
        if self.audio_url is not None:
            temp_audio_path = get_random_path(_TEMP_DOWNLOAD_DIR, "mp4")
            with open(temp_audio_path, "wb") as f:
                req = requests.get(self.audio_url)
                if req.status_code != 200:
                    raise DownloadException(
                        "Failed to download audio from {}: {} response".format(
                            self.audio_url, req.status_code
                        )
                    )
                f.write(req.content)

            # Combine video and audio
            video = ffmpeg.input(temp_video_path)
            audio = ffmpeg.input(temp_audio_path)
            try:
                ffmpeg.concat(video, audio, v=1, a=1).output(output_path).run(
                    quiet=True, overwrite_output=True
                )
            except ffmpeg.Error:
                if os.path.exists(output_path):
                    os.remove(output_path)
                print("ERROR:", output_path)  # DEBUG
                raise DownloadException("Failed to combine video and audio with FFmpeg")
            finally:
                # Delete temporary files
                os.remove(temp_video_path)
                os.remove(temp_audio_path)
        else:
            shutil.move(temp_video_path, output_path)

        return output_path
示例#17
0
def main():
    """Assemble the mass video: trim the final part and append the
    after-message screen, then render everything to out.mp4."""
    # NOTE(review): `input` here accepts loop_frames, so it is a project
    # helper wrapping ffmpeg.input, not the builtin — confirm the import.
    mass_parts = mass_part_inputs(os.path.join(MASS_VIDEOS_DIR, '2020-05-23'))

    # TODO Read from mass video
    FRAME_RATE = 25

    # Before/after
    before_logo = input('stalbans_logo_5.mp4')
    after_screen = input('after-message.png', loop_frames=FRAME_RATE * 30)

    # Superimposed bits
    announcements = input('announcements-background.png')
    offertory = input('offertory-background.png')

    print(before_logo)
    # print(mass_parts)
    print(after_screen)

    # Split the last mass part so a second branch is available for a fade.
    split_last_mass_part = ffmpeg.filter_multi_output(mass_parts[-1], 'split')
    mass_parts[-1] = split_last_mass_part.stream(0)
    last_mass_part_fade = split_last_mass_part.stream(1)
    # ffmpeg.concat(split0, split1).output('out.mp4').run()

    print(mass_parts[-1])
    mass_parts[-1] = ffmpeg.trim(mass_parts[-1], end=10)
    print(mass_parts[-1])

    result = ffmpeg.concat(
        mass_parts[-1],
        # ffmpeg.filter([last_mass_part_fade, after_screen], 'xfade'),
        after_screen,
    ).output('out.mp4')
    print(' '.join(ffmpeg.get_args(result)))
    result.run()
示例#18
0
    def start(self):
        """Launch an ffmpeg subprocess that reads media from stdin, stamps a
        local-time clock on the video and streams the result to self.output
        as FLV.  The Popen object is stored on self.process and returned."""
        Logger.LOGGER.log(Logger.TYPE_INFO,
                          'Starting Server, output to: {}'.format(self.output))

        source = ffmpeg.input('pipe:')
        # Overlay an HH:MM clock on the video stream.
        clocked = ffmpeg.drawtext(source['v'],
                                  '%{localtime:%R}',
                                  x=c.SERV_DRAWTEXT_X,
                                  y=c.SERV_DRAWTEXT_Y,
                                  escape_text=False,
                                  shadowcolor=c.SERV_DRAWTEXT_SHADOW_COLOR,
                                  shadowx=c.SERV_DRAWTEXT_SHADOW_X,
                                  shadowy=c.SERV_DRAWTEXT_SHADOW_Y,
                                  fontsize=c.SERV_DRAWTEXT_FONT_SIZE,
                                  fontfile=c.SERV_DRAWTEXT_FONT_FILE,
                                  fontcolor=c.SERV_DRAWTEXT_FONT_COLOR)
        joined = ffmpeg.concat(clocked, source['a'], v=1, a=1)

        self.ff = ffmpeg.output(joined,
                                self.output,
                                vcodec='h264',
                                aspect=c.SERV_OUTPUT_ASPECT,
                                acodec=c.SERV_OUTPUT_ACODEC,
                                crf=c.SERV_OUTPUT_CRF,
                                preset=c.SERV_OUTPUT_PRESET,
                                format='flv',
                                pix_fmt='yuv444p')

        # '-re' makes ffmpeg consume stdin at the input's native rate.
        self.cmd = ['ffmpeg', '-re'] + ffmpeg.get_args(self.ff)
        self.process = subprocess.Popen(self.cmd,
                                        stdin=subprocess.PIPE,
                                        stdout=devnull,
                                        stderr=devnull)
        return self.process
示例#19
0
    async def _concat(self, ctx, vstream, astream, kwargs):
        """Concatenate a first video in front of (vstream, astream).

        Both clips are scaled to a common frame size (capped at 640x480,
        shrunk to the smallest video stream found) with a 1:1 SAR so that
        concat accepts them.  Returns (video, audio, output_kwargs).
        """
        first_vid_filepath = kwargs['first_vid_filepath']
        second_vid_filepath = kwargs['input_filename']

        # Determine the common target size from both files' video streams.
        target_width = 640
        target_height = 480
        first_vid_metadata = FFProbe(first_vid_filepath)
        second_vid_metadata = FFProbe(second_vid_filepath)
        for stream in first_vid_metadata.streams + second_vid_metadata.streams:
            if stream.is_video():
                width, height = stream.frame_size()
                target_width = min(target_width, width)
                target_height = min(target_height, height)

        first_stream = ffmpeg.input(first_vid_filepath)
        vfirst = (first_stream.video
                  .filter('scale', w=target_width, h=target_height)
                  .filter('setsar', r='1:1'))
        afirst = first_stream.audio

        vstream = (vstream
                   .filter('scale', w=target_width, h=target_height)
                   .filter('setsar', r='1:1'))

        joined = ffmpeg.concat(vfirst, afirst, vstream, astream, v=1, a=1).node
        return (joined[0], joined[1], {'vsync': 0})
def trim(input_path, output_path, start=20, end=22):
    """
    Trim the video using the ffmpeg-python.
    :param input_path: string
    :param output_path: string
    :param start: seconds
    :param end: seconds
    :return: "finished" on success, "unfinished" on any failure
    """
    try:
        print(f'input path: {input_path}')
        if not os.path.isfile(input_path):
            print("cannot find the input file")
            return "unfinished"

        input_stream = ffmpeg.input(input_path)

        # Trim video and audio separately, resetting timestamps so the
        # snippet starts at t=0.
        vid = (input_stream.video.trim(start=start,
                                       end=end).setpts('PTS-STARTPTS'))
        aud = (input_stream.audio.filter_('atrim', start=start,
                                          end=end).filter_(
                                              'asetpts', 'PTS-STARTPTS'))

        joined = ffmpeg.concat(vid, aud, v=1, a=1).node
        output = ffmpeg.output(joined[0], joined[1], output_path)
        output.run()
        print(f"snippet created in {output_path}")
        return "finished"
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a failed snippet.
        print(f"snippet failed in {output_path}")
        return "unfinished"
示例#21
0
def test_fluent_complex_filter():
    in_file = ffmpeg.input('dummy.mp4')
    return ffmpeg.concat(
        in_file.trim(start_frame=10, end_frame=20),
        in_file.trim(start_frame=30, end_frame=40),
        in_file.trim(start_frame=50, end_frame=60),
    ).output('dummy2.mp4')
示例#22
0
 def receive(self, text_data):
     """Handle a websocket message: trim the video at params['url'] between
     start_time and end_time with ffmpeg, streaming progress percentages
     back to the client as JSON.
     """
     text_data_json = json.loads(text_data)
     params = text_data_json
     params['username'] = '******'
     input_vid = ffmpeg.input(params['url'])
     # Trim video/audio and reset timestamps so the clip starts at t=0.
     vid = input_vid.trim(start=params['start_time'], end=params['end_time']).setpts('PTS-STARTPTS')
     aud = input_vid.filter_('atrim', start=params['start_time'], end=params['end_time']).filter_('asetpts', 'PTS-STARTPTS')
     joined = ffmpeg.concat(vid, aud, v=1, a=1).node
     stream = ffmpeg.output(joined[0], joined[1], f"out_t_{params['username']}.mp4")
     stream = stream.overwrite_output()
     log = LogController.add_log(params['username'], params['start_time'], params['end_time'], params['url'])
     # Run ffmpeg with stderr merged into stdout and parse its log lines
     # to compute a progress percentage.
     cmd = ffmpeg.compile(stream)
     process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8")
     for line in process.stdout:
         duration_res = re.search(r'\sDuration: (?P<duration>\S+)', line)
         if duration_res is not None:
             duration = duration_res.groupdict()['duration']
             duration = re.sub(r',', '', duration)

         # NOTE(review): `duration` is unbound if a time= line appears before
         # any "Duration:" line — relies on ffmpeg printing Duration first.
         result = re.search(r'\stime=(?P<time>\S+)', line)
         if result is not None:
             elapsed_time = result.groupdict()['time']
             progress = (get_seconds(elapsed_time) / get_seconds(duration)) * 100
             print("进度:%3.2f" % progress + "%")
             # time.sleep(1)
             self.send(text_data=json.dumps({'progress': progress}))
     process.wait()
     if process.poll() == 0:
         self.send(text_data=json.dumps({'progress': 100.00}))
         self.send(text_data=json.dumps({'msg': 'success'}))
示例#23
0
文件: clip.py 项目: eqy/autotosis
    def _trim(self, dest, start, end, predskip=1, notext=False):
        """Write the [start, end) second range of self.filename to `dest`,
        optionally overlaying per-second prediction text in a black box.

        :param dest: output file path
        :param start: clip start in seconds
        :param end: clip end in seconds
        :param predskip: passed through to self._drawtext (prediction stride)
        :param notext: when True, skip the drawbox/drawtext overlay
        """
        input_stream = ffmpeg.input(self.filename)
        print(start, end)
        # TODO: this part is exceptionally slow... seems like ffmpeg is
        # processing all frames and then dropping the irrelevant ones
        # when we just need 5-15 seconds of frames processed
        vid = (input_stream.video.trim(start=start,
                                       end=end).setpts('PTS-STARTPTS'))
        if not notext:
            # Horizontally centered black box to hold the prediction text.
            x = self.width // 2 - self.box_width // 2
            vid = vid.drawbox(x=x,
                              y=120,
                              height=self.box_height,
                              width=self.box_width,
                              color='black',
                              t='fill')

            # One drawtext overlay per second of the clip.
            for i in range(start, end):
                second_preds = self.inference_results[i]
                diff_preds = self.diff_inference_results[i]

                vid = self._drawtext(vid, i - start, second_preds, diff_preds,
                                     predskip)

        aud = (input_stream.audio.filter_('atrim', start=start,
                                          end=end).filter_(
                                              'asetpts', 'PTS-STARTPTS'))

        joined = ffmpeg.concat(vid, aud, v=1, a=1).node
        output = ffmpeg.output(joined[0], joined[1], dest)
        # output = ffmpeg.overwrite_output(output)
        output.run()
示例#24
0
 def repeat(self) -> None:
     """Concatenates streams with itself to make a twice as long stream"""
     if self.has_video:
         stream_v = self.stream_v.split()
         # fifo/afifo buffer each split branch so concat can consume both.
         joined = ffmpeg.concat(stream_v[0].filter("fifo"),
                                stream_v[1].filter("fifo"),
                                v=1,
                                a=0).node
         self.stream_v = joined[0]
     if self.has_audio:
         stream_a = self.stream_a.asplit()
         joined = ffmpeg.concat(stream_a[0].filter("afifo"),
                                stream_a[1].filter("afifo"),
                                v=0,
                                a=1).node
         self.stream_a = joined[0]
示例#25
0
def build_playlist_movie(tmp_file_paths,
                         movie_file_path,
                         width=None,
                         height=1080,
                         fps="24.00"):
    """
    Build a single movie file from a playlist.
    """
    streams = []
    if len(tmp_file_paths) > 0:
        (first_movie_file_path, _) = tmp_file_paths[0]
        if width is None:
            (width, height) = get_movie_size(first_movie_file_path)

        # Make sure every clip has an audio track before concatenation.
        for tmp_file_path, _ in tmp_file_paths:
            if not has_soundtrack(tmp_file_path):
                add_empty_soundtrack(tmp_file_path)

        # Interleave normalized video and audio streams for concat.
        for tmp_file_path, _ in tmp_file_paths:
            clip = ffmpeg.input(tmp_file_path)
            normalized = clip["v"].filter("setsar", "1/1").filter(
                "scale", width, height)
            streams.append(normalized)
            streams.append(clip["a"])

        joined = ffmpeg.concat(*streams, v=1, a=1).node
        video = joined[0]
        audio = joined[1]

        try:
            ffmpeg.output(audio, video,
                          movie_file_path).overwrite_output().run()
        except Exception as e:
            print(e)
            return {"success": False, "message": str(e)}
    return {"success": True}
示例#26
0
def concatenateVideos(args):
    """
    Build (and print) an ffmpeg command that concatenates every video found
    in ``args.directorySrc`` into ``<first>_full<ext>`` inside
    ``args.directoryDst``.

    NOTE: the command is only compiled and printed, never executed — the
    "COMPLETE" messages report the compile step only.
    """
    files = sorted(os.listdir(args.directorySrc))

    streams = []
    for f in files:
        print('Got file: ' + f)
        # BUG FIX: the original passed a stale `fileName` variable (the last
        # path seen while listing the directory) for EVERY input, so all
        # streams pointed at the same file. Each stream must use its own path.
        # The erroneous `format='concat'` option is also dropped: these are
        # ordinary video files, not concat-demuxer playlists.
        streams.append(ffmpeg.input(os.path.join(args.directorySrc, f)))
        print(f)

    fname, fext = os.path.splitext(files[0])
    dest = os.path.join(args.directoryDst, fname + "_full" + fext)

    print("Concatenate to: " + dest)
    print("length of files: " + str(len(streams)))

    c = ffmpeg.concat(*streams)
    cmd = ffmpeg.output(c, dest).compile()

    print("COMPLETE concatenation to: " + dest)
    print("COMPLETE concatenation command : " + ' '.join(cmd))
示例#27
0
文件: dl.py 项目: leegggg/ogmlib
def test(orgasmos):
    """Demo: loudness-normalize one track, splice five copies of it together
    with asplit, then compile and preview the graph (the run call stays off).
    """
    import ffmpeg
    from datetime import datetime

    source = ffmpeg.input(
        "data\\upload\\orgasmos\\1-x-ti\\pqaY1OJ9eWOzWgVZDn-L1TAnkcdnJrUR.mp3")
    source = ffmpeg.filter(source, 'loudnorm')

    selected = {1: source}
    segments = []
    for _ in range(5):
        # Split the current stream: keep one branch for the next iteration,
        # queue the other for concatenation.
        track_id = 1
        branches = ffmpeg.filter_multi_output(
            selected.get(track_id), 'asplit')
        selected[track_id] = branches.stream(0)
        segments.append(branches.stream(1))

    mixed = ffmpeg.concat(*segments, v=0, a=1)
    output_filename = "mix-test-{:d}.m4a".format(int(
        datetime.now().timestamp()))
    output_path = basePath.joinpath("mixed").joinpath(output_filename)
    mixed = mixed.output(str(output_path))
    command = ffmpeg.compile(mixed)
    print(command)
    ffmpeg.view(mixed, filename=str(output_path) + ".png")
    # ffmpeg.run(mixed)
示例#28
0
def create_slide(imagefile, audiofile, width, height, outputts, rate):
    """
    Render a still image into a video slide lasting as long as the audio
    file, mux the audio in, and write the result to ``outputts``.

    NOTE(review): the ``rate`` parameter is ignored; the frame rate is
    hard-coded to 4 fps — confirm whether that is intentional.
    """
    # The narration's duration determines how many half-second slides we need.
    info = ffmpeg.probe(audiofile)
    audio_info = next(
        (s for s in info['streams'] if s['codec_type'] == 'audio'), None)
    duration = float(audio_info['duration'])

    base, ext = os.path.splitext(imagefile)

    # Two numbered copies of the image feed the %d input pattern below.
    for idx in range(2):
        copyfile(imagefile, "%s_%d%s" % (base, idx, ext))

    # One "slide" is a half-second clip built from the two images at 4 fps.
    slide = ffmpeg.input("%s_%s%s" % (base, "%d", ext), r=4)

    count = ceil(duration * 2)

    video = ffmpeg.concat(*([slide] * count)).filter(
        'scale', width, height).filter('setdar', '16/9')

    # Combine the slide video with the audio and save.
    narration = ffmpeg.input(audiofile)
    ffmpeg.output(video, narration, outputts,
                  r=4).overwrite_output().run(quiet=QUIET_FFMPEG)
    return [outputts]
示例#29
0
    def saveVideo(self, targetFilePath, frameCount, frameRate, includeReversed):
        """
        Render the interpolation as a video of ``frameCount`` frames at
        ``frameRate`` fps, written to ``targetFilePath``.

        When ``includeReversed`` is true the clip is followed by its own
        reversal, producing a seamless back-and-forth loop.

        Raises ValueError if frameCount < 10.
        """
        if frameCount < 10:
            raise ValueError('frameCount must be greater than 10')
        # Context manager guarantees the scratch frames are removed even if
        # ffmpeg fails part-way through (the original leaked until GC).
        with tempfile.TemporaryDirectory() as tempDirName:
            alphaIncrement = 1 / (frameCount - 1)
            for index in range(frameCount):
                alpha = index * alphaIncrement
                image = self.getImageAtAlpha(alpha)
                # BUG FIX: zero-pad the frame number. The glob input pattern
                # orders files lexically, so '10.png' sorted before '2.png'
                # and scrambled the frames whenever frameCount > 10.
                path = os.path.join(tempDirName, f'{index:06d}.png')
                imageio.imwrite(path, image)

            frames = ffmpeg.input(
                os.path.join(tempDirName, '*.png'),
                pattern_type='glob',
                framerate=frameRate,
            )
            if includeReversed is False:
                frames.output(targetFilePath).run()
            else:
                # Render the forward pass once, then concat it with its own
                # reversed video stream.
                tempVideoPath = os.path.join(tempDirName, 'temp.mp4')
                frames.output(tempVideoPath).run()
                forward = ffmpeg.input(tempVideoPath)
                backward = ffmpeg.input(tempVideoPath).video.filter('reverse')
                joined = ffmpeg.concat(forward, backward).node
                ffmpeg.output(joined[0], targetFilePath).run()
示例#30
0
def build_playlist_movie(tmp_file_paths,
                         movie_file_path,
                         width=None,
                         height=1080,
                         fps="24.00"):
    """
    Concatenate every clip in ``tmp_file_paths`` (pairs of path and display
    name) into a single movie at ``movie_file_path``.

    Returns ``movie_file_path`` in all cases; build errors are printed but
    not raised.
    """
    if not tmp_file_paths:
        return movie_file_path

    # Default the output size to the dimensions of the first clip.
    (first_movie_file_path, _) = tmp_file_paths[0]
    if width is None:
        (width, height) = get_movie_size(first_movie_file_path)

    # Interleave video (normalized SAR, rescaled) and audio streams,
    # as ffmpeg.concat(v=1, a=1) expects.
    streams = []
    for tmp_file_path, _ in tmp_file_paths:
        clip = ffmpeg.input(tmp_file_path)
        streams.append(clip['v'].filter('setsar', '1/1').filter(
            'scale', width, height))
        streams.append(clip['a'])

    concatenated = ffmpeg.concat(*streams, v=1, a=1).node
    video = concatenated[0]
    audio = concatenated[1]

    try:
        ffmpeg \
            .output(audio, video, movie_file_path) \
            .overwrite_output() \
            .run()
    except Exception as e:
        # Best-effort: failures are reported on stdout, not propagated.
        print(e)
    return movie_file_path