Code Example #1
def process():
    global output
    global audiopath

    #find the codec
    mouthData = rhubarb()

    if output.find(".mp4") >= 0:
        codec = "libx264"
        video = True

    elif output.find(".avi") >= 0:
        codec = "png"
        video = True

    # image-sequence output: turn the folder name into a path plus a frame-name pattern
    elif output.find(".folder") >= 0:
        # make the directory
        output = output.replace(".folder", "")
        mkdir(output)
        # build the frame-name pattern from the last path component
        ident = output.split("/")
        output = output + "/" + ident[-1] + "%06d.png"

        video = False

    imageclips = list()

    for data in mouthData:
        imageclips.append(videoImage(data))

    #concatenate all
    final = concatenate_videoclips(imageclips, method="compose")

    # Render out: if video, render a video file with audio; otherwise write an image sequence
    if video:
        final = final.set_audio(AudioFileClip(audiopath))
        final.write_videofile(output, codec=codec, fps=60)

    else:
        final.write_images_sequence(nameformat=output, fps=60, withmask=True)
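
Note: the snippet above assumes mkdir and the moviepy names are imported at module level; rhubarb() and videoImage() are project-local helpers not shown here. A minimal sketch of the imports it relies on:

from os import mkdir
from moviepy.editor import AudioFileClip, concatenate_videoclips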
Code Example #2
def demo(filename,
         tracking,
         output,
         t_start=0.,
         t_end=None,
         shift=0.,
         labels=None,
         landmark=None,
         height=200,
         ffmpeg=None):

    # parse label file
    if labels is not None:
        with open(labels, 'r') as f:
            labels = {}
            for line in f:
                identifier, label = line.strip().split()
                identifier = int(identifier)
                labels[identifier] = label

    video = Video(filename)

    if ffmpeg is not None:
        import os
        os.environ['IMAGEIO_FFMPEG_EXE'] = ffmpeg

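    # moviepy resolves its ffmpeg binary at import time, so the import below
    # must happen after IMAGEIO_FFMPEG_EXE has been set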
    from moviepy.editor import VideoClip, AudioFileClip

    make_frame = get_make_frame(video,
                                tracking,
                                landmark=landmark,
                                labels=labels,
                                height=height,
                                shift=shift)
    video_clip = VideoClip(make_frame, duration=video.duration)
    audio_clip = AudioFileClip(filename)
    clip = video_clip.set_audio(audio_clip)

    if t_end is None:
        t_end = video.duration

    clip.subclip(t_start, t_end).write_videofile(output, fps=video.frame_rate)
Code Example #3
    def genVideo(self):
        # load the background image as a clip
        clip = ImageClip(self.imagepath)

        # build scrolling credits from a text file
        credits = credits1(self.textpath, 3 * clip.w / 4)
        scrolling_credits = credits.set_pos(lambda t: ('center', -10 * t))

        # add the audio
        audio = AudioFileClip(self.musicpath)

        final = CompositeVideoClip([clip, scrolling_credits])
        # give the composite an explicit duration so write_videofile can render it
        final_video = final.set_audio(audio).set_duration(audio.duration)
        final_video.write_videofile(str(self.name) + '.mp4',
                                    fps=30,
                                    codec='mpeg4',
                                    preset='ultrafast',
                                    audio_codec="libmp3lame",
                                    threads=4)
Code Example #4
def merge_audio_video(audio_file: str, video_file: str, frame_number: int = 0):
    audio_clip = AudioFileClip(audio_file)
    video_clip = VideoFileClip(video_file)

    start_time, video_fps = starting_point_in_time(video_file, frame_number)

    # audio_clip.duration = video_clip.duration
    orig_video_duration = video_clip.duration
    orig_audio_duration = audio_clip.duration

    audio_clip = audio_clip.subclip(
        0, min(orig_video_duration - start_time, orig_audio_duration))

    first_part = video_clip.subclip(0, start_time)
    second_part = video_clip.subclip(start_time, orig_video_duration)

    second_part.audio = audio_clip

    final_video_clip = concatenate_videoclips([first_part, second_part])
    return final_video_clip
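
Note: starting_point_in_time() is a project helper whose implementation is not shown; judging from its use it returns a timestamp in seconds and the video fps for a given frame number. A hypothetical sketch:

def starting_point_in_time(video_file: str, frame_number: int):
    # hypothetical helper: convert a frame index into seconds via the clip fps
    fps = VideoFileClip(video_file).fps
    return frame_number / fps, fps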
Code Example #5
def split_audio_from_video(video_input_path, audio_output_path):
    """This function is used to extract voice from a video.
    Args:
        video_input_path: path of the origin video
        audio_output_path: path to the voice file
    Returns: int
        on success this function returns 0
        on failure this function returns 1
    """
    if not os.path.isfile(video_input_path):
        print("invalid video path")
        return FAILED
    my_audio_clip = AudioFileClip(video_input_path)
    my_audio_clip.write_audiofile(audio_output_path, codec='libvorbis')
    if not os.path.isfile(audio_output_path):
        print("invalid output path")
        my_audio_clip.close()
        return FAILED
    my_audio_clip.close()
    return SUCCESS
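
Note: FAILED and SUCCESS are module-level constants not shown in the snippet; given the docstring, presumably:

SUCCESS = 0  # returned on success
FAILED = 1   # returned on failure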
Code Example #6
File: predict.py Project: Suvi1301/chirp-chirp
def _spectrogram(filename: str, path: str):
    ''' Generates a Spectrogram from an mp3 and saves it'''
    try:
        audio = AudioFileClip(f'{path}/{filename}.mp3')
        audio_data = audio.to_soundarray()
        audio_data = audio_data[:, 0]
        vmin = 20 * np.log10(np.max(audio_data)) - 100
        fig = plt.figure()
        plt.specgram(audio_data,
                     Fs=SPECTROGRAM_PARAMS['FRAME_RATE'],
                     NFFT=SPECTROGRAM_PARAMS['NFFT'],
                     window=np.hamming(512),
                     cmap='inferno',
                     vmin=vmin)
        fig.savefig(f'{path}/{filename}.jpg')
        plt.close(fig)
        LOG.info(f'Successfully saved Spectrogram {filename}.jpg to {path}')
    except Exception as ex:
        LOG.error(
            f'Failed to convert {filename} to Spectrogram. Reason="{ex}"')
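
Note: SPECTROGRAM_PARAMS and LOG are module-level definitions not shown here. A minimal sketch, with values mirroring the defaults of Code Example #22 from the same project:

import logging

LOG = logging.getLogger(__name__)
SPECTROGRAM_PARAMS = {'FRAME_RATE': 22050, 'NFFT': 512}  # assumed defaults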
Code Example #7
def generate_text_clip(text, number):
    filename = "tmp/" + name + "/clips/" + name + number + ".mp4"

    if not os.path.exists(filename):
        audio_filename = make_tts(text, number)
        audio = AudioFileClip(audio_filename)
        image = ImageClip(background_image).set_fps(30)
        video = image.set_duration(audio.duration)
        withaudio = video.set_audio(audio)

        # scale the font down as the text gets longer so it fits the frame width
        fontsize = withaudio.w / (len(text) + 10)
        text_clip = TextClip(text,
                             fontsize=fontsize,
                             size=(withaudio.w, withaudio.h)).set_pos("center")

        final_clip = CompositeVideoClip(
            [withaudio, text_clip.set_duration(video.duration)])

        final_clip.write_videofile(filename)
    return filename
Code Example #8
def startPreview(onlyAudio, url, cut, low, high, volume):

    clip = None
    if onlyAudio:
        clip = AudioFileClip(url)
    else:
        clip = VideoFileClip(url)

    if volume != 0:
        clip = clip.volumex(volume)

    if cut:
        clip = clip.subclip(low, high)
    print("PREVIEWING WITH MOVIEPY")
    clip.preview()
    clip.close()

    # See https://github.com/Zulko/moviepy/issues/575
    pygame.quit()
Code Example #9
    def build_movie(self):
        """ Concatenate self._full_frames images into video file, add back original music. """
        from moviepy.editor import AudioFileClip, ImageSequenceClip

        outname = self.decomposer.wav_file.replace('input', 'output')
        outname = outname.replace('wav', 'mp4')

        output = ImageSequenceClip(
            [self._generate_keyboard(t)[0] for t in range(self.decomposer.chromagram_raw.shape[1])], fps=self.fps_out/2
        )
        output = output.cutout(0, 1)  # trim to compensate for FFT lag
        output = output.set_audio(AudioFileClip(self.decomposer.wav_file))
        output.write_videofile(
            outname,
            fps=self.fps_out,
            temp_audiofile="temp-audio.m4a",
            remove_temp=True,
            codec="libx264",
            audio_codec="aac"
        )
Code Example #10
    def _get_samples(self, data_file):

        time = self.dict_files[data_file]['time']

        if 'audio' in self.input_type.lower():
            audio_clip = AudioFileClip(str(data_file))
            clip = audio_clip.set_fps(16000)
            num_samples = int(clip.fps * (time[1] - time[0]))
        elif 'video' in self.input_type.lower():
            clip = VideoFileClip(str(data_file))

        if self.dict_files[data_file]['labels'].shape[0] == 1:
            clip_list = np.reshape(
                np.array(list(clip.iter_frames())).mean(1), (1, -1))

            return clip_list, self.dict_files[data_file]['labels']

        frames = []
        for i in range(len(time) - 1):
            start_time = time[i]
            end_time = time[i + 1]
            data_frame = np.array(
                list(clip.subclip(start_time, end_time).iter_frames()))

            if 'audio' in self.input_type.lower():
                data_frame = np.squeeze(data_frame)
                data_frame = data_frame.mean(1)[:num_samples]

            frames.append(data_frame.astype(np.float32))

        self.shape = data_frame.shape

        if i == 0:
            chunk_size = 640  # split the audio into 40 ms chunks (640 samples at 16 kHz)
            audio = np.pad(data_frame,
                           (0, chunk_size - data_frame.shape[0] % chunk_size),
                           'constant')
            audio = np.reshape(audio, (-1, chunk_size)).astype(np.float32)
            frames = [audio]

        return frames, self.dict_files[data_file]['labels']
Code Example #11
    def _get_samples(self, data_file: str, label_file: str):

        file_data, attrs_name = self.labelfile_reader.read_file(label_file)
        file_data = np.array(file_data).astype(np.float32)

        clip = AudioFileClip(str(data_file), fps=self.fps)

        # Sample duration in seconds (earlier revisions used 40 ms or 100 ms);
        # every sample in a file shares the same label
        sample_duration = 1
        num_samples = int(self.fps * sample_duration)

        seq_num = int(clip.duration / sample_duration)

        labels = np.repeat(file_data, seq_num, axis=0)

        frames = []
        for i in range(seq_num):
            start_time = i * sample_duration
            end_time = (i + 1) * sample_duration

            data_frame = np.array(
                list(clip.subclip(start_time, end_time).iter_frames()))
            data_frame = data_frame.mean(1)[:num_samples]

            frames.append(data_frame.astype(np.float32))

        frames = np.array(frames).astype(np.float32)
        labels = np.array(labels).astype(np.float32)

        return frames, labels, seq_num, num_samples, attrs_name
Code Example #12
        def process_clip():
            clip = VideoFileClip(file_path, target_resolution=[720, 1280])
            # keep only the first 10 seconds
            if clip.duration > 10:
                clip = clip.subclip(0, 10)

            safe_duration = max(0, clip.duration - 0.1)

            # Freeze-frame jumpscare overlay
            sound = AudioFileClip("assets/fnafjumpscare/sound.mp3")
            gif = VideoFileClip("assets/fnafjumpscare/scare.gif", target_resolution=[720, 1280])\
                .fx(vfx.mask_color, color=[255,255,255]).set_duration(sound.duration)
            freeze_frame = ImageClip(clip.get_frame(safe_duration))\
                .set_duration(sound.duration)
            freeze_compos = CompositeVideoClip([freeze_frame, gif])\
                .set_duration(sound.duration).set_audio(sound)

            # Final clip
            final_clip = concatenate_videoclips([clip, freeze_compos])

            return final_clip, [clip, sound, freeze_frame, gif, freeze_compos]
Code Example #13
File: audio_mixer.py Project: raffienficiaud/livius
    def get_outputs(self):
        super(AudioMixerJob, self).get_outputs()

        input_video = os.path.join(self.video_location, self.video_filename)

        clip = AudioFileClip(input_video)

        def apply_effects(get_frame, t):
            """Function that chains together all the post processing effects."""

            frame = get_frame(t)

            if frame.shape[1] < 2:
                return frame

            a = self.mixing_left / (self.mixing_left + self.mixing_right)
            mixed = a * frame[:, 0] + (1 - a) * frame[:, 1]
            return np.vstack([mixed, mixed]).transpose()

        # retains the duration of the clip
        return clip.fl(apply_effects, keep_duration=True)
Code Example #14
    def split_songs(self):
        """ Creates songs from downloaded audio file by clipping it using provided time links. """

        # Instantiates AudioFileClip object from Moviepy module
        audio = AudioFileClip(self.filename)

        # List of time links scraped from audio file's YouTube page
        times = self.get_time_links()

        # Creates songs based on number of time links scraped
        for i in range(0, len(times)):

            # Time when song starts in audio file
            start_time = Splitter.time_str_to_tuple(times[i])

            # Time when song ends in audio file or None if last song
            end_time = None if i == (len(times) - 1) else Splitter.time_str_to_tuple(times[i + 1])

            # Creates song
            song = audio.subclip(start_time, end_time)
            song.write_audiofile("clip{}.mp3".format(i+1))
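
Note: Splitter.time_str_to_tuple() is not shown; it presumably parses a scraped timestamp string into a tuple, which subclip() accepts as (min, sec) or (hour, min, sec). A hypothetical sketch:

@staticmethod
def time_str_to_tuple(time_str):
    # "1:23:45" -> (1, 23, 45); "3:02" -> (3, 2)
    return tuple(int(part) for part in time_str.split(":"))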
Code Example #15
File: Effect.py Project: Fruitseye/Fun_projects
def Dark_MODE(clip):
    audio = AudioFileClip("soundtrack.mp3")

    clip1 = clip
    clip2 = clip1.fx(vfx.mirror_x)
    clip3 = clip1.fx(vfx.mirror_y)
    clip4 = clip2.fx(vfx.mirror_y)

    final_clip = clips_array([[clip1, clip2], [clip3, clip4]])
    if final_clip.duration >= audio.duration:
        # loop the audio to cover the whole video, then fade out
        # (afx is moviepy.audio.fx.all, available via moviepy.editor)
        audio = audio.fx(afx.audio_loop, duration=final_clip.duration).audio_fadeout(2)
    else:
        # trim the audio to the video length, then fade out
        audio = audio.subclip(0, final_clip.duration).audio_fadeout(2)

    final_clip = final_clip.set_audio(audio)
    final_clip.resize(width=480).write_videofile("Dark.mp4")
Code Example #16
    def recognize(self, path_to_file):
        """Transcribe an audio file using Google speech recognition.

        :param path_to_file: path to the audio file to transcribe
        :return: the recognized text
        """

        logging.info("Speech recognizer initialized successfully")

        # Retrieve the audio length
        audio_len = AudioFileClip(path_to_file).duration

        with sr.AudioFile(path_to_file) as source:

            # Creating AudioData instance
            audio = self.recognizer.record(source, duration=audio_len)

            # Using Google speech recognition
            logging.info(f"Converting audio into text ...")
            text = self.recognizer.recognize_google(audio, language="en-US")
            return text
Code Example #17
def main(width, height, text, music, output):
    with open(text, 'r', encoding='utf-8') as f:
        word_list = f.readlines()
    words = "".join(word_list)
    words_num = len(filter_text(words))

    # duration allotted to each character
    time_len = librosa.get_duration(filename=music)
    unit_time = time_len / words_num

    # 生成每句话的TextClip
    clips = []
    start = 0
    end = 0
    for text in word_list:
        start = end
        text = filter_text(text)
        end = start + unit_time * len(text)
        text_clip = TextClip(
            text,
            fontsize=width // 12,
            color='white',
            size=(width, height),
            method='caption',
            font='msyhbd.ttc')\
            .set_start(start)\
            .set_end(end)
        text_clip = text_clip.set_pos('center')
        clips.append(text_clip)

    # compose and render the final video file
    final_clip = CompositeVideoClip(clips)
    audio_clip = AudioFileClip(music)
    final_video = final_clip.set_audio(audio_clip)
    final_video.write_videofile(output,
                                fps=30,
                                codec='mpeg4',
                                preset='ultrafast',
                                audio_codec="libmp3lame",
                                threads=4)
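
Note: filter_text() is a project helper not shown here; from its use it strips whitespace and punctuation so that only speakable characters count toward the timing. A hypothetical sketch:

import re

def filter_text(text):
    # keep only word characters (including CJK) for timing purposes
    return re.sub(r'[^\w]', '', text, flags=re.UNICODE)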
Code Example #18
def renderVideos(data):
    cliparr = []

    for entry in data:
        #if data[entry] == "5.mp4":
        #break
        c = VideoFileClip(f"raw_videos/{data[entry]}",
                          target_resolution=(1080, 1920))
        t = TextClip(entry, fontsize=50, color='white')
        # place the caption 10% across and 80% down the frame
        t = t.set_position((0.1, 0.8), relative=True).set_duration(c.duration)
        c = CompositeVideoClip([c, t])

        cliparr.append(c)

    final_clip = concatenate_videoclips(cliparr, method='compose')
    final_clip = final_clip.fx(volumex, 0.3)
    audio_background = AudioFileClip(BACKGROUND_MUSIC_PATH).set_duration(
        final_clip.duration)
    final_audio = CompositeAudioClip([final_clip.audio, audio_background])
    ret_clip = final_clip.set_audio(final_audio)
    return ret_clip
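
Note: volumex and BACKGROUND_MUSIC_PATH are assumed to be defined at module level, roughly:

from moviepy.audio.fx.all import volumex

BACKGROUND_MUSIC_PATH = "background_music.mp3"  # hypothetical path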
Code Example #19
    def _get_samples(self, data_file: str, label_file: str):
        """ Read samples from file.
        
        Args:
          data_file (str): File to read the data from.
          label_file (str): File to read the labels from.
        
        Returns:
          frames (np.array): Frames of each file.
          labels (np.array): Labels for each frame.
          seq_num (int): Number of samples in file.
          num_samples (int): Number of samples per frame.
          attrs_name (str): Label names.
        """

        file_data, attrs_name = self.labelfile_reader.read_file(label_file)
        file_data = np.array(file_data).astype(np.float32)
        timestamps = file_data[:,0]
        labels = file_data[:-1,1:]
        
        seq_num = labels.shape[0] - 1
        
        clip = AudioFileClip(str(data_file), fps=self.fps)
        
        num_samples = int(self.fps * (timestamps[1] - timestamps[0]))
        frames = []        
        for i in range(len(timestamps) - 1):
            start_time = timestamps[i]
            end_time = timestamps[i + 1]
            
            data_frame = np.array(list(clip.subclip(start_time, end_time).iter_frames()))
            data_frame = data_frame.mean(1)[:num_samples]
            
            frames.append(data_frame.astype(np.float32))
        
        frames = np.array(frames).astype(np.float32)
        labels = np.array(labels).astype(np.float32)
        
        return frames, labels, seq_num, num_samples, attrs_name
Code Example #20
File: video_project.py Project: kpister/oratio
    def overlay_dubbing_and_save(self):
        for locale in self._locales_to_export():
            track = self.tracks[locale]
            dubbed_audio = AudioFileClip(
                os.path.join(self.full_path, locale, DUBBED_AUDIO_FILENAME))
            dubbed_video = self.video_clip.set_audio(dubbed_audio)

            if self.include_watermarks:
                translated_locale_durations = self._get_locale_duration_tuples(
                    locale, dubbed_video)
                dubbed_video = overlay.watermark_video_clip(
                    dubbed_video, self.input_locale,
                    translated_locale_durations)

            dubbed_video.write_videofile(
                os.path.join(self.full_path, locale,
                             f"{locale}_{DUBBED_COMPOSITE_FILENAME}"),
                # TODO: This should only be used for .mov exports.
                # For .mp4 .ogv and .webm, codec is set automatically.
                codec=DEFAULT_CODEC_FOR_DUBBED_VIDEO,
                fps=self._fps(),
            )
Code Example #21
File: main.py Project: FuckBrains/youtube-video-tool
def ttsVideo(subreddit="entitledparents", filter="month", limit=5):
    deleteTTS()
    for filename in os.listdir("tts/final/"):
        file_path = os.path.join("tts/final/", filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
        except Exception as e:
            print("Failed to delete %s. Reason: %s" % (file_path, e))

    path = "./tts/"
    videos = []

    for submission in reddit.subreddit(subreddit).hot(limit=limit):

        if len(submission.selftext) > 100:
            ttsMerge(submission)
            cliparray = []
            audioarray = []
            for file in os.listdir(path + "videos/"):
                cliparray.append(VideoFileClip(path + "videos/" + file))

            for file in os.listdir(path + "audio/"):
                audioarray.append(AudioFileClip(path + "audio/" + file))

            audio = concatenate_audioclips(audioarray)

            final_clip = concatenate_videoclips(cliparray, method="compose")
            final_clip.audio = audio
            final_clip.write_videofile(
                path + "final/" + submission.id + ".mp4",
                temp_audiofile="tts/tmp/tmp_audio.mp3",
            )
            deleteTTS()

    for file in os.listdir(path + "final/"):
        videos.append(VideoFileClip(path + "final/" + file))

    final_clip = concatenate_videoclips(videos, method="compose")
    final_clip.write_videofile("output2.mp4", temp_audiofile="tmp/tmp_audio.mp3")
Code Example #22
def spectrogram(
        filename: str,
        species: str,
        nfft: int = 512,
        window=np.hamming(512),
        format=AudioFormat.MP3,
        frame_rate: int = 22050,
):
    ''' Plot a spectrogram for an audio file (WAV or MP3) and save it '''
    # TODO: Overlap?
    # TODO: Ignoring frequency range

    try:
        if format == AudioFormat.WAV:
            audio_data = read_wav(filename, species)
        elif format == AudioFormat.MP3:
            audio = AudioFileClip(
                f'{SPECIES_RAW_AUDIO_PATH}{species}/mp3/{filename}.mp3')
            audio_data = audio.to_soundarray()
            audio_data = audio_data[:, 0]
        else:
            raise NotImplementedError()
        fig = plt.figure()
        vmin = 20 * np.log10(np.max(audio_data)) - 100
        plt.specgram(
            audio_data,
            Fs=frame_rate,
            NFFT=nfft,
            window=window,
            cmap='inferno',
            vmin=vmin,
        )
        fig.savefig(f'{SPECTROGRAM_PATH}{species}/{filename}.jpg')
        plt.close(fig)
    except Exception as ex:
        print(
            f'ERROR: Failed to convert {filename} to Spectrogram. Reason="{ex}"'
        )
Code Example #23
def make_video(audio,
               filename,
               progan,
               n_bins=60,
               random_state=0,
               imgs_per_batch=20):
    y, sr = librosa.load(audio)
    song_length = len(y) / sr
    z_audio = get_z_from_audio(y,
                               z_length=progan.z_length,
                               n_bins=n_bins,
                               random_state=random_state)
    fps = z_audio.shape[0] / song_length
    res = progan.get_cur_res()
    shape = (res, res * 16 // 9, 3)

    imgs = np.zeros(shape=[imgs_per_batch, *shape], dtype=np.float32)

    def make_frame(t):
        # rebind imgs in the enclosing make_video scope, not module scope
        nonlocal imgs
        cur_frame_idx = int(t * fps)

        if cur_frame_idx >= len(z_audio):
            return np.zeros(shape=shape, dtype=np.uint8)

        if cur_frame_idx % imgs_per_batch == 0:
            imgs = progan.generate(z_audio[cur_frame_idx:cur_frame_idx +
                                           imgs_per_batch])
            imgs = imgs[:, :, :res * 8 // 9, :]
            imgs_rev = np.flip(imgs, 2)
            imgs = np.concatenate((imgs, imgs_rev), 2)

        return imgs[cur_frame_idx % imgs_per_batch]

    video_clip = VideoClip(make_frame=make_frame, duration=song_length)
    audio_clip = AudioFileClip(audio)
    video_clip = video_clip.set_audio(audio_clip)
    video_clip.write_videofile(filename, fps=fps)
Code Example #24
def main(opt):
    """ return the url of concatenate video
    >>> main(opt={"output_dir":"my_video_scenes_tmp/","movie_shots":['my_video_scenes_tmp/test1.mp4', 'my_video_scenes_tmp/test2.mp4','my_video_scenes_tmp/test3.mp4','my_video_scenes_tmp/test4.mp4'],'user_id':'1'})
    'show_video_1.mp4' 
    """
    opt['output_dir'] = os.path.join(my_path, '../' + opt['output_dir'])
    for index, m in enumerate(opt['movie_shots']):
        opt['movie_shots'][index] = os.path.join(my_path, '../../' + m)
    clips = []
    for clip in opt['movie_shots']:
        clips.append(VideoFileClip(clip))
    nu = random.randint(1, 100)
    write_url = os.path.join(
        my_path,
        opt['output_dir'] + "show_video_{}_{}.mp4".format(opt['user_id'], nu))
    if 'audio' in opt.keys():
        if os.path.exists(write_url):
            os.remove(write_url)
        a_path = os.path.join(
            my_path, '../../' + 'my_video_scenes_tmp/music/' + opt['audio'])
        logo = os.path.join(my_path,
                            '../../' + 'my_video_scenes_tmp/' + opt['logo'])
        audio = AudioFileClip(a_path)
        final_clip = concatenate_videoclips(clips).set_audio(audio)
        final_clip = concatenate_videoclips([VideoFileClip(logo), final_clip])
        msg = final_clip.write_videofile(write_url)
    else:
        final_clip = concatenate_videoclips(clips)
        msg = final_clip.write_videofile(write_url)
    show_url = "show_video_{}_{}.mp4".format(opt['user_id'], nu)
    for clip in clips:
        clip.close()
    return show_url
Code Example #25
File: _track.py Project: the-dotify-project/dotify
    def as_mp3(
        self,
        mp3_path: Path,
        skip_existing: Optional[bool] = False,
        progress_logger: Optional[logging.Logger] = None,
    ) -> Path:
        """Download the track in `.mp3` format.

        Args:
            mp3_path (Path): where the resulting file should be stored
            skip_existing (Optional[bool]): whether to skip the download
                if the file already exists. Defaults to False.
            progress_logger (Optional[logging.Logger]): a logger reporting
                on the download progress. Defaults to None.

        Returns:
            Path: the download location of the `.mp3` file
        """
        # FIXME: genres
        # FIXME: progress bar and logging both for moviepy and pytube

        mp3_path = Path(mp3_path)

        mp4_path = self.as_mp4(mp3_path, skip_existing=skip_existing)

        audio_file_clip = AudioFileClip(str(mp4_path))
        audio_file_clip.write_audiofile(str(mp3_path), logger=progress_logger)

        mp4_path.unlink()

        easy_id3 = EasyID3(mp3_path)

        easy_id3.update(self.id3_tags)

        easy_id3.save(v2_version=3)

        return mp3_path
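
Note: EasyID3 comes from the mutagen package (self.as_mp4 and self.id3_tags are defined elsewhere in the class); the relevant import is presumably:

from mutagen.easyid3 import EasyID3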
Code Example #26
def generate_video(name):
    try:
        image_folder = '.'  # make sure to use your folder
        video_name = 'mygeneratedvideo.mp4'
        os.chdir(os.path.join(settings.BASE_DIR, r"dataset/" + name))
        print(os.listdir())
        images = [
            img for img in os.listdir(image_folder) if img.endswith(".jpg")
            or img.endswith(".jpeg") or img.endswith("png")
        ]

        frame = cv2.imread(os.path.join(image_folder, images[0]))

        height, width, layers = frame.shape
        frameRate = 10
        video = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'mp4v'), 1,
                                (width, height))
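        # the writer runs at 1 fps, so each image written frameRate times
        # stays on screen for frameRate seconds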
        audioLength = AudioFileClip('audio.mp3').duration

        videoToLoop = audioLength
        if videoToLoop < 1:
            videoToLoop = 1

        # Appending the images to the video one by one
        while videoToLoop > 0:
            for image in images:
                for i in range(frameRate):
                    video.write(cv2.imread(os.path.join(image_folder, image)))
            videoToLoop -= (frameRate * len(images))
        # Deallocating memories taken for window creation
        cv2.destroyAllWindows()
        video.release()  # releasing the video generated
        print(os.listdir())
    except Exception as e:
        print('generate_video failed:')
        print(e)
Code Example #27
def audio_frequency(df, f_index=None, parameters=None, call_num=None):
    """
    Decodes the audio frequency in our videos. We use this to
    encode information about the acceleration being applied to a video
    directly into the audio channel, which lets us recover that
    information later.

    Args
    ----
        df (pandas.DataFrame): per-frame data, indexed by frame number
        f_index (int, optional): index of the frame being processed. Defaults to None.
        parameters (dict, optional): experiment settings; must provide
            parameters['experiment']['video_filename']. Defaults to None.
        call_num (int, optional): call counter. Defaults to None.

    Returns
    -------
        pandas.DataFrame: df with the decoded 'audio_frequency' value filled in
    """
    try:
        filename = parameters['experiment']['video_filename']
        command = f"ffmpeg -i {filename} -ar 48000 -ss {0.02*f_index} -to {0.02*(f_index+1)} -vn out.wav"
        if os.path.exists("out.wav"):
            os.remove("out.wav")
        subprocess.call(command, shell=True, stderr=subprocess.DEVNULL)
        audio_arr = AudioFileClip("out.wav").to_soundarray(fps=48000,
                                                           nbytes=2)[:, 0]
        ft = np.abs(np.fft.fft(audio_arr, n=len(audio_arr)))
        freq = np.fft.fftfreq(len(audio_arr), 1 / 48000)
        peak = int(abs(freq[np.argmax(ft)]))
        if 'audio_frequency' not in df.columns:
            df['audio_frequency'] = -1.0
        df_frame = df.loc[[f_index]]
        df_frame['audio_frequency'] = peak
        df.loc[f_index] = df_frame
        return df
    except Exception as e:
        raise AudioFrequencyError(e)
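
Note: AudioFrequencyError is a project-specific exception not shown here; a minimal sketch:

class AudioFrequencyError(Exception):
    """Raised when the audio frequency cannot be decoded."""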
Code Example #28
def remove_video_music(video_name, video_path, output_video_path, output_dir):
    '''
    @argument video_name: video name without extension
    @argument video_path: the path to the input video
    @argument output_video_path: the output path, including the new name of the video
    @argument output_dir: the output directory
    '''

    output_dir = str(output_dir)
    separator = Separator('spleeter:2stems')  # set up spleeter
    video = VideoFileClip(str(video_path))  # load the input video
    audio = video.audio  # extract the audio from the input video
    # write the audio file to the output directory for the separator to consume
    audio.write_audiofile(output_dir + video_name + '.wav')
    # separate the music from the vocals
    separator.separate_to_file(output_dir + video_name + '.wav', output_dir)
    # get the audio without music
    new_audio = AudioFileClip(output_dir + video_name + '/vocals.wav')
    # set the new music-free audio on the video
    new_video = video.set_audio(new_audio)
    new_video.write_videofile(str(output_video_path), audio=True)
Code Example #29
    def run(self, *args, **kwargs):

        slide_clip = args[0]
        speaker_clip = args[1]
        meta = args[2] if len(args) > 2 else None
        audio_clip = args[3] if len(args) > 3 else None

        assert (hasattr(self, 'processing_time'))
        self.processing_time = None

        # start time, just to save something
        start = datetime.datetime.now()

        input_video = os.path.join(self.video_location, self.video_filename)
        output_video = os.path.join(self.output_video_folder,
                                    self.output_video_file)
        # the container extension will be appended by the layout function
        output_video_no_container = os.path.splitext(output_video)[0]

        # the folder containing the images common to all videos
        ressource_folder = os.path.abspath(
            os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
                         os.pardir, "ressources"))
        video_background_image = os.path.join(ressource_folder,
                                              self.background_image_name)

        credit_images_and_durations = [
            (os.path.join(ressource_folder, i), 5)
            for i in self.credit_image_names
        ]  # each credit image is shown for 5 seconds

        # introduction images
        intro_images_and_durations = []
        if meta is not None and "intro_images" in meta:
            for current in meta['intro_images']:

                image_file = current

                if not os.path.exists(image_file):
                    # fallback in case the image does not exist (full path not given)
                    if self.video_intro_images_folder is not None:
                        image_file = os.path.join(
                            self.video_intro_images_folder, current)

                if not os.path.exists(image_file):
                    logger.error("[INTRO] image %s not found", image_file)
                    raise RuntimeError("[INTRO] image %s does not exist" %
                                       image_file)

                intro_images_and_durations += [
                    (image_file, None)
                ]  # None sets the duration to the default

        # pauses / including video begin/end
        pauses = []
        if meta is not None and "video_begin" in meta and meta[
                'video_begin'] is not None:
            pauses += [(None, meta['video_begin'])]  # None means video begin

        if meta is not None and "video_end" in meta and meta[
                'video_end'] is not None:
            pauses += [(meta['video_end'], None)]  # None means video end

        # if no specific handling of the audio is performed upstream, we default to the one of the video
        if audio_clip is None:
            audio_clip = AudioFileClip(input_video)

        createFinalVideo(
            slide_clip=slide_clip,
            speaker_clip=speaker_clip,
            audio_clip=audio_clip,
            video_background_image=video_background_image,
            intro_image_and_durations=intro_images_and_durations,
            credit_images_and_durations=credit_images_and_durations,
            fps=30,
            talk_title=meta['talk_title'] if meta is not None else 'title',
            speaker_name=meta['speaker_name'] if meta is not None else 'name',
            talk_date=meta['talk_date'] if meta is not None else 'today',
            first_segment_duration=10,
            pauses=pauses,
            output_file_name=output_video_no_container,
            codecFormat='libx264',
            container=self.get_container(),
            flagWrite=True,
            is_test=self.is_test)

        # stop time
        stop = datetime.datetime.now()
        self.processing_time = (stop - start).seconds

Code Example #30
File: app.py Project: irtiza07/python-flask
def process_video():
  loops = [VideoFileClip('./static/backing-video.mp4') for i in range(10)]
  audio = AudioFileClip("combined.wav").set_duration(30)
  final_clip = concatenate_videoclips(loops).set_audio(audio)
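  # note: vorbis audio in an .mp4 container has spotty player support; 'aac'
  # is the more compatible choice for mp4 output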
  final_clip.write_videofile("output.mp4", audio_codec='libvorbis')