Example #1
 def Gen_Video(self, beat_times, mp3path, uuid):
     FONT_URL = '../font/heimi.TTF'
     with open(uuid + '.txt', 'r', encoding='utf-8') as f:
         text_str = f.read()
     word_list = text_str.split('\n')
     clips = []
     for index, beat_time in enumerate(beat_times[:-1]):
         if index >= len(word_list):
             break
         print(f'{index + 1}/{len(beat_times)}——{word_list[index]}')
         text_clip = TextClip(
             word_list[index],
             fontsize=320 // 8,
             color='white',
             size=(320, 640),
             method='caption',
             font=FONT_URL) \
             .set_start(beat_time) \
             .set_end(beat_times[index + 1])
         text_clip = text_clip.set_pos('center')
         clips.append(text_clip)
     final_clip = CompositeVideoClip(clips)
     audio_clip = AudioFileClip(mp3path)
     final_video = final_clip.set_audio(audio_clip)
     final_video.write_videofile(str(uuid) + '.mp4',
                                 fps=30,
                                 codec='mpeg4',
                                 preset='ultrafast',
                                 audio_codec="libmp3lame",
                                 threads=4)
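
The beat_times argument is produced upstream; a minimal sketch of deriving it with librosa, mirroring Example #5 below (the file name is a placeholder):

import librosa

y, sr = librosa.load('music.mp3')
tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
beat_times = list(librosa.frames_to_time(beats, sr=sr))
# One extra timestamp so the last caption has an end time.
beat_times.append(beat_times[-1] + 1)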
Example #2
	def _compose_buffer(self):
		audio = concatenate_audioclips(self.sounds)
		video = CompositeVideoClip(self.images, 
			size=(self.w, self.h)).set_duration(audio.duration)
		video = video.set_audio(audio)
		self.clips.append(video)
		self.sounds, self.images = [], []
		self._push_image(self.background_image)
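
A minimal sketch of the host class this method assumes; the attribute and helper names (clips, sounds, images, w, h, background_image, _push_image) come from the snippet, everything else is an assumption. The _compose_buffer method above would be pasted into this class:

from moviepy.editor import AudioFileClip, ImageClip, concatenate_videoclips

class SlideShow:
	def __init__(self, width, height, background_image):
		self.w, self.h = width, height
		self.background_image = background_image
		self.clips, self.sounds, self.images = [], [], []
		self._push_image(background_image)

	def _push_image(self, path):
		# Assumed helper: queue a full-frame image layer.
		self.images.append(ImageClip(path))

	def _push_sound(self, path):
		# Assumed helper: queue an audio segment.
		self.sounds.append(AudioFileClip(path))

	def render(self, filename):
		# Flush the pending segment, then join the finished segments.
		self._compose_buffer()
		concatenate_videoclips(self.clips).write_videofile(filename, fps=24)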
Example #3
def video_render(txt_file, image_file, sound_file, save_file):
    import re
    from moviepy.editor import ImageClip
    from moviepy.editor import CompositeVideoClip
    from moviepy.editor import CompositeAudioClip
    from moviepy.editor import TextClip
    from moviepy.editor import AudioFileClip
    from moviepy.editor import concatenate_videoclips
    from moviepy.config import change_settings

    # Point MoviePy at the local ImageMagick binary (path must match the system).
    change_settings({"IMAGEMAGICK_BINARY": "/usr/local/bin/convert"})

    # Read the script, skipping blank lines.
    text = []
    with open(txt_file, 'r') as file:
        for line in file:
            if line != "\n":
                text.append(line.rstrip('\n'))

    # Half a second per word, with a 3-second floor per line.
    durs = []
    for line in text:
        res = len(re.findall(r'\w+', line))
        if res / 2 > 3:
            durs.append(res / 2)
        else:
            durs.append(3)
    total_duration = sum(durs)

    # If the audio is too short, overlap a second copy to cover the text.
    a_clip = AudioFileClip(sound_file)
    if a_clip.duration < total_duration:
        new_audioclip = CompositeAudioClip(
            [a_clip, a_clip.set_start(a_clip.duration - 1)]
        ).set_duration(total_duration + 3)
    else:
        new_audioclip = a_clip.set_duration(total_duration + 3)

    screen = (1920, 1080)
    clip_list = []
    for string, duration in zip(text, durs):
        try:
            txt_clip = TextClip(string, fontsize=70, color='white',
                                method='caption', size=screen) \
                .set_duration(duration).set_pos('center')
        except UnicodeEncodeError:
            txt_clip = TextClip("Issue with text", fontsize=70,
                                color='white').set_duration(2)
        clip_list.append(txt_clip)

    final_text_clip = concatenate_videoclips(clip_list, method="compose").set_start(3)

    v_clip = ImageClip(image_file).set_duration(total_duration + 3)
    video = CompositeVideoClip([v_clip, final_text_clip])
    # video = video.set_audio(AudioFileClip('sound/Serenity (1).mp3'))
    video = video.set_audio(new_audioclip)
    video.write_videofile(save_file,
                          codec='libx264',
                          fps=10,
                          threads=4,
                          audio_codec='aac',
                          temp_audiofile='temp-audio.m4a',
                          remove_temp=True)
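
The duration rule gives each line half a second per word with a three-second floor: a 10-word line runs 5 s, while a 4-word line still gets 3 s. A hypothetical invocation, with all paths as placeholders:

video_render('script.txt', 'background.jpg', 'music.mp3', 'out.mp4')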
Example #4
def visualize(model_cls, input_data):
    os.environ["FFMPEG_BINARY"] = "ffmpeg"

    model = model_cls()
    output = model.encode(input_data)
    output = output.reshape(output.shape[0] * 512, 128)
    min_val = np.amin(output)
    max_val_normalized = np.amax(output) - min_val

    last_percentage = -1
    figures = []

    # (graph total duration / graph datapoint count) * (graph datapoint count / graph width)
    figure_snapshot_rate = 40
    tick_to_sample_ratio = 32.87890625  # This is still off sync with the audio, 2:53 becomes 2:58 for some reason
    frame_duration = (figure_snapshot_rate * tick_to_sample_ratio) / 44100
    for i in range(128):
        column = i % 16
        row = int(i / 16)
        figures.append(Figure(60, 60, row, column, frame_duration))

    print(f"Rendering output: {output.shape}")
    for index, entry in enumerate(output):
        should_snapshot = index % figure_snapshot_rate == 0

        for plot_index, plot in enumerate(figures):
            plot.push((entry[plot_index] - min_val) / max_val_normalized)

            if should_snapshot:
                plot.snapshot()

        percentage = int(index / len(output) * 100)
        if percentage % 1 == 0 and last_percentage != percentage:
            last_percentage = percentage
            print(f"Capturing figures: {percentage}%...")

    print(f"{len(figures[0].figures)} figure frames rendered")
    clips = [FigureClip(figure) for figure in figures]

    audio_filename = "vis/output.wav"
    output = model.predict_output(input_data).flatten()
    write_wav(audio_filename, output)

    del model
    backend.clear_session()

    audio = AudioFileClip(audio_filename)
    audio = audio.set_start(0)
    audio = audio.set_duration(
        min(audio.duration, frame_duration * len(figures[0].figures)))

    result = CompositeVideoClip(clips, size=(16 * 66 + 12, 8 * 66 + 12))
    result = result.set_audio(audio)
    result.write_videofile("vis/output.mp4", fps=1 / frame_duration)
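
This function depends on the Figure and FigureClip helpers defined in Example #12 below. As a quick check of the timing arithmetic (not part of the original):

figure_snapshot_rate = 40
tick_to_sample_ratio = 32.87890625
frame_duration = (figure_snapshot_rate * tick_to_sample_ratio) / 44100
print(frame_duration)      # ~0.0298 s per figure frame
print(1 / frame_duration)  # ~33.5 fps, the value passed to write_videofile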
Example #5
def main(width, height, text, music, word_split, output):
    with open(text, 'r', encoding='utf-8') as f:
        text_str = f.read()
    if word_split:
        seg_list = jieba.lcut(text_str)
        punct = set(''':!),.:;?]}¢'"、。〉》」』】〕〗〞︰︱︳﹐、﹒
		﹔﹕﹖﹗﹚﹜﹞!),.:;?|}︴︶︸︺︼︾﹀﹂﹄﹏、~¢
		々‖•·ˇˉ―--′’”([{£¥'"‵〈《「『【〔〖([{£¥〝︵︷︹︻
		︽︿﹁﹃﹙﹛﹝({“‘-—_…/\\''')
        word_list = list(filter(lambda x: x not in punct, seg_list))
    else:
        word_list = text_str.split('\n')

    y, sr = librosa.load(music)
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
    beat_times = list(librosa.frames_to_time(beats, sr=sr))
    beat_times.append(beat_times[-1] + 1)

    clips = []
    for index, beat_time in enumerate(beat_times[:-1]):
        if index >= len(word_list):
            break
        print(f'{index + 1}/{len(beat_times)}——{word_list[index]}')
        text_clip = TextClip(
            word_list[index],
            fontsize=width // 8,
            color='white',
            size=(width, height),
            method='caption',
            font='msyhbd.ttc') \
            .set_start(beat_time) \
            .set_end(beat_times[index + 1])
        text_clip = text_clip.set_pos('center')
        clips.append(text_clip)

    final_clip = CompositeVideoClip(clips)
    audio_clip = AudioFileClip(music)
    final_video = final_clip.set_audio(audio_clip)
    final_video.write_videofile(output,
                                fps=30,
                                codec='mpeg4',
                                preset='ultrafast',
                                audio_codec="libmp3lame",
                                threads=4)
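
With word_split enabled, jieba segments the text so each beat carries one token instead of one full line; a quick illustration (the exact segmentation depends on jieba's dictionary):

import jieba

print(jieba.lcut('今天天气真好'))  # e.g. ['今天', '天气', '真好']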
Example #6
    def genVideo(self):
        # Load the background image as a clip.
        clip = ImageClip(self.imagepath)

        # Generate scrolling credits from a text file.
        # credits = credits1('../../credits/credits.txt', 3 * clip.w / 4)
        credits = credits1(self.textpath, 3 * clip.w / 4)
        scrolling_credits = credits.set_pos(lambda t: ('center', -10 * t))

        # Add the audio track.
        audio = AudioFileClip(self.musicpath)

        # Give the composite an explicit duration; an ImageClip alone has none.
        final = CompositeVideoClip([clip, scrolling_credits]).set_duration(audio.duration)
        final_video = final.set_audio(audio)
        final_video.write_videofile(str(self.name) + '.mp4',
                                    fps=30,
                                    codec='mpeg4',
                                    preset='ultrafast',
                                    audio_codec="libmp3lame",
                                    threads=4)
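
credits1 is not exported by moviepy.editor; in MoviePy 1.x it lives in the credits tool module, so the class presumably imports it as:

from moviepy.video.tools.credits import credits1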
Example #7
def main(width, height, text, music, output):
    with open(text, 'r', encoding='utf-8') as f:
        word_list = f.readlines()
    words = "".join(word_list)
    words_num = len(filter_text(words))

    # Duration allotted to each character
    time_len = librosa.get_duration(filename=music)
    unit_time = time_len / words_num

    # Build a TextClip for each line
    clips = []
    start = 0
    end = 0
    for line in word_list:
        start = end
        line = filter_text(line)
        end = start + unit_time * len(line)
        text_clip = TextClip(
            line,
            fontsize=width // 12,
            color='white',
            size=(width, height),
            method='caption',
            font='msyhbd.ttc')\
            .set_start(start)\
            .set_end(end)
        text_clip = text_clip.set_pos('center')
        clips.append(text_clip)

    # Compose the final video file
    final_clip = CompositeVideoClip(clips)
    audio_clip = AudioFileClip(music)
    final_video = final_clip.set_audio(audio_clip)
    final_video.write_videofile(output,
                                fps=30,
                                codec='mpeg4',
                                preset='ultrafast',
                                audio_codec="libmp3lame",
                                threads=4)
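
filter_text is defined elsewhere in the source project. Judging from how it is used here (counting only the characters that contribute to speaking time), a minimal stand-in might strip punctuation and whitespace:

import re

def filter_text(text):
    # Assumed stand-in, not the original implementation: keep only word
    # characters so punctuation and line breaks do not inflate the timing.
    return re.sub(r'\W+', '', text)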
Example #8

def mixClips(filenames, intervals, targetDuration, audioClip=None):
    # Seed from the current time; passing a datetime object directly to
    # random.seed() raises TypeError on Python 3.11+.
    random.seed(datetime.now().timestamp())

    final_clips = []
    numClips = 0
    targetSubDuration = targetDuration / len(filenames)
    fileDir = ""

    for filename in filenames:

        if (os.path.isabs(filename)):
            fileDir = filename
        else:
            fileDir = clip_dir + filename

        originalClip = VideoFileClip(fileDir)
        originalClip = originalClip.resize((1280, 720))
        final_clips = collectClipFragments(originalClip, final_clips,
                                           targetSubDuration, intervals)

    numClips = len(final_clips)
    random.shuffle(final_clips)

    #Apply effects to each clip
    final_clips[0] = applyEffect(final_clips[0], crossfadein=True)
    for i in range(1, numClips):
        final_clips[i] = final_clips[i].set_start(final_clips[i - 1].end - 0.2)
        final_clips[i] = final_clips[i].crossfadein(0.2)

    final_clips[numClips - 1] = final_clips[numClips - 1].crossfadeout(0.2)

    finalClip = CompositeVideoClip(final_clips)

    if (audioClip is None):
        finalClip = finalClip.without_audio()
    else:
        finalClip = finalClip.set_audio(audioClip)

    return finalClip
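
A hypothetical call; collectClipFragments, applyEffect, and clip_dir are external helpers from the same project, and the shape of intervals is an assumption:

from moviepy.editor import AudioFileClip

music = AudioFileClip('soundtrack.mp3')            # placeholder file
final = mixClips(['a.mp4', 'b.mp4'],               # resolved against clip_dir when relative
                 intervals=[(0, 5), (10, 15)],     # assumed (start, end) pairs in seconds
                 targetDuration=30,
                 audioClip=music)
final.write_videofile('mix.mp4', fps=30)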
Example #9
class BaseClip:
    def __init__(self, clip):
        self.clip = CompositeVideoClip(clips=[clip])
        self.duration = self.clip.duration

    def resize(self, new_size):
        """
        Uses moviepy.video.fx.all.resize module
        :param new_size: Can be wither(width,height) in pixels or a float
                         A scaling factor, like 0.5
                         A function of time returning one of these.
        """
        self.clip = self.clip.resize(new_size)

    def crop(self,
             aspectRatio=None,
             x1=None,
             y1=None,
             x2=None,
             y2=None,
             width=None,
             height=None,
             x_center=None,
             y_center=None):
        """
        Uses the moviepy.video.fx.crop module. From the documentation:
        Returns a new clip in which just a rectangular subregion of the
        original clip is conserved. x1,y1 indicates the top left corner and
        x2,y2 is the lower right corner of the cropped region.
        All coordinates are in pixels. Float numbers are accepted.
        :param aspectRatio: optional preset ratio name (e.g. "16:9", "square")
        :param x1: top left corner x-axis
        :param y1: top left corner y-axis
        :param x2: bottom right corner x-axis
        :param y2: bottom right corner y-axis
        :param width: width of rectangle
        :param height: height of rectangle
        :param x_center: x-axis center
        :param y_center: y-axis center
        """

        # If a preselected aspect ratio was selected.
        if aspectRatio:
            if not x_center:
                x_center = self.clip.w / 2
            if not y_center:
                y_center = self.clip.h / 2

            # Vertical/Phone ratio
            if aspectRatio == "vertical" or aspectRatio == "9:16" or aspectRatio == "phone":
                self.clip = self.clip.crop(width=self.clip.h * 9 / 16,
                                           height=self.clip.h,
                                           x_center=x_center,
                                           y_center=y_center)

            # Square ratio
            elif aspectRatio == "square" or aspectRatio == "1:1":
                self.clip = self.clip.crop(width=self.clip.h,
                                           height=self.clip.h,
                                           x_center=x_center,
                                           y_center=y_center)

            # 4:3/Letterbox ratio
            elif aspectRatio == "4:3" or aspectRatio == "1.33:1" or aspectRatio == "letterbox":
                self.clip = self.clip.crop(width=self.clip.h * 1.33,
                                           height=self.clip.h,
                                           x_center=x_center,
                                           y_center=y_center)

            # 16:9/Widescreen ratio
            elif aspectRatio == "16:9" or aspectRatio == "widescreen" or aspectRatio == "1.77:1":
                self.clip = self.clip.crop(width=self.clip.w,
                                           height=self.clip.w / 1.77,
                                           x_center=x_center,
                                           y_center=y_center)

            # 21:9/Cinemascope ratio
            elif aspectRatio == "cinemascope" or aspectRatio == "21:9" or aspectRatio == "2.33:1":
                self.clip = self.clip.crop(width=self.clip.w,
                                           height=self.clip.w / 2.33,
                                           x_center=x_center,
                                           y_center=y_center)

            # 2.35:1/Anamorphic ratio
            elif aspectRatio == "anamorphic" or aspectRatio == "2.35:1":
                self.clip = self.clip.crop(width=self.clip.w,
                                           height=self.clip.w / 2.35,
                                           x_center=x_center,
                                           y_center=y_center)

            # 2.39:1/DCI ratio
            elif aspectRatio == "DCI" or aspectRatio == "2.39:1":
                self.clip = self.clip.crop(width=self.clip.w,
                                           height=self.clip.w / 2.39,
                                           x_center=x_center,
                                           y_center=y_center)

            # 2.9:1/Digital IMAX ratio
            elif aspectRatio == "Digital IMAX" or aspectRatio == "2.9:1":
                self.clip = self.clip.crop(width=self.clip.w,
                                           height=self.clip.w / 2.9,
                                           x_center=x_center,
                                           y_center=y_center)

            # If an invalid aspect ratio was specified, raise an exception.
            else:
                raise AttributeError("Invalid Aspect Ratio specified: '" +
                                     str(aspectRatio) + "'")

        # If no preset ratio was selected, use other crop parameters.
        else:
            self.clip = self.clip.crop(x1=x1,
                                       y1=y1,
                                       x2=x2,
                                       y2=y2,
                                       width=width,
                                       height=height,
                                       x_center=x_center,
                                       y_center=y_center)

    def add_text(self, text, font_size, color, font, interline, posString,
                 duration):
        """
        Add a layer of text over the selected clip.

        :param text: the string to display
        :param font_size: font size in points
        :param color: text color name or hex string
        :param font: name of the font to use
        :param interline: spacing between lines of text
        :param posString: named position: 'top', 'bottom', 'left', 'right',
                          a corner like 'top-left', or center by default
        :param duration: how long the text stays on screen, in seconds
        :return:
        """
        pos = (self.clip.w / 2, self.clip.h / 2)
        if posString == 'top':
            pos = (self.clip.w / 2, self.clip.h / (self.clip.h - 0.5))
        elif posString == 'left':
            pos = (self.clip.w / (self.clip.w - 0.5), self.clip.h / 2)
        elif posString == 'bottom':
            pos = (self.clip.w / 2, self.clip.h / 1.1)
        elif posString == 'right':
            pos = (self.clip.w / 1.1, self.clip.h / 2)
        elif posString == 'top-left':
            pos = (self.clip.w / (self.clip.w - 0.5),
                   self.clip.h / (self.clip.h - 0.5))
        elif posString == 'top-right':
            pos = (self.clip.w / 1.1, self.clip.h / (self.clip.h - 0.5))
        elif posString == 'bottom-left':
            pos = (self.clip.w / (self.clip.w - 0.5), self.clip.h / 1.1)
        elif posString == 'bottom-right':
            pos = (self.clip.w / 1.1, self.clip.h / 1.1)
        text = TextClip(
            text,
            fontsize=font_size,
            color=color,
            font=font,
            interline=interline).set_pos(pos).set_duration(duration)
        self.clip = CompositeVideoClip([self.clip, text])

    def addAudioFromFile(self, audio, start_time, end_time):
        """
        Uses the moviepy.audio.io.AudioFileClip module. From the documentation:
        An audio clip read from a sound file, or an array. The whole file is not loaded in memory.
        Instead, only a portion is read and stored in memory. This portion includes frames before and after the
        last frames read, so that it is fast to read the sound backward and forward.

        :param audio: audio file taken from directory (mp3, wav, etc)
        :return: adds audio to the clip being worked on (self.clip)

        This method works with the clip that is stored on self.clip, which means it alters
        the clip that is already being made, not a new external clip. This avoids discrepancies
        when making new clips with or without overlay audio.
        """

        thisAudio = AudioFileClip(audio)
        changedAudio = thisAudio.subclip(start_time, end_time)
        self.clip = self.clip.set_audio(changedAudio)

    def addAudioFromClip(self, clipToExtract, start_time, end_time):
        """
        Instead of using an audio file like the method before this, it takes another video such as an mp4 file
        and rips the audio out of it, converts it into an AudioClip, and overlays it on the clip that is
        currently being worked on.

        ****This DOES NOT work with clips made through the VideoFileClip() method, since they have been processed
        as a different file type, and already have their own audio attribute. To access such, one just needs to call
        'clip'.audio, clip being your target clip for audio extraction.

        :param clipToExtract: video from directory (mp4, etc)
        :return: adds audio to the clip being worked on (self.clip)

        """

        thisAudio = AudioFileClip(clipToExtract)
        changedAudio = thisAudio.subclip(start_time, end_time)
        self.clip = self.clip.set_audio(changedAudio)

    def writeVideo(self, filename):
        """
        Write the video to a file.
        :param filename: name and format of output file.
        :return:
        """
        self.clip.write_videofile(filename)

    def create_gif(self, filename):
        # TODO: gif that loops fluidly
        self.clip.write_gif(filename)
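
A short usage sketch of BaseClip (file names are placeholders):

from moviepy.editor import VideoFileClip

base = BaseClip(VideoFileClip('input.mp4'))
base.crop(aspectRatio='square')
base.add_text('Hello', font_size=48, color='white', font='Arial',
              interline=3, posString='bottom', duration=base.duration)
base.addAudioFromFile('music.mp3', start_time=0, end_time=base.duration)
base.writeVideo('output.mp4')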
Example #10
intro_text = intro_text.set_duration(intro_duration)
intro_text = intro_text.set_fps(fps)
intro_text = intro_text.set_pos("center")

# to add audio to your intro:

intro_music = audio_clip.subclip(25, 30)
intro_text = intro_text.set_audio(intro_music)

watermark_size = 50
watermark_text = TextClip(watermark,
                          fontsize=watermark_size,
                          color='black',
                          align='East',
                          size=(w, watermark_size))
watermark_text = watermark_text.set_fps(fps)
watermark_text = watermark_text.set_duration(video_clip.reader.duration)
watermark_text = watermark_text.margin(left=10, right=10, bottom=2, opacity=0)
watermark_text = watermark_text.set_position("bottom")

watermarked_clip = CompositeVideoClip([video_clip, watermark_text],
                                      size=video_clip.size)
watermarked_clip = watermarked_clip.set_duration(video_clip.reader.duration)
watermarked_clip = watermarked_clip.set_fps(fps)
watermarked_clip = watermarked_clip.set_audio(final_audio)

final_clip = concatenate_videoclips([intro_text, watermarked_clip])
final_clip.write_videofile(final_video_path,
                           codec='libx264',
                           audio_codec="aac")
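
This excerpt assumes intro_text, audio_clip, video_clip, watermark, w, fps, final_audio, and final_video_path were set up earlier in the script; a minimal sketch of that setup, with placeholder values:

from moviepy.editor import VideoFileClip, TextClip

video_clip = VideoFileClip('input.mp4')      # placeholder source video
audio_clip = video_clip.audio                # assumed: reuse the video's own audio
w, h = video_clip.size
fps = video_clip.fps
intro_duration = 5                           # assumed intro length in seconds
intro_text = TextClip('My Title', fontsize=70, color='white', size=(w, h))
watermark = 'example.com'                    # placeholder watermark string
final_audio = audio_clip.set_duration(video_clip.duration)
final_video_path = 'final.mp4'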
Example #11
video = combined_clip

#audio
audio_files = []

for i in os.listdir():
    if i.endswith(".mp3") or i.endswith(".wav"):
        audio_files.append(i)

print("Audio files loaded are: " + str(audio_files))

for i, clip in enumerate(audio_files):
    audio_files[i] = AudioFileClip(clip)

# Concatenate the audio tracks into a single clip
combined_audio = concatenate_audioclips(audio_files)

#Set Duration of audioclip
background_audio = combined_audio.set_duration(video.duration)

#combine videos' audio and audio track
video_audio = video.audio
print(background_audio)
print(video_audio)
final_audio = CompositeAudioClip([background_audio, video_audio])
final_clip = video.set_audio(final_audio)

#render
print("Composition successful. Rendering!")
final_clip.write_videofile(output_name, fps=fr, logger=None)
Example #12
def main():
    # backend.set_floatx("float16")
    # backend.set_epsilon(1e-4)

    data = get_dataset(
        block_interval=10000,
        block_size=INPUT_COUNT,
        file_count=30,
        output_size=0,
        shuffle=True,
    )
    train_data = data.train_data.reshape(len(data.train_data), INPUT_COUNT, 1)
    test_data = data.test_data.reshape(len(data.test_data), INPUT_COUNT, 1)

    model = ExperimentalModel()
    model.load()

    if "--train" in sys.argv:
        model.train(train_data, train_data, test_data, test_data)

    if "--plot" in sys.argv:
        plt.subplot(2, 2, 1)
        plt.plot(data.files[0][200])

        plt.subplot(2, 2, 2)
        plt.plot(
            model.predict_output(data.files[0][200].reshape(1, INPUT_COUNT,
                                                            1)).flatten())

        plt.subplot(2, 2, 3)
        plt.plot(data.files[0][210])

        plt.subplot(2, 2, 4)
        plt.plot(
            model.predict_output(data.files[0][210].reshape(1, INPUT_COUNT,
                                                            1)).flatten())

        plt.show()

    if "--out" in sys.argv:
        for i in range(min(len(data.files), 10)):
            inp = data.files[i].reshape(len(data.files[i]), INPUT_COUNT, 1)
            output = model.predict_output(inp).flatten()
            data.write_wav(f"output-{NAME}-{MODEL_ID}-{i}.wav", output)
            print(f"output-{NAME}-{MODEL_ID}-{i}.wav created")

    if "--convert" in sys.argv:
        file_data = get_dataset(
            block_interval=INPUT_COUNT,
            block_size=INPUT_COUNT,
            file_count=107,
            output_size=0,
            shuffle=False,
            just_files=True,
        )
        inp = data.files.reshape()

    if "--vis" in sys.argv:
        os.environ["FFMPEG_BINARY"] = "ffmpeg"
        from moviepy.editor import (CompositeVideoClip, AudioFileClip,
                                    VideoClip)

        file = data.files[0]
        inp = file.reshape(len(file), INPUT_COUNT, 1)
        output = model.encode(inp)
        output = output.reshape(output.shape[0] * 512, 128)
        min_val = np.amin(output)
        max_val_normalized = np.amax(output) - min_val

        class Figure(object):
            def __init__(self, width, height, row, column, frame_duration):
                self.width = width
                self.height = height
                self.row = row
                self.column = column
                self.frame_duration = frame_duration
                self.current_highest = 0
                self.buffer = [0 for i in range(self.width)]
                self.figures = []

            def push(self, val):
                if val > self.buffer[-1]:
                    self.buffer[-1] = val

            def render(self, peaks):
                figure = np.zeros((self.width, self.height), int)
                for column, peak in enumerate(peaks):
                    for fill in range(int(round(peak * (self.height - 1)))):
                        figure[self.height - 1 - fill, column] = 255
                return np.stack((figure, ) * 3, axis=-1)

            def snapshot(self):
                self.figures.append(self.buffer)
                self.buffer = self.buffer[1:self.width] + [0]

        class FigureClip(VideoClip):
            def __init__(self, figure):
                super().__init__()
                self.figure = figure
                self.make_frame = lambda time: self.make_into_frame(time)
                self.start = 0
                self.end = figure.frame_duration * len(figure.figures)
                self.size = (figure.width, figure.height)

                # 16 columns
                # 8 rows
                # padding of 6px
                self.pos = lambda _: (66 * figure.column + 6, 66 * figure.row + 6)

            def make_into_frame(self, time):
                index = int(time / self.figure.frame_duration)
                if index >= len(self.figure.figures):
                    # Past the last snapshot: return a black RGB frame.
                    return np.zeros((self.figure.height, self.figure.width, 3))
                return self.figure.render(self.figure.figures[index])

        last_percentage = -1
        figures = []

        # (graph total duration / graph datapoint count) * (graph datapoint count / graph width)
        figure_snapshot_rate = 40
        tick_to_sample_ratio = 32.87890625  # This is still off sync with the audio, 2:53 becomes 2:58 for some reason
        frame_duration = (figure_snapshot_rate * tick_to_sample_ratio) / 44100
        for i in range(128):
            column = i % 16
            row = int(i / 16)
            figures.append(Figure(60, 60, row, column, frame_duration))

        print(f"Rendering output: {output.shape}")
        for index, entry in enumerate(output):
            should_snapshot = index % figure_snapshot_rate == 0

            for plot_index, plot in enumerate(figures):
                plot.push((entry[plot_index] - min_val) / max_val_normalized)

                if should_snapshot:
                    plot.snapshot()

            percentage = int(index / len(output) * 100)
            if percentage % 1 == 0 and last_percentage != percentage:
                last_percentage = percentage
                print(f"Capturing figures: {percentage}%...")

        print(f"{len(figures[0].figures)} figure frames rendered")
        clips = [FigureClip(figure) for figure in figures]

        audio_filename = "vis/output.wav"
        output = model.predict_output(inp).flatten()
        data.write_wav(audio_filename, output)

        del model
        backend.clear_session()

        audio = AudioFileClip(audio_filename)
        audio = audio.set_start(0)
        audio = audio.set_duration(
            min(audio.duration, frame_duration * len(figures[0].figures)))

        result = CompositeVideoClip(clips, size=(16 * 66 + 12, 8 * 66 + 12))
        result = result.set_audio(audio)
        result.write_videofile("vis/output.mp4", fps=1 / frame_duration)
Example #13
          (','.join(map(str, [s + 1 for s in override_only_scene]))))
print('using audio %s' % INPUT_AUDIO)
print('using onsets %s' % INPUT_ONSETS)

# setup the frame generator
frame_gen = VideoFrameGenerator(SCENES, onset_frame_ampl)
gen_clip = VideoClip(lambda t: frame_gen.make_video_frame(t),
                     duration=duration)

# setup intro text
introtext = "kiriloff – fortschritt"
introtext_clip = TextClip(introtext,
                          color='white',
                          font='Menlo-Bold',
                          fontsize=20 if gen_clip.size[0] <= 640 else 45,
                          method='caption',
                          size=(frame_gen.w, frame_gen.h))

# create full clip as composite of generate frames and intro text
main_clip = CompositeVideoClip([
    gen_clip,
    introtext_clip.set_start(0.5).set_end(6.5).crossfadein(0.5).crossfadeout(
        0.5)
])

# generate frames
main_clip = main_clip.set_audio(audioclip).set_duration(gen_clip.duration)
main_clip.write_videofile(OUTPUT_VIDEO, fps=CLIP_FPS)

print('done.')
Example #14
def main():
    """程序的主函数
    程序运行的主要逻辑
    """
    check_dirs(images_dir, bgm_dir)
    if mkdirs(tmp_dir):
        console.log(tmp_dir + "创建成功")
    if mkdirs(tmp_image_dir):
        console.log(tmp_image_dir + "创建成功")
    if mkdirs(tmp_music_dir):
        console.log(tmp_music_dir + "创建成功")
    if mkdirs(output_dir):
        console.log(output_dir + "创建成功")
    img_paths = read_dir(images_dir)
    for img_file in track(img_paths, description="调整图片中..."):
        resize_image(img_file, tmp_image_dir, display_size, bg_color)

    img_paths = read_dir(tmp_image_dir)
    bgm_paths = read_dir(bgm_dir)

    # Compute the total video and music durations
    video_total_time, music_total_time = computed_time(img_paths,
                                                       per_img_display_time,
                                                       start_img_duration,
                                                       end_img_duration)

    clips = []

    # Create the opening title text
    console.log("Creating the opening text")

    video_start_info_img_path = make_info_img(
        display_size,
        bg_color,
        font_color,
        des_text,
        font_path,
        100,
        tmp_image_dir,
        "start_info.jpg",
    )
    video_start_info_img_clip = make_image_clip(
        video_start_info_img_path,
        start_img_duration,
        fps,
        0,
        start_img_duration,
        start_img_fade_time,
    )
    clips.append(video_start_info_img_clip)
    console.log("开头文字创建完毕")

    count = 0
    for img_path in track(img_paths, description="Adding image frames..."):

        tmp_space_start = per_img_display_time * count + start_img_duration
        tmp_space_end = per_img_display_time * (count + 1) + start_img_duration
        img_clip = make_image_clip(
            img_path,
            per_img_display_time,
            fps,
            tmp_space_start,
            tmp_space_end,
            per_img_fade_time,
        )
        clips.append(img_clip)
        count = count + 1

    # Create the closing text
    console.log("Creating the closing text")

    video_end_info_img_path = make_info_img(
        display_size,
        bg_color,
        font_color,
        end_text,
        font_path,
        100,
        tmp_image_dir,
        "end_info.jpg",
    )
    video_end_info_img_clip = make_image_clip(
        video_end_info_img_path,
        start_img_duration,
        fps,
        video_total_time - end_img_duration,
        video_total_time,
        end_img_fade_time,
    )
    clips.append(video_end_info_img_clip)
    console.log("片尾文字创建完毕")

    bgm_tmp_file_path = make_bgm(bgm_paths, tmp_music_dir, "bgm.mp3",
                                 music_total_time, bgm_fade_time)
    console.log("Processed audio file path: " + bgm_tmp_file_path)
    bgm_clip = AudioFileClip(bgm_tmp_file_path)
    console.log("Background music clip ready")

    # Composite the clips
    console.log("Merging frame clips into the video file")
    final_clip = CompositeVideoClip(clips)
    console.log("Merging in the background music clip")
    final_clip = final_clip.set_audio(bgm_clip)
    console.log("Exporting the video file to " + output_file)
    final_clip.write_videofile(output_file)
    # Remove intermediate files
    console.log("Clearing intermediate cache files")
    rmdirs(tmp_dir)
    console.log("Intermediate cache files cleaned up")