Example #1
    def make_crab(self, t, u_id):
        """Non blocking crab rave video generation from DankMemer bot
        
        https://github.com/DankMemer/meme-server/blob/master/endpoints/crab.py
        """
        fp = str(cog_data_path(self) / "Verdana.ttf")
        clip = VideoFileClip(str(cog_data_path(self)) + "/template.mp4")
        text = TextClip(t[0], fontsize=48, color="white", font=fp)
        text2 = (
            TextClip("____________________", fontsize=48, color="white", font=fp)
            .set_position(("center", 210))
            .set_duration(15.4)
        )
        text = text.set_position(("center", 200)).set_duration(15.4)
        text3 = (
            TextClip(t[1], fontsize=48, color="white", font=fp)
            .set_position(("center", 270))
            .set_duration(15.4)
        )

        video = CompositeVideoClip(
            [clip, text.crossfadein(1), text2.crossfadein(1), text3.crossfadein(1)]
        ).set_duration(15.4)
        video.write_videofile(
            str(cog_data_path(self)) + f"/{u_id}crabrave.mp4",
            threads=1,
            preset="superfast",
            verbose=False,
            logger=None,
            temp_audiofile=str(cog_data_path(self) / "crabraveaudio.mp3")
        )
        clip.close()
        video.close()
        return True
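The render itself is synchronous; the "non-blocking" part comes from running it off the event loop. A minimal sketch of how a cog might offload it, assuming a discord.py-style async command (the names crabrave_command and ctx are illustrative, not from the snippet):

    async def crabrave_command(self, ctx, top_text: str, bottom_text: str):
        # Hypothetical async wrapper: run the blocking render in a worker
        # thread so the bot's event loop stays responsive.
        import asyncio
        import functools

        task = functools.partial(self.make_crab, (top_text, bottom_text), ctx.author.id)
        await asyncio.get_event_loop().run_in_executor(None, task)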
Example #2
def save_out(tracks, outfile=None, filetype='mp4'):

    out = []

    vids = [t for t in tracks if t['type'] == 'vid']
    texts = [t for t in tracks if t['type'] == 'text']

    for v in vids:
        c = VideoFileClip(v['content']).subclip(v['in'],
                                                v['in'] + v['duration'])
        c = c.set_start(v['start'])
        out.append(c)

    size = out[0].size

    for t in texts:
        c = create_sub(t['content'], size, rect_offset=195, min_height=55)
        c = c.set_start(t['start'])
        c = c.set_duration(t['duration'])
        out.append(c)

    final_clip = CompositeVideoClip(out)
    if outfile is None:
        outfile = 'msg_' + str(int(time.time())) + '.mp4'
    if filetype == 'gif':
        outfile = outfile.replace('.mp4', '.gif')
        final_clip.speedx(1.7).write_gif(outfile, fps=7, loop=1)
    else:
        final_clip.write_videofile(outfile, fps=24, codec='libx264')
    return outfile
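The tracks argument is a list of dicts; from the keys read above, video tracks need 'type', 'content', 'in', 'duration', and 'start', while text tracks need 'type', 'content', 'start', and 'duration'. A hypothetical call (file names are made up):

    tracks = [
        {'type': 'vid', 'content': 'intro.mp4', 'in': 0.0, 'duration': 5.0, 'start': 0.0},
        {'type': 'vid', 'content': 'reply.mp4', 'in': 2.0, 'duration': 4.0, 'start': 5.0},
        {'type': 'text', 'content': 'hello there', 'start': 0.5, 'duration': 2.0},
    ]
    outfile = save_out(tracks, filetype='mp4')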
Example #3
def generate_video(saved_model_path, video_category=None):
    """Uses the trained model to predict the frames and produce a video out of them"""
    # load model
    model = load_model(saved_model_path)

    which_one = video_category
    train_files, test_files = get_train_test_files(which=which_one)
    test_gen = get_data_gen(files=test_files,
                            timesteps=timesteps,
                            batch_size=batch_size,
                            im_size=(im_width, im_height))

    y_true = []
    y_pred = []

    for _ in range(200):
        x, y = next(test_gen)
        y_true.extend(y)

        predictions = model.predict_on_batch(x)
        y_pred.extend(predictions)

    clip1 = ImageSequenceClip([denormalize(i) for i in y_true], fps=5)
    clip2 = ImageSequenceClip([denormalize(i) for i in y_pred], fps=5)
    clip2 = clip2.set_position((clip1.w, 0))  # place predictions to the right of the ground truth
    video = CompositeVideoClip([clip1, clip2], size=(clip1.w * 2, clip1.h))
    video.write_videofile(
        "{}.mp4".format(which_one if which_one else "render"), fps=5)
Example #4
    def generate(self, avatars, text, usernames, kwargs):
        name = uuid.uuid4().hex + '.mp4'
        if len(text) >= 400:
            text = text[:400] + '...'

        @after_this_request
        def remove(response):  # pylint: disable=W0612
            try:
                os.remove(name)
            except (FileNotFoundError, OSError, PermissionError):
                pass

            return response

        clip = VideoFileClip("assets/letmein/letmein.mp4")



        textclip = TextClip(txt=text, bg_color='White', fontsize=32, font='Verdana', method='caption', align='west', size=(clip.size[0], None)).set_duration(clip.duration)

        color = ColorClip((clip.size[0], textclip.size[1]), color=(255, 255, 255), ismask=False).set_duration(clip.duration)

        video = CompositeVideoClip([clip.set_position(("center", textclip.size[1])), color, textclip],
                                   size=(clip.size[0], textclip.size[1] + clip.size[1]))

        video.write_videofile(name, threads=4, preset='superfast', verbose=False)
        clip.close()
        video.close()
        return send_file(name, mimetype='video/mp4')
Example #5
def createPartition(index, videoData):
    os.chdir("videos/" + str(index))
    # Get the start and end second from the YAML config and edit all of the videos into clips that can be concatenated.
    start = videoData['startFrame']
    end = videoData['endFrame']
    print(start, end)
    mp4 = findmp4()
    print(mp4)
    os.chdir(cwd)
    fileLoc = os.path.join('videos', str(index), mp4)
    video = VideoFileClip(fileLoc).subclip(start - 4, end + 2).fx(
        vfx.fadeout, duration=1).fx(vfx.fadein, duration=5)
    # Make the text. Many more options are available.
    txt_clip = (TextClip(videoData['date'],
                         fontsize=35,
                         color='white',
                         font='Hans Kendrick')
                .set_position(("center", 80))
                .set_duration(5)
                .fx(vfx.fadeout, duration=1.5)
                .fx(vfx.fadein, duration=3))

    result = CompositeVideoClip([video, txt_clip])  # Overlay text on video

    result.write_videofile(
        os.path.join("partitions", str(index) + ".mp4")
    )  # Write the partition into the partitions folder
    os.chdir(cwd)
    video.close()
    txt_clip.close()
    result.close()
Example #6
def centerMerge():
    clip1 = VideoFileClip("text.mp4", audio=False).resize([540, 1024])
    print(clip1.duration)
    clip3 = VideoFileClip("cut_video.mp4", has_mask=True, audio=True)
    video = CompositeVideoClip([clip1, clip3.set_position('center')])
    video.write_videofile("centermergr.mp4")  # 先不加音频
    video.close()
Example #7
def write_video_file(file_path, pred_label_score, gt_info, save_dir):
    video_clip = VideoFileClip(file_path)
    text_clip = (TextClip(txt=pred_label_score,
                          font='utils/SimHei.ttf',
                          color='white',
                          fontsize=32,
                          bg_color='black',
                          align='West')
                 .set_pos(("left", "top"))
                 .set_duration(video_clip.duration))
    compose_list = [video_clip, text_clip]
    if gt_info != "":
        gt_text_clip = (TextClip(txt=gt_info,
                                 font='utils/SimHei.ttf',
                                 color='white',
                                 fontsize=32,
                                 bg_color='black',
                                 align='East')
                        .set_pos(("right", "bottom"))
                        .set_duration(video_clip.duration))
        compose_list.append(gt_text_clip)
    result = CompositeVideoClip(compose_list)
    video_name = os.path.basename(file_path)
    result.write_videofile(save_dir + "/" + video_name,
                           fps=25,
                           codec='libx264',
                           audio_codec='aac',
                           temp_audiofile='temp-audio.m4a',
                           remove_temp=True)
Example #8
def main(url, output):
    driver = webdriver.Chrome()
    remote_url = url
    driver.get(remote_url)
    
    png = chrome_takeFullScreenshot(driver)
    with open("website_image.png", 'wb') as f:
        f.write(png)

    driver.close()

    clip = ImageClip('website_image.png')
    
    video_width = int(clip.size[0] + 800)
    video_height = int(video_width/1.5)

    bg_clip = ColorClip(size=(video_width, video_height), color=[228, 220, 220])

    scroll_speed = 180
    total_duration = (clip.h - 800)/scroll_speed

    # Crop a moving 800-pixel-high window out of the screenshot so the page appears to scroll
    fl = lambda gf, t: gf(t)[int(scroll_speed*t):int(scroll_speed*t)+800, :]
    clip = clip.fl(fl, apply_to=['mask'])

    video = CompositeVideoClip([bg_clip, clip.set_pos("center")])
    video.duration = total_duration
    if not output.endswith('.mp4'):
        output += '.mp4'
    video.write_videofile(output, fps=26)
    os.remove('website_image.png')
Example #9
def merge_videos(filepath_1, filepath_2, filepath_out):
    """
    Overlay second video in the bottom right corner of the first video.
    """
    # If the video generation failed, merge fails
    if not os.path.isfile(filepath_1) or not os.path.isfile(filepath_2):
        print("Error: The filepath(s) are invalid.")
        return False

    # Merge original lesson video with Wav2Lip result video
    clip1 = VideoFileClip(fr'{filepath_1}')  # Use ./ instead of /
    clip2 = VideoFileClip(fr'{filepath_2}')

    clip2 = resize_clip_wrt(clip1, clip2)
    composite_clip = CompositeVideoClip([
        clip1,
        clip2.set_position(("right", "bottom")).set_start(0).crossfadein(1)
    ])

    # Use a temp audio file if audio is not working
    # It seems overriding an existing file will result in second video not running correctly
    try:
        # final_clip.write_videofile(r'./results/result_voice.mp4')
        composite_clip.write_videofile(fr'{filepath_out}',
                                       codec='libx264',
                                       audio_codec='aac',
                                       temp_audiofile='temp-audio.m4a',
                                       remove_temp=True)
        return True
    except Exception as e:
        print(e)
        return False
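resize_clip_wrt is an external helper not shown here; a plausible sketch, assuming it scales the overlay relative to the base clip (the one-third-width ratio is an assumption):

    def resize_clip_wrt(base_clip, overlay_clip):
        # Hypothetical helper: shrink the overlay to a third of the base
        # clip's width, preserving aspect ratio.
        return overlay_clip.resize(width=base_clip.w / 3)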
Example #10
def make_test_vid(note_vid, segments, fname):

    t = 0

    clips = []
    padding = 2
    i = 0

    for note, (start, end) in segments:
        clip = note_vid.subclip(start, end)
        clip = clip.set_start(t)

        clips.append(clip)

        txt = (TextClip("%d %s" % (i, note),
                        color='white',
                        font='Ubuntu-Bold',
                        fontsize=22).margin(1).margin(
                            top=30, left=30, opacity=0.0).set_pos(
                                ('left',
                                 'top')).set_duration(end - start +
                                                      padding).set_start(t))
        clips.append(txt)

        t += (end - start) + padding
        i += 1

        print(t, i)

    full_video = CompositeVideoClip(clips)
    print('full length %f' % full_video.duration)
    full_video.write_videofile(fname, threads=20)
Example #11
def create_mtg_gif(name, id, border):
    if border == 'm':  # Modern (post-8th Ed)
        card_upper_corner = (19, 38)
        gif_width = 202 - card_upper_corner[0]
        gif_height = 172 - card_upper_corner[1]
    elif border == 'c':  # Current (post-Magic 2015)
        card_upper_corner = (17, 34)
        gif_width = 204 - card_upper_corner[0]
        gif_height = 173 - card_upper_corner[1]
    else:  # Old (pre-8th Ed)
        card_upper_corner = (25, 30)
        gif_width = 196 - card_upper_corner[0]
        gif_height = 168 - card_upper_corner[1]

    mtg_card = Image.open(BytesIO(requests.get(get_mtg_image(id)).content))
    mtg_card = ImageClip(np.asarray(mtg_card)).resize((222, 310))

    get_giphy_gif(name)
    giphy_gif = (VideoFileClip(
        'giphy_gif.mp4',
        target_resolution=(gif_height, gif_width)).set_pos(card_upper_corner))

    if giphy_gif.duration < 2:
        giphy_gif = giphy_gif.fx(loop, n=1 + int(2 // giphy_gif.duration))

    mtg_gif = CompositeVideoClip([mtg_card, giphy_gif])
    mtg_gif = mtg_gif.set_start(0).set_duration(giphy_gif.duration)
    # mtg_gif.write_gif("mtg_gif.gif")
    mtg_gif.write_videofile("mtg_gif.mp4",
                            codec='libx264',
                            bitrate=str(np.power(10, 7)),
                            verbose=False,
                            progress_bar=False,
                            audio=False,
                            ffmpeg_params=['-pix_fmt', 'yuv420p'])
Example #13
def mov_change_bg(avi, mov, name):
    logging.info('start processing {}'.format(name))
    clip1 = VideoFileClip(avi)
    clip3 = VideoFileClip(mov, has_mask=True)
    video = CompositeVideoClip([clip1, clip3])
    video.write_videofile(name, audio=True)
    video.close()
    logging.info('end processing {}'.format(name))
Example #14
def concat_video(file, count, td, output):
    d = clip_duration(file)
    video = CompositeVideoClip([
        VideoFileClip(file).set_start((d * i) - (td * i)).fx(
            transfx.crossfadein, td * (0 if i == 0 else 1))
        for i in range(0, count)
    ])
    video.write_videofile(output)
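clip_duration is not shown in the snippet; a minimal sketch, assuming it simply probes the file with moviepy:

    def clip_duration(path):
        # Hypothetical helper assumed by concat_video: duration in seconds
        clip = VideoFileClip(path)
        duration = clip.duration
        clip.close()
        return duration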
Example #15
def mergeClips():
    videoFileNames = readClip()
    clips = [VideoFileClip(name) for name in videoFileNames]

    print(clips)
    result = CompositeVideoClip([clips[0], clips[1], clips[2]])
    result.write_videofile("mergedNew.mp4", fps=60)
Example #16
def video_render(txt_file, image_file, sound_file, save_file):
        import re
        from moviepy.editor import ImageClip
        from moviepy.editor import CompositeVideoClip
        from moviepy.editor import CompositeAudioClip
        from moviepy.editor import TextClip
        from moviepy.editor import AudioFileClip
        from moviepy.editor import concatenate
        from moviepy.config import change_settings
        change_settings({"IMAGEMAGICK_BINARY": "/usr/local/bin/convert"})
        text=[]
        
        with open(txt_file,'r') as file:
            for lines in file:
                if lines!="\n":
                    text.append(lines.rstrip('\n'))
        durs = []
        for i in text:
            # Estimate each line's duration from its word count (~2 words/sec, 3 s minimum)
            res = len(re.findall(r'\w+', i))
            if res/2 > 3:
                durs.append(res/2)
            else:
                durs.append(3)
        total_duration=sum(durs)
        
        a_clip = AudioFileClip(sound_file)
        if a_clip.duration<total_duration:
            new_audioclip = CompositeAudioClip([a_clip, a_clip.set_start(a_clip.duration-1)]).set_duration(total_duration+3)
        else:
            new_audioclip=a_clip.set_duration(total_duration+3)
        
        screen=(1920,1080)
        clip_list = []
        i=0
        for string in text:
            duration=durs[i]
            i+=1
            try:
                txt_clip = TextClip(string, fontsize = 70, color = 'white', method='caption',size=screen ).set_duration(duration).set_pos('center')
                clip_list.append(txt_clip)
            except UnicodeEncodeError:
                txt_clip = TextClip("Issue with text", fontsize = 70, color = 'white').set_duration(2) 
                clip_list.append(txt_clip)
        
        final_text_clip = concatenate(clip_list, method = "compose").set_start(3)  
            
        v_clip = ImageClip(image_file).set_duration(total_duration+3)
        video=CompositeVideoClip([v_clip, final_text_clip])
        # video = video.set_audio(AudioFileClip('sound/Serenity (1).mp3'))
        video = video.set_audio(new_audioclip)
        video.write_videofile(save_file, 
                              codec='libx264',
                              fps=10, 
                              threads=4,
                              audio_codec='aac', 
                              temp_audiofile='temp-audio.m4a', 
                              remove_temp=True
                              )
Example #17
def add_text(content, font_size):
    '''
    add text on the top of video stream
    '''
    txt_clip = (TextClip(content, fontsize=font_size, color='white')
                .set_position('top')
                .set_duration(video.duration))
    result = CompositeVideoClip([video, txt_clip])
    result.write_videofile(new_file)
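add_text reads the module-level names video and new_file; a minimal sketch of that setup (file names are hypothetical):

    from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip

    video = VideoFileClip("input.mp4")    # source clip read by add_text
    new_file = "input_with_text.mp4"      # output path written by add_text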
Example #18
def handleVideo(file_content):
    for i in range(0, len(file_content)):
        arr = file_content[i][0].split("-->")
        # List all files under the audio folder; a random track will be inserted into the cut video
        musicFiles = os.listdir(arr[1])
        musicFilesTempLength = len(musicFiles) - 1
        # Video info
        clip = VideoFileClip(arr[0]).subclip(arr[2], arr[3])
        # If the music folder holds only one audio file, take index 0; otherwise pick a random one
        if musicFilesTempLength > 0:
            musicFilesIndex = random.randint(0, musicFilesTempLength)
        else:
            musicFilesIndex = 0

        audioClip = AudioFileClip(arr[1] + musicFiles[musicFilesIndex])
        video = clip.set_audio(audioClip)
        videoDuration = int(video.duration)
        # Get the mp3 length; start the cut no later than (length - video.duration)
        # so that a full video.duration of audio can be taken
        # Randomly choose an audio file from the Music folder
        musicInfo = eyed3.load(arr[1] + musicFiles[musicFilesIndex])
        # mp3 length in seconds
        musicSecs = int(format(musicInfo.info.time_secs))
        # Random start position: start + video.duration must not exceed the mp3 length
        # (the last few seconds are mostly silent, so avoid cutting right at the end)
        # If the clipped video is longer than the audio, take the audio from second 0
        if musicSecs - videoDuration <= 0:
            musicStart = 0
            musicEnd = musicSecs

        else:
            musicStart = random.randint(0, musicSecs - videoDuration)
            musicEnd = musicStart + videoDuration

        # Cut the audio from the random start position and apply fade-in/fade-out
        video.audio = video.audio.subclip(musicStart, musicEnd)
        try:
            video.audio = audio_fadein(video.audio, 2.0)
            video.audio = audio_fadeout(video.audio, 2.0)
        except Exception as e:
            with open('error.txt', mode="w+") as f:
                f.write(str(e))

        # Set the output size and center the video
        result = CompositeVideoClip([video.set_pos(('center'))],
                                    size=(1366, 728))
        # Create the output directory
        outputVideoPath = arr[1] + "../outputVideo/"
        isExists = os.path.exists(outputVideoPath)
        if not isExists:
            os.makedirs(outputVideoPath)
        result.write_videofile(outputVideoPath + "outputVideo" + str(i) +
                               ".mp4",
                               codec="libx264",
                               fps=15,
                               bitrate="512K",
                               audio_fps=44100,
                               audio_bitrate="128k")
Example #19
	def scaleVideo(self, fileLocation):
		(name, extension) = self.__removeExtension(fileLocation)

		clip = VideoFileClip(fileLocation)
		currentFPS = clip.fps
		# clip = clip.resize(0.75)
		final = CompositeVideoClip([clip])

		newPath = name + "-scaled" + extension
		final.write_videofile(newPath, fps=currentFPS/2, codec='libx264')
Example #20
def compose(text, duration=4.0, outname="sunset_words.mp4"):
    start = 108
    end = start + duration
    clip1 = VideoFileClip("sunset.mp4").subclip(start, end).resize(
        (1920 / 2, 1080 / 2))
    clip2 = TextClip(text, size=clip1.size).set_duration(duration)

    composition = CompositeVideoClip([clip1, clip2])

    composition.write_videofile(outname)
Example #21
def merge_videos(path_source_1, path_source_2, mixed_path):

    clip01 = VideoFileClip(path_source_1)
    clip02 = VideoFileClip(path_source_2)

    clip01 = clip01.resize(0.60)
    clip02 = clip02.resize(0.60)

    final_clip = CompositeVideoClip([clip01.set_position(("left","center")), clip02.set_position(("right","center"))], size=(720, 460))
    final_clip.write_videofile(mixed_path)
Example #22
def visualize(model_cls, input_data):
    os.environ["FFMPEG_BINARY"] = "ffmpeg"

    model = model_cls()
    output = model.encode(input_data)
    output = output.reshape(output.shape[0] * 512, 128)
    min_val = np.amin(output)
    max_val_normalized = np.amax(output) - min_val

    last_percentage = -1
    figures = []

    # (graph total duration / graph datapoint count) * (graph datapoint count / graph width)
    figure_snapshot_rate = 40
    tick_to_sample_ratio = 32.87890625  # This is still off sync with the audio, 2:53 becomes 2:58 for some reason
    frame_duration = (figure_snapshot_rate * tick_to_sample_ratio) / 44100
    for i in range(128):
        column = i % 16
        row = int(i / 16)
        figures.append(Figure(60, 60, row, column, frame_duration))

    print(f"Rendering output: {output.shape}")
    for index, entry in enumerate(output):
        should_snapshot = index % figure_snapshot_rate == 0

        for plot_index, plot in enumerate(figures):
            plot.push((entry[plot_index] - min_val) / max_val_normalized)

            if should_snapshot:
                plot.snapshot()

        percentage = int(index / len(output) * 100)
        if percentage % 1 == 0 and last_percentage != percentage:
            last_percentage = percentage
            print(f"Capturing figures: {percentage}%...")

    print(f"{len(figures[0].figures)} figure frames rendered")
    clips = [FigureClip(figure) for figure in figures]

    audio_filename = f"vis/output.wav"
    output = model.predict_output(input_data).flatten()
    write_wav(audio_filename, output)

    del model
    backend.clear_session()

    audio = AudioFileClip(audio_filename)
    audio = audio.set_start(0)
    audio = audio.set_duration(
        min(audio.duration, frame_duration * len(figures[0].figures)))

    result = CompositeVideoClip(clips, size=(16 * 66 + 12, 8 * 66 + 12))
    result = result.set_audio(audio)
    result.write_videofile("vis/output.mp4", fps=1 / frame_duration)
Example #23
def mergeVideos():
    clip1 = VideoFileClip("text.mp4").subclip(0, 15)
    print(clip1.duration)
    clip2 = VideoFileClip("mhls.mp4").subclip(0, 15).resize(0.5)
    # Lay the two clips out side by side; set_pos takes a single (x, y) tuple
    video = CompositeVideoClip([clip1.set_pos(("left", "center")),
                                clip2.set_pos(("right", "center"))],
                               size=(clip1.w + clip1.w, clip2.h))

    video.write_videofile("merge_video.mp4")
Example #24
def write_text(text_chunks, original_video, output, font_size=15):
    txt_clips = []
    for i in range(len(text_chunks)):
        txt_clips.append(TextClip(text_chunks[i], fontsize=font_size, color="yellow")
                         .set_position('bottom')
                         .set_duration(3)
                         .set_start(i * 3))
    clips = [original_video]
    clips.extend(txt_clips)
    result = CompositeVideoClip(clips)
    result.write_videofile(output)
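Each chunk occupies one three-second subtitle slot, so the caller pre-splits the text; a hypothetical invocation (chunking into six-word pieces is an arbitrary choice):

    transcript = "words of the transcript go here ..."
    words = transcript.split()
    text_chunks = [" ".join(words[i:i + 6]) for i in range(0, len(words), 6)]

    original_video = VideoFileClip("lecture.mp4")
    write_text(text_chunks, original_video, "lecture_subtitled.mp4", font_size=24)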
Example #25
def make_sentence(sayer, text):
    word_list = text.lower().split(" ")

    list_dir = os.listdir(s_dir)
    sentences = sorted(list_dir)
    wrote = 0
    cuts_dirs = []
    cuts = []
    not_found = []
    for single in word_list:
        print "Word = " + single
        count = 0
        flag = 0
        for sen in sentences:
            print "Searhing sentence #" + str(count)
            words = os.listdir(s_dir + "/" + sen + "/words")
            words_dir = s_dir + "/" + sen + "/words"
            for word in words:
                if single == word and (single in sen.lower()):
                    cut_single = words_dir + "/" + single + "/0.mp4"
                    cuts_dirs.append(cut_single)
                    flag = 1
                    break
            count += 1
            if flag == 1:
                break

        if flag == 0:
            not_found.append(single)
    if len(not_found) != 0:
        print("Keywords that are not found are: " + str(not_found))
        return
    video_cuts = []
    for file in cuts_dirs:
        print(file)
        video = VideoFileClip(file)
        video = video.resize((1280, 720))
        video_cuts.append(video)
    final = concatenate_videoclips(video_cuts)

    word_text = (TextClip(text, fontsize=40, color="white", bg_color="black")
                 .set_pos("bottom")
                 .set_duration(final.duration))
    final = CompositeVideoClip([final, word_text])
    if not os.path.exists(sayer + "* " + text):
        os.makedirs(sayer + "* " + text, 0o777)
    final.write_videofile(sayer + "* " + text + "/" + text + ".mp4",
                          codec='libx264',
                          audio_codec='aac',
                          temp_audiofile=sayer + "* " + text + "/" + text +
                          ".m4a",
                          remove_temp=True)
Example #26
def add_zm(fg_in_bg_avi, zm_video_path, pictrue_name, video_name, output):
    clip1 = VideoFileClip(fg_in_bg_avi)
    clip3 = VideoFileClip(zm_video_path, has_mask=True)
    video = CompositeVideoClip([clip1, clip3])
    try:
        os.mkdir(output)
    except OSError:
        pass
    video_name = video_name.split('/', 2)[-1]
    name = output + '/' + pictrue_name + video_name + ".mp4"
    video.write_videofile(name, audio=True)  # not adding extra audio for now
    video.close()
    return name
Example #27
def textclip():
    text_1 = (TextClip("剪辑不易 \n 关注啊", fontsize=70, color='white', font="FZZJ.TTF")
              .set_position(lambda t: ('center', 100 - t))
              .set_duration(15))
    text_2 = (TextClip("赶快右侧 \n 点赞啦", fontsize=70, color='red', font="FZZJ.TTF")
              .set_position(lambda t: ('center', 900 - t))
              .set_duration(15))

    clip1 = VideoFileClip("cut_video.mp4").fx(vfx.mirror_x)
    print(clip1.duration)
    result = CompositeVideoClip([clip1.set_position('center'), text_1, text_2],
                                size=[540, 1024])  # Overlay text on video
    result.write_videofile("final.mp4")
Example #28
def write_video(out_path, audio_clip, lyric_clips):
    for c in lyric_clips:
        print(c.duration)
    print(audio_clip.duration)
    full_text_clip = concatenate_videoclips(lyric_clips)
    video = CompositeVideoClip(clips=[
        ColorClip(size=VIDEO_SIZE,
                  color=BACKGROUND_COLOR,
                  duration=audio_clip.duration),
        full_text_clip,
    ], )
    video.fps = 24
    video.audio = audio_clip
    video.write_videofile(out_path)
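VIDEO_SIZE and BACKGROUND_COLOR are module-level constants not shown in the snippet; plausible definitions:

    VIDEO_SIZE = (1280, 720)      # output frame size in pixels (assumed)
    BACKGROUND_COLOR = (0, 0, 0)  # RGB background behind the lyrics (assumed)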
Example #29
def make_video(animations, video, path, video_file_name):
    background = VideoFileClip(path + video['name'])
    clips = [background]
    for index, animation in enumerate(animations):
        to_clip = path + animation['name']
        start_time = animation['start_time']
        clip = VideoFileClip(to_clip)
        masked_clip = clip.fx(vfx.mask_color, color=[0, 0, 0], thr=35, s=40)
        clips.append(masked_clip.set_start(start_time))

    video = CompositeVideoClip(clips)
    video.write_videofile(f"{path}{video_file_name}.mp4")
    video.close()
Example #30
def vid_watermark(vid, out, kind, caption, channel) -> str:
    try:
        logo_dir = "logo/{}.png".format(channel.name)
        pattern = re.compile(r':\d:')
        div = 5
        find = int(re.findall(pattern, caption)[0][1:-1]) if re.search(
            pattern, caption) else channel.pos
        audio = kind != 'animation'
        clip = VideoFileClip(vid, audio=audio)  # pass by keyword: the second positional parameter is has_mask
        w, h = clip.size

        pos = {
            1: ('left', 'top'),
            2: ('center', 'top'),
            3: ('right', 'top'),
            4: ('left', 'center'),
            5: ('center', 'center'),
            6: ('right', 'center'),
            7: ('left', 'bottom'),
            8: ('center', 'bottom'),
            9: ('right', 'bottom')
        }
        size = h // div if w > h else w // div

        if os.path.exists(logo_dir):
            logo = ImageClip(logo_dir) \
                .set_duration(clip.duration) \
                .resize(width=size, height=size) \
                .set_pos(pos.get(find))
        else:
            logo = logo_by_name(channel)\
                .set_duration(clip.duration)\
                .resize(width=size, height=size)\
                .set_pos(pos.get(find))

        final = CompositeVideoClip([clip, logo])
        final.write_videofile(filename=out,
                              logger=None,
                              verbose=False,
                              threads=multiprocessing.cpu_count())

        if re.search(pattern, caption):
            caption = id_remove(re.sub(pattern, '', caption), channel)
        else:
            caption = id_remove(caption, channel)
        return caption

    except Exception as E:
        logging.error('vid_watermark {}'.format(E))
Example #31
    def render(self, talk, act, exp, move, render_video, dft_exp_dt=0.2):
        if self.cache_dir is not None:
            cache_video = '{}.mp4'.format(
                get_macro_act_key(talk, act, exp, move))
            cache_video = os.path.join(self.cache_dir, cache_video)
            if os.path.exists(cache_video):
                clip = VideoFileClip(cache_video)
                clip.write_videofile(render_video)
                return

        act_clip = self.act_assets[act]
        default_exp_clip = self.exp_assets['null']
        exp_clip = self.exp_assets[exp]

        if talk == '':
            clips = [
                act_clip,
                default_exp_clip.set_position(
                    lambda t: (291, 160)).set_duration(dft_exp_dt)
            ]
        else:
            talk_clip = TextClip(talk,
                                 font='data/SimHei.ttf',
                                 color='green',
                                 method='caption',
                                 fontsize=30)
            clips = [
                act_clip,
                talk_clip.set_position(('center', 50)),
                default_exp_clip.set_position(
                    lambda t: (291, 160)).set_duration(dft_exp_dt)
            ]

        clips.append(
            exp_clip.set_position(lambda t: (291, 160)).set_start(dft_exp_dt))
        ts = dft_exp_dt + exp_clip.duration
        if ts < act_clip.duration:
            clips.append(
                default_exp_clip.set_position(
                    lambda t: (291, 160)).set_duration(act_clip.duration -
                                                       ts).set_start(ts))

        if move != 'null':
            move_clip = self.move_assets[move]
            clips.append(move_clip.set_position(('center', 650)))

        final_clip = CompositeVideoClip(clips).set_duration(act_clip.duration)
        final_clip.write_videofile(render_video)
Example #32
def generateVideo(text, line2, vId):
    total = 0  # renamed from "sum" to avoid shadowing the builtin
    fnlTxt = ""
    lines = [[], []]
    for ind, c in enumerate(text.split(" ")):
        if total + len(c) <= charLimit:
            lines[0] += c
            if ind != 0:
                fnlTxt += " "
            fnlTxt += c
            total += len(c)
        elif total + len(c) <= charLimit * 2:
            if len(lines[1]) == 0:
                fnlTxt += "\n"
            if len(lines[1]) != 0:
                fnlTxt += " "
            lines[1] += c
            fnlTxt += c
            total += len(c)
    print(fnlTxt)
    # Create the text
    txt_clip = (TextClip(
        fnlTxt, fontsize=ftSz, color="white", font=fontPth,
        align="West").set_position("left").set_start(0.6).set_duration(1.4))
    line2C = (TextClip(
        line2, fontsize=ftSz, color="white", font=fontPth,
        align="center").set_position("left").set_start(2).set_duration(2.4))
    line2C = line2C.set_position(lambda t: (w * 0.1, 1.8 * h / 6))
    line2E = (TextClip(line2,
                       fontsize=ftSz,
                       color="white",
                       font=fontPth,
                       align="center").set_position("left").set_start(
                           4.4).set_duration(0.6).fadeout(0.6))
    line2E = line2E.set_position(lambda t: (w * 0.1, 1.8 * h / 6))
    txt_mov = txt_clip.set_pos(lambda t: (  # animate the text
        min((w * 0.1), int(-txt_clip.w - 500 + 2.7 * w * t)),  # X
        max(1.8 * h / 6, int(100 * t)),  # Y
    ))
    rName = text + "." + ext
    nName = f"{vId}.{ext}"
    result = CompositeVideoClip([video, txt_mov, line2C,
                                 line2E])  # Overlay text on video
    result.write_videofile(rName, fps=video.reader.fps)  # Many options...
    # Moves the video file to the render directory
    shutil.move(rName, "render/" + nName)
    return filename
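generateVideo leans on module-level configuration (video, w, h, charLimit, ftSz, fontPth, ext, and the filename it returns); a sketch of plausible values, all hypothetical:

    from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip

    video = VideoFileClip("template.mp4")  # background clip shared across renders
    w, h = video.size                      # frame dimensions used to place the text
    charLimit = 30                         # max characters per rendered line
    ftSz = 48                              # font size
    fontPth = "Verdana.ttf"                # path to the font file
    ext = "mp4"                            # output container
    filename = "done"                      # value returned by generateVideo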
Example #33
    def build__videos(self, option, skipbuild, interval):
        # video output file name
        vid_out_file = self.vid_build_name(option, interval)
     
        # build the vine compilation
        if not skipbuild:
            clips = []
         
            # add the intro
            intro = self.add_intro()
            clips.append(intro)
            currloc = intro.duration
         
            # generate list of all available vids
            (tmpclips, currloc, totalcliptime) = self.generate_vid_list(
                option, currloc)
          
            # set the background image
            clip = self.add_background(intro, totalcliptime)
            clips.append(clip)
              
            # add list of individual vids
            clips.extend(tmpclips)
          
            # add the outro
            outro = self.add_outro(currloc)
            clips.append(outro)
              
            # add previous days best video to outro
            best_vid_clip = self.get_previous_bests_vid(option, currloc)
            clips.append(best_vid_clip)
          
            # finalize the video file
            final_clip = CompositeVideoClip(clips, size=(1920, 1080))
            final_clip.fps = 30
            final_clip.write_videofile(vid_out_file)

        return vid_out_file
Example #34
File: pitch.py  Project: jaflo/misc
def poop(source, destination, midi_file, stretch, fadeout, rebuild, max_stack):
    """
    Create multiple pitchshifted versions of source video and arrange them to
    the pattern of the midi_file, also arrange the video if multiple notes play
    at the same time.
    """

    print "Reading input files"
    video = VideoFileClip(source, audio=False)
    """
    Non-main tracks are 30% the size of the main and have a white border and a
    margin around them.
    """
    smaller = video.resize(0.3)\
        .margin(mar=2, color=3*[255])\
        .margin(mar=8, opacity=0)
    audio = AudioFileClip(source, fps=44100)
    mid = MidiFile(midi_file)
    ignoredtracks = ["Percussion", "Bass"]

    print "Analysing MIDI file"
    notes = []   # the number of messages in each track
    lowest = 127 # will contain the lowest note
    highest = 0  # will contain the highest note
    for i, track in enumerate(mid.tracks):
        notes.append(0)
        #if track.name in ignoredtracks: continue
        for message in track:
            if message.type == "note_on":
                lowest = min(lowest, message.note)
                highest = max(highest, message.note)
                notes[-1] += 1
    """
    The main track is the one featured in the center. It is probably the one
    with the most notes. Also record the lowest, highest, and average note to
    generate the appropriate pitches.
    """
    maintrack = max(enumerate(notes), key=lambda x: x[1])[0]
    midpitch = int((lowest+highest)/2)
    print "Main track is probably", str(maintrack)+":", mid.tracks[maintrack].name
    mid.tracks.insert(0, mid.tracks.pop(maintrack)) # move main track to front
    notes.insert(0, notes.pop(maintrack)) # move main note count to front
    print sum(notes), "notes ranging from", lowest, "to", highest, "centering around", midpitch

    print "Transposing audio"
    sound = audio.to_soundarray(fps=44100) # source, original audio
    tones = range(lowest-midpitch, highest-midpitch) # the range of pitches we need
    pitches = [] # this will contain the final AudioFileClips
    if not os.path.exists("pitches/"):
        print "Creating folder for audio files"
        os.makedirs("pitches/")
    for n in tones:
        """
        Pitches only need to be generated if they do not already exist or if
        we force the creation of new ones. Save them in order in pitches.
        """
        name = "pitches/"+source+"_"+str(n)+".mp3"
        if not os.path.isfile(name) or rebuild:
            print "Transposing pitch", n
            splitshift(sound, n).write_audiofile(name)
        pitches.append(AudioFileClip(name, fps=44100))

    print "Adding video clips"
    clips = [video.set_duration(1)] # to set the video size
    positions = [("left", "bottom"), ("right", "bottom"), ("left", "top"),
        ("right", "top"), ("center", "bottom"), ("center", "top"),
        ("left", "center"), ("right", "center")] # non-main tracks
    """
    curpos is the current corner position on the screen and changes with each track.
    cache is used to make a unique file name whenever a new temporary file is created.
    endtime will be used at the end to set the end TextClip. It is the latest time any clip ends.
    """
    curpos = -2
    cache = endtime = 0
    for i, track in enumerate(mid.tracks):
        #if track.name in ignoredtracks: continue
        print("Processing {} notes: {}".format(notes[i], track.name))
        t = 1.0 # not 0 because we added one second of original video for size
        opennotes = [] # will contain all notes that are still playing
        curpos += 1
        for message in track:
            if not isinstance(message, MetaMessage):
                message.time *= stretch
                t += message.time
                if message.type == "note_on":
                    """
                    Add a video clip with the appropriate starting time and
                    pitch. Also add an entry to opennotes (we don't know when
                    the note ends yet).
                    """
                    part = video
                    mainvid = i == 0  # and len(opennotes) == 0
                    if not mainvid: part = smaller
                    part = part\
                        .set_audio(pitches[min(len(pitches)-1, max(0, message.note-lowest))])\
                        .set_start(t/1000)
                    opennotes.append((message.note, len(clips), t))
                    """
                    If this isn't the main track, the video will be smaller and
                    placed at the edge. We'll get a position for each track.
                    If there is more than one video playing in this track, it
                    will be placed slighly closer to the center.
                    """
                    if not mainvid:
                        stackheight = 6
                        part = part.set_position(positions[curpos % len(positions)])
                    clips.append(part)
                elif message.type == "note_off":
                    reference = message.note
                    index = 0
                    """
                    Find the note that ended in opennotes using the note.
                    Get the index and start time, remove it from opennotes.
                    """
                    for note in reversed(opennotes):
                        n, j, d = note
                        if n == reference:
                            index = j
                            opennotes.remove(note)
                            break
                    """
                    Get the clip for the open note, set its time to the
                    difference between time now and start time. Have it fade out
                    and update the endtime if needed.
                    """
                    clips[index] = clips[index].set_duration((t-d)/1000+fadeout)
                    clips[index] = clips[index].crossfadeout(fadeout)
                    endtime = max(endtime, t/1000+fadeout)
                if len(clips) == max_stack:
                    """
                    To save some memory, the clips in memory are emptied
                    whenever they reach a certain size. All clips that are closed
                    are merged into one file on disk.
                    """
                    upuntil = len(clips) # the first open note
                    if len(opennotes) > 0: _, upuntil, _ = opennotes[0]
                    stillopen = clips[upuntil:]
                    print "Stack reached", len(clips), "clips, merging", upuntil
                    """
                    Save a temporary file to disk with all clips we can safely
                    discard from clips.
                    """
                    newcache = destination+".temporary"+str(cache)+".mp4"
                    CompositeVideoClip(clips[:upuntil]).write_videofile(newcache)
                    cache += 1
                    """
                    Shift all opennotes' indices down by the number of clips
                    merged and saved to disk. Set clips to be the new, merged
                    clip and any leftover clips.
                    """
                    for i, note in enumerate(opennotes):
                        n, j, d = note
                        opennotes[i] = (n, j-upuntil+1, d)
                    clips = [VideoFileClip(newcache)]+stillopen

    end = TextClip("pitch.py", font="Arial", color="white", fontsize=70)\
        .set_pos("center")\
        .set_duration(1)\
        .set_start(endtime)
    clips.append(end) # add an ending frame

    """
    Combine all leftover clips, write them to the final file and remove
    temporary files created before.
    """
    print "Combining", len(clips), "clips"
    final = CompositeVideoClip(clips).set_start(1)
    final.write_videofile(destination)
    clips = []
    if cache == 1:
        print("Removing one temporary file")
    elif cache > 1:
        print("Removing", cache, "temporary files")
    for i in range(0, cache):
        os.remove(destination+".temporary"+str(i)+".mp4")