Example #1
 def prepend_intertitle(
     self,
     size: Optional[Size] = None,
     color: str = DEFAULT_INTERTITLE_COLOR,
     font: str = DEFAULT_INTERTITLE_FONT,
     fontsize: int = DEFAULT_INTERTITLE_FONTSIZE,
     position: str = DEFAULT_INTERTITLE_POSITION,
     duration: int = DEFAULT_INTERTITLE_DURATION,
 ):
     if not self.meta.text:
         logger.warning('%s: Missing intertitle text', self.meta.path)
         return
     logger.info('%s: Intertitle "%s"', self.meta.path, self.meta.text)
     if not size:
         size = Size(width=self.video_file_clip.w,
                     height=self.video_file_clip.h)
     text_clip = TextClip(
         self.meta.text.replace('|', '\n'),
         size=(size.width * INTERTITLE_TEXT_WIDTH_FACTOR, None),
         color=color,
         font=font,
         fontsize=fontsize,
         method='caption',
         align='center',
     )
     composite_clip = CompositeVideoClip([text_clip.set_pos(position)],
                                         (size.width, size.height))
     intertitle_clip = composite_clip.subclip(0, duration)
     self.video_file_clip = concatenate_videoclips(
         [intertitle_clip, self.video_file_clip], method='compose')
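The same recipe works outside a class; a minimal sketch assuming a hypothetical input.mp4: composite the TextClip onto an explicit canvas, freeze it with subclip, then concatenate.

from moviepy.editor import (TextClip, VideoFileClip, CompositeVideoClip,
                            concatenate_videoclips)

main = VideoFileClip("input.mp4")  # hypothetical input file
title = TextClip("Chapter One", color='white', fontsize=48,
                 size=(int(main.w * 0.9), None), method='caption')
# Compositing onto an explicit canvas gives the text clip a frame size;
# subclip(0, 3) then freezes it into a three-second intertitle card.
card = CompositeVideoClip([title.set_pos('center')],
                          (main.w, main.h)).subclip(0, 3)
concatenate_videoclips([card, main], method='compose').write_videofile("out.mp4")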
Example #2
def make_test_vid(note_vid, segments, fname):

    t = 0

    clips = []
    padding = 2
    i = 0

    for note, (start, end) in segments:
        clip = note_vid.subclip(start, end)
        clip = clip.set_start(t)

        clips.append(clip)

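        # The caption lasts `padding` seconds longer than its clip, so it
        # stays on screen through the gap before the next segment starts.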
        txt = (TextClip("%d %s" % (i, note),
                        color='white',
                        font='Ubuntu-Bold',
                        fontsize=22).margin(1).margin(
                            top=30, left=30, opacity=0.0).set_pos(
                                ('left',
                                 'top')).set_duration(end - start +
                                                      padding).set_start(t))
        clips.append(txt)

        t += (end - start) + padding
        i += 1

        print(t, i)

    full_video = CompositeVideoClip(clips)
    print('full length %f' % full_video.duration)
    full_video.write_videofile(fname, threads=20)
Example #3
 def create_thumbnail(self, clip: Clip):
     logging.info("Creating yt thumbnail")
     thumbnail_base = self.get_thumbnail_base(clip)
     emoji = self.get_emoji()
     overlay = ImageClip(os.path.join(self.asset_path, "overlay_thumbnail.png")).set_opacity(0.8)
     number = self.get_number_textclip()
     try:
         logo = (
             ImageClip(os.path.join(self.asset_path, utils.get_valid_game_name(self.game), "game_logo.png"))
             .fx(resize, 1.3)
             .set_position((0.04, 0.6), relative=True)
         )
     except FileNotFoundError:
         logging.warning("No game_logo in associated asset folder -> thumbnail will be created without logo")
         logo = None
     thumbnail = [
         thumbnail_base.set_duration(None),
         emoji.set_duration(None),
         overlay.set_duration(None),
         number.set_duration(None),
     ]
     if logo:
         thumbnail.append(logo.set_duration(None))
     thumbnail_result = CompositeVideoClip(thumbnail, size=[1280, 720])
     thumbnail_result.save_frame(os.path.join(self.compilation_dir, "thumbnail.png"), t=0, withmask=True)
Example #5
def create_mtg_gif(name, id, border):
    if border == 'm':  # Modern (post-8th Ed)
        card_upper_corner = (19, 38)
        gif_width = 202 - card_upper_corner[0]
        gif_height = 172 - card_upper_corner[1]
    elif border == 'c':  # Current (post-Magic 2015)
        card_upper_corner = (17, 34)
        gif_width = 204 - card_upper_corner[0]
        gif_height = 173 - card_upper_corner[1]
    else:  # Old (pre-8th Ed)
        card_upper_corner = (25, 30)
        gif_width = 196 - card_upper_corner[0]
        gif_height = 168 - card_upper_corner[1]

    mtg_card = Image.open(BytesIO(requests.get(get_mtg_image(id)).content))
    mtg_card = ImageClip(np.asarray(mtg_card)).resize((222, 310))

    get_giphy_gif(name)
    giphy_gif = (VideoFileClip(
        'giphy_gif.mp4',
        target_resolution=(gif_height, gif_width)).set_pos(card_upper_corner))

    if giphy_gif.duration < 2:
        giphy_gif = giphy_gif.fx(loop, n=1 + int(2 // giphy_gif.duration))

    mtg_gif = CompositeVideoClip([mtg_card, giphy_gif])
    mtg_gif = mtg_gif.set_start(0).set_duration(giphy_gif.duration)
    # mtg_gif.write_gif("mtg_gif.gif")
    mtg_gif.write_videofile("mtg_gif.mp4",
                            codec='libx264',
                            bitrate=str(np.power(10, 7)),
                            verbose=False,
                            progress_bar=False,
                            audio=False,
                            ffmpeg_params=['-pix_fmt', 'yuv420p'])
Example #6
def write_video_file(file_path, pred_label_score, gt_info, save_dir):
    video_clip = VideoFileClip(file_path)
    text_clip = TextClip(txt=pred_label_score,
                         font='utils/SimHei.ttf',
                         color='white',
                         fontsize=32,
                         bg_color='black',
                         align='West').set_pos(
                             ("left", "top")).set_duration(video_clip.duration)
    compose_list = [video_clip, text_clip]
    if gt_info != "":
        gt_text_clip = TextClip(txt=gt_info,
                                font='utils/SimHei.ttf',
                                color='white',
                                fontsize=32,
                                bg_color='black',
                                align='East').set_pos(
                                    ("right", "bottom")).set_duration(
                                        video_clip.duration)
        compose_list.append(gt_text_clip)
    result = CompositeVideoClip(compose_list)
    video_name = os.path.basename(file_path)
    result.write_videofile(save_dir + "/" + video_name,
                           fps=25,
                           codec='libx264',
                           audio_codec='aac',
                           temp_audiofile='temp-audio.m4a',
                           remove_temp=True)
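A hedged call sketch for the function above; the paths and label strings are hypothetical:

write_video_file("clips/demo.mp4", "pred: cat 0.97", "gt: cat", "./annotated")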
Example #7
def main():
    clips = []
    with open("names.txt") as f:
        name = f.readlines()
        print(name)
        for i in name:
            i = i.split('\n')[0]
            clips.append(make(i))
    print(clips)
    concatenate_videoclips(clips).set_fps(30).write_videofile("飞跃起点理.mp4")
    exit()
    # Everything below exit() is unreachable scratch code, kept from the source.
    clip1 = ImageClip("./images/2.jpg")
    txt = TextClip("吼哇!123ASDasd".encode("utf-8"),
                   font="SimSun",
                   color='white',
                   fontsize=48)
    txt_col = txt.on_color(size=(clip1.w, txt.h + 10),
                           color=(0, 0, 0),
                           pos=(6, 'center'),
                           col_opacity=0.6).set_pos(lambda t: ((200), (800)))
    w, h = moviesize = clip1.size
    txt_mov = txt_col.set_pos(lambda t: (max(w / 30, int(w - 1 * w * t)),
                                         max(5 * h / 6, int(100 * t))))

    CompositeVideoClip([
        clip1, txt_mov
    ]).set_duration(1).set_fps(30).write_videofile("my_concatenation.mp4")
    CompositeVideoClip([clip1, txt_mov
                        ]).set_duration(1).set_fps(30).save_frame("test.png",
                                                                  t="00:00:01")
Example #8
def gifEngine(starttime,
              endtime,
              videofileloc,
              srtfileloc,
              outfileloc,
              logger='gifEngine.log'):
    logging.basicConfig(filename=logger, level=logging.DEBUG)
    logger = logging.getLogger(__name__)
    prolog.basic_config()
    # creating the initial GIF
    try:
        generator = lambda txt: TextClip(
            txt, font='Impact', fontsize=28, color='white')
        video = VideoFileClip(videofileloc)
        sub = SubtitlesClip(srtfileloc, generator).set_position(
            ("center", "bottom"), relative=True)
        composite = CompositeVideoClip([video, sub])
        composite = composite.subclip(starttime, endtime)
        composite.write_gif(outfileloc,
                            program='ffmpeg',
                            opt='palettegen',
                            logger=logger,
                            verbose=True)  # using new palettegen opt
        return 0
    except (IOError, OSError) as err:
        return err
Example #9
def generate_video(saved_model_path, video_category=None):
    """Uses the trained model to predict the frames and produce a video out of them"""
    # load model
    model = load_model(saved_model_path)

    which_one = video_category
    train_files, test_files = get_train_test_files(which=which_one)
    test_gen = get_data_gen(files=test_files,
                            timesteps=timesteps,
                            batch_size=batch_size,
                            im_size=(im_width, im_height))

    y_true = []
    y_pred = []

    for _ in range(200):
        x, y = next(test_gen)
        y_true.extend(y)

        predictions = model.predict_on_batch(x)
        y_pred.extend(predictions)

    clip1 = ImageSequenceClip([denormalize(i) for i in y_true], fps=5)
    clip2 = ImageSequenceClip([denormalize(i) for i in y_pred], fps=5)
    clip2 = clip2.set_position((clip1.w, 0))
    video = CompositeVideoClip((clip1, clip2), size=(clip1.w * 2, clip1.h))
    video.write_videofile(
        "{}.mp4".format(which_one if which_one else "render"), fps=5)
Example #10
def main(url, output):
    driver = webdriver.Chrome()
    remote_url = url
    driver.get(remote_url)
    
    png = chrome_takeFullScreenshot(driver)
    with open("website_image.png", 'wb') as f:
        f.write(png)

    driver.close()

    clip = ImageClip('website_image.png')
    
    video_width = int(clip.size[0] + 800)
    video_height = int(video_width/1.5)

    bg_clip = ColorClip(size=(video_width, video_height), color=[228, 220, 220])

    scroll_speed = 180
    total_duration = (clip.h - 800)/scroll_speed

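    # gf(t) returns the full screenshot frame; the filter crops an 800 px tall
    # window whose top edge moves down by scroll_speed pixels per second.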
    fl = lambda gf,t : gf(t)[int(scroll_speed*t):int(scroll_speed*t)+800,:]
    clip = clip.fl(fl, apply_to=['mask'])

    video = CompositeVideoClip([bg_clip, clip.set_pos("center")])
    video.duration = total_duration
    if not output.endswith('.mp4'):
        output += '.mp4'
    video.write_videofile(output, fps=26)
    os.remove('website_image.png')
Example #11
def make_zoom(scale_func,
              path=im_path,
              cx=32,
              cy=32,
              scale=10,
              duration=5,
              fps=10,
              oversample=2.0):
    ic = ImageClip(path).resize(oversample)
    bg = ColorClip((ic.w, ic.h), (0xFF, 0xFF, 0xFF)).set_duration(duration)

    ic.duration = duration
    cx *= oversample
    cy *= oversample
    total_frames = int(duration * fps)

    def zoom_between_frames(startf, endf):
        scales = [
            scale_func(startf + f * (endf - startf) / total_frames)
            for f in range(total_frames)
        ]
        return make_zoom_movie(ic, scales, fps, (cx, cy))

    # we sometimes seem to get duplicate frames at the start
    # and end
    ret = CompositeVideoClip([
        bg,
        zoom_between_frames(total_frames, 2.0 * total_frames),
        zoom_between_frames(0, total_frames)
    ])
    ret.size = ic.size
    # ret.duration = duration
    return ret.resize(1.0 / oversample)
Example #12
 def Gen_Video(self, beat_times, mp3path, uuid):
     FONT_URL = '../font/heimi.TTF'
     with open(uuid + '.txt', 'r', encoding='utf-8') as f:
         text_str = f.read()
     word_list = text_str.split('\n')
     clips = []
     for index, beat_time in enumerate(beat_times[:-1]):
         if index >= len(word_list):
             break
         print(f'{index + 1}/{len(beat_times)}——{word_list[index]}')
         text_clip = TextClip(
             word_list[index],
             fontsize=320 // 8,
             color='white',
             size=(320, 640),
             method='caption',
             font=FONT_URL) \
             .set_start(beat_time) \
             .set_end(beat_times[index + 1])
         text_clip = text_clip.set_pos('center')
         clips.append(text_clip)
     final_clip = CompositeVideoClip(clips)
     audio_clip = AudioFileClip(mp3path)
     final_video = final_clip.set_audio(audio_clip)
     final_video.write_videofile(str(uuid) + '.mp4',
                                 fps=30,
                                 codec='mpeg4',
                                 preset='ultrafast',
                                 audio_codec="libmp3lame",
                                 threads=4)
Example #13
def merge_videos(filepath_1, filepath_2, filepath_out):
    """
    Overlay second video in the bottom right corner of the first video.
    """
    # If the video generation failed, merge fails
    if not os.path.isfile(filepath_1) or not os.path.isfile(filepath_2):
        print("Error: The filepath(s) are invalid.")
        return False

    # Merge original lesson video with Wav2Lip result video
    clip1 = VideoFileClip(fr'{filepath_1}')  # Use ./ instead of /
    clip2 = VideoFileClip(fr'{filepath_2}')

    clip2 = resize_clip_wrt(clip1, clip2)
    composite_clip = CompositeVideoClip([
        clip1,
        clip2.set_position(("right", "bottom")).set_start(0).crossfadein(1)
    ])

    # Use a temp audio file if audio is not working
    # It seems overriding an existing file will result in second video not running correctly
    try:
        # final_clip.write_videofile(r'./results/result_voice.mp4')
        composite_clip.write_videofile(fr'{filepath_out}',
                                       codec='libx264',
                                       audio_codec='aac',
                                       temp_audiofile='temp-audio.m4a',
                                       remove_temp=True)
        return True
    except Exception as e:
        print(e)
        return False
Example #14
def save_out(tracks, outfile=None, filetype='mp4'):

    out = []

    vids = [t for t in tracks if t['type'] == 'vid']
    texts = [t for t in tracks if t['type'] == 'text']

    for v in vids:
        c = VideoFileClip(v['content']).subclip(v['in'], v['in'] + v['duration'])
        c = c.set_start(v['start'])
        out.append(c)

    size = out[0].size

    for t in texts:
        c = create_sub(t['content'], size, rect_offset=195, min_height=55)
        c = c.set_start(t['start'])
        c = c.set_duration(t['duration'])
        out.append(c)

    final_clip = CompositeVideoClip(out)
    if outfile is None:
        outfile = 'msg_' + str(int(time.time())) + '.mp4'
    if filetype == 'gif':
        outfile = outfile.replace('.mp4', '.gif')
        final_clip.speedx(1.7).write_gif(outfile, fps=7, loop=1)
    else:
        final_clip.write_videofile(outfile, fps=24, codec='libx264')
    return outfile
Example #15
def concat_video(file, count, td, output):
    d = clip_duration(file)
    video = CompositeVideoClip([
        VideoFileClip(file).set_start((d * i) - (td * i)).fx(
            transfx.crossfadein, td * (0 if i == 0 else 1))
        for i in range(0, count)
    ])
    video.write_videofile(output)
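clip_duration is not shown in the source; a minimal stand-in consistent with how it is used, plus a hypothetical call:

def clip_duration(file):
    # assumed helper: clip length in seconds
    return VideoFileClip(file).duration

concat_video("loop.mp4", count=5, td=0.5, output="looped.mp4")  # hypothetical files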
Example #16
	def _compose_buffer(self):
		audio = concatenate_audioclips(self.sounds)
		video = CompositeVideoClip(self.images, 
			size=(self.w, self.h)).set_duration(audio.duration)
		video = video.set_audio(audio)
		self.clips.append(video)
		self.sounds, self.images = [], []
		self._push_image(self.background_image)
Example #17
def annotate(clip, txt, speaker, txt_color='white', fontsize=30, font='Arial'):
    """ Writes a text at the bottom of the clip. """
    txt_colors = ['red', 'black', 'white', 'blue', 'green']
    txtclip = TextClip(txt,
                       fontsize=fontsize,
                       font=font,
                       color=txt_colors[speaker])
    cvc = CompositeVideoClip([clip, txtclip.set_pos(('center', 'bottom'))])
    return cvc.set_duration(clip.duration)
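A usage sketch, assuming a hypothetical interview.mp4: annotate cut-out subclips, then join them back together.

clip = VideoFileClip("interview.mp4")
cuts = [((0, 4), "Hello!", 0), ((4, 9), "Hi there.", 1)]
annotated = [annotate(clip.subclip(t0, t1), txt, speaker)
             for (t0, t1), txt, speaker in cuts]
concatenate_videoclips(annotated).write_videofile("annotated.mp4")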
Example #18
def mergeClips():
    videoFileNames = readClip()
    clips = []
    for i in range(0, len(videoFileNames)):
        clips.append(VideoFileClip(videoFileNames[i]))

    print(clips)
    result = CompositeVideoClip([clips[0], clips[1], clips[2]])
    result.write_videofile("mergedNew.mp4", fps=60)
Example #19
def make(resolution=(3840, 2160),
         blur=0,
         debug_mode=False,
         gradient_opacity=1,
         file_name=''):

    # opens art, then adds blur, then blows up
    art_image = Image.open('lib/temp/art.png')
    art_image = art_image.filter(ImageFilter.GaussianBlur(radius=blur))
    if (resolution > art_image.size):
        if debug_mode:
            print('Art smaller than background needed')
        art_image = art_image.resize(
            (math.ceil(resolution[0] * 1.05), math.ceil(resolution[0] * 1.05)),
            Image.ANTIALIAS)
    else:
        if debug_mode:
            print('Art larger than background needed')
    if debug_mode:
        print('Background size before crop: ' + str(art_image.size))

    # cropping the blurred art
    width, height = art_image.size

    left = (width - resolution[0]) / 2
    top = (height - resolution[1]) / 2
    right = (width + resolution[0]) / 2
    bottom = (height + resolution[1]) / 2

    # crop
    art_image = art_image.crop((left, top, right, bottom))

    art_image.save('lib/temp/' + file_name + '.png', 'PNG')
    art_image.close()

    # determines if the art is dark with is_dark
    if is_dark.calc(debug_mode=debug_mode):
        if debug_mode:
            print('Detected dark art; using white gradient')
        gradient_clip = ImageClip(
            'themes/radio/gradient_white.png',
            transparent=True).set_opacity(gradient_opacity)
    else:  # its light
        if debug_mode:
            print('Detected light art; using black gradient')
        gradient_clip = ImageClip(
            'themes/radio/gradient_black.png',
            transparent=True).set_opacity(gradient_opacity)

    gradient_clip = gradient_clip.resize(resolution)  # resize returns a new clip
    art_clip = ImageClip('lib/temp/' + file_name + '.png', transparent=True)
    transparent = ImageClip('lib/transparent.png').resize(resolution)

    # again, the transparent needs to go first, this is to force a given resolution
    background_clip = CompositeVideoClip(
        [transparent, art_clip, gradient_clip])
    background_clip.save_frame('lib/temp/' + file_name + '.png')
Example #20
def add_text(content, font_size):
    '''
    add text on the top of video stream
    '''
    txt_clip = (TextClip(content, fontsize=font_size,
                         color='white').set_position('top').set_duration(
                             video.duration))
    result = CompositeVideoClip([video, txt_clip])
    result.write_videofile(new_file)
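This snippet relies on module-level video and new_file; a minimal setup sketch with hypothetical names:

video = VideoFileClip("input.mp4")  # hypothetical source clip
new_file = "output_with_text.mp4"   # hypothetical output path
add_text("Hello, world", font_size=40)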
Example #21
def handleVideo(file_content):
    for i in range(0, len(file_content)):
        arr = file_content[i][0].split("-->")
        # List all files in the music folder; a random one will be inserted into the cut video
        musicFiles = os.listdir(arr[1])
        musicFilesTempLength = len(musicFiles) - 1
        # Video info
        clip = VideoFileClip(arr[0]).subclip(arr[2], arr[3])
        # If the music folder holds only one audio file, use file 0; otherwise pick one at random
        if musicFilesTempLength > 0:
            musicFilesIndex = random.randint(0, musicFilesTempLength)
        else:
            musicFilesIndex = 0

        audioClip = AudioFileClip(arr[1] + musicFiles[musicFilesIndex])
        video = clip.set_audio(audioClip)
        videoDuration = int(video.duration)
        # Get the mp3 length; the cut starts at most (length - video.duration) in,
        # so a segment of video.duration fits. The file was chosen at random
        # from the Music folder.
        musicInfo = eyed3.load(arr[1] + musicFiles[musicFilesIndex])
        # mp3 length in seconds
        musicSecs = int(format(musicInfo.info.time_secs))
        # Random start position: start + video.duration must stay within the mp3
        # (the last seconds are mostly silent, so stop well before the end).
        # If the cut video is longer than the audio, start from second 0.
        if musicSecs - videoDuration <= 0:
            musicStart = 0
            musicEnd = musicSecs

        else:
            musicStart = random.randint(0, musicSecs - videoDuration)
            musicEnd = musicStart + videoDuration

        # Cut the audio from the random start and apply fade-in/fade-out
        video.audio = video.audio.subclip(musicStart, musicEnd)
        try:
            video.audio = audio_fadein(video.audio, 2.0)
            video.audio = audio_fadeout(video.audio, 2.0)
        except Exception as e:
            with open('error.txt', mode="w+") as f:
                f.write(str(e))

        # Set the output size and center the video
        result = CompositeVideoClip([video.set_pos(('center'))],
                                    size=(1366, 728))
        # Create the output directory
        outputVideoPath = arr[1] + "../outputVideo/"
        isExists = os.path.exists(outputVideoPath)
        if not isExists:
            os.makedirs(outputVideoPath)
        result.write_videofile(outputVideoPath + "outputVideo" + str(i) +
                               ".mp4",
                               codec="libx264",
                               fps=15,
                               bitrate="512K",
                               audio_fps=44100,
                               audio_bitrate="128k")
Example #22
 def get_clip(self):
     audio = self.audio_clip
     screen_clips = [
         self.get_screen_clip(i) for i in range(0, len(self.screens))
     ]
     video = CompositeVideoClip(clips=screen_clips)
     video.fps = 24
     video.audio = audio
     return video
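Assigning fps and audio as plain attributes is equivalent to the chained setters used elsewhere on this page; a one-line alternative sketch:

video = CompositeVideoClip(clips=screen_clips).set_fps(24).set_audio(audio)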
Example #23
def video_render(txt_file,image_file,sound_file,save_file):
        from moviepy.editor import ImageClip
        from moviepy.editor import CompositeVideoClip
        from moviepy.editor import CompositeAudioClip
        from moviepy.editor import TextClip
        from moviepy.editor import AudioFileClip
        from moviepy.editor import concatenate
        from moviepy.config import change_settings
        change_settings({"IMAGEMAGICK_BINARY": "/usr/local/bin/convert"})
        text=[]
        
        with open(txt_file,'r') as file:
            for lines in file:
                if lines!="\n":
                    text.append(lines.rstrip('\n'))
        durs=[]
        for i in text:            
            res = len(re.findall(r'\w+', i)) 
            if res/2>3:
                durs.append(res/2)
            else:
                durs.append(3)
        total_duration=sum(durs)
        
        a_clip = AudioFileClip(sound_file)
        if a_clip.duration<total_duration:
            new_audioclip = CompositeAudioClip([a_clip, a_clip.set_start(a_clip.duration-1)]).set_duration(total_duration+3)
        else:
            new_audioclip=a_clip.set_duration(total_duration+3)
        
        screen=(1920,1080)
        clip_list = []
        i=0
        for string in text:
            duration = durs[i]
            i += 1
            try:
                txt_clip = (TextClip(string, fontsize=70, color='white',
                                     method='caption', size=screen)
                            .set_duration(duration).set_pos('center'))
                clip_list.append(txt_clip)
            except UnicodeEncodeError:
                txt_clip = TextClip("Issue with text", fontsize=70,
                                    color='white').set_duration(2)
                clip_list.append(txt_clip)
        
        final_text_clip = concatenate(clip_list, method = "compose").set_start(3)  
            
        v_clip = ImageClip(image_file).set_duration(total_duration+3)
        video=CompositeVideoClip([v_clip, final_text_clip])
        # video = video.set_audio(AudioFileClip('sound/Serenity (1).mp3'))
        video = video.set_audio(new_audioclip)
        video.write_videofile(save_file, 
                              codec='libx264',
                              fps=10, 
                              threads=4,
                              audio_codec='aac', 
                              temp_audiofile='temp-audio.m4a', 
                              remove_temp=True
                              )
Example #24
def compose(resources, params):
    """Creates a video clip out of the videos and the images of the game as well as the audio from the description"""

    # Set up a variable to save the duration of the clip
    current_duration = 0

    # Set the limit parameters
    process_images = True
    process_videos = True
    process_audio = True

    # Set if the images should be processed
    if 'image_limit' in params:
        if params['image_limit'] == 0:
            process_images = False

    # Set if the videos should be processed
    if 'video_limit' in params:
        if params['video_limit'] == 0:
            process_videos = False

    # Set if audio should be processed
    if 'generate_audio' in params:
        process_audio = params['generate_audio']

    # Add the videos to the composed clip
    if process_videos:
        for video in range(len(resources['videos'])):
            # Set the start of each video
            resources['videos'][video] = VideoFileClip(resources['videos'][video]).set_start(current_duration)

            # Set the new duration of the clip
            current_duration += resources['videos'][video].duration

    # Add the images to the composed clip
    if process_images:
        for image in range(len(resources['images'])):
            # Get the images into a work variable
            tmp = resources['images'][image]

            # Create an image clip and set the start properly
            resources['images'][image] = ImageClip(resources['images'][image], duration=5).set_start(current_duration)

            # Set the name of the image clip
            resources['images'][image].filename = tmp

            # Set the new duration for the clip
            current_duration += resources['images'][image].duration

    # Add the audio to the video clip
    if process_audio:
        # Create the final clip with audio
        return CompositeVideoClip(resources['videos'] + resources['images']).set_audio(
            set_up_audio_clip(resources['audio']))

    # Create the final clip without audio
    return CompositeVideoClip(resources['videos'] + resources['images'])
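A hedged call sketch; the resources dict layout is inferred from how compose() indexes it, and the paths are hypothetical:

resources = {
    'videos': ['media/trailer.mp4'],
    'images': ['media/screenshot.png'],
    'audio': 'media/description.mp3',
}
final = compose(resources, {'generate_audio': False})
final.write_videofile("game_summary.mp4", fps=24)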
Example #25
	def scaleVideo(self, fileLocation):
		(name, extension) = self.__removeExtension(fileLocation)

		clip = VideoFileClip(fileLocation)
		currentFPS = clip.fps
		# clip = clip.resize(0.75)
		final = CompositeVideoClip([clip])

		newPath = name + "-scaled" + extension
		final.write_videofile(newPath, fps=currentFPS/2, codec='libx264')
Example #26
def compose(text, duration=4.0, outname="sunset_words.mp4"):
    start = 108
    end = start + duration
    clip1 = VideoFileClip("sunset.mp4").subclip(start, end).resize(
        (1920 / 2, 1080 / 2))
    clip2 = TextClip(text, size=clip1.size).set_duration(4)

    composition = CompositeVideoClip([clip1, clip2])

    composition.write_videofile(outname)
Example #27
def merge_videos(path_source_1, path_source_2, mixed_path):

    clip01 = VideoFileClip(path_source_1)
    clip02 = VideoFileClip(path_source_2)

    clip01 = clip01.resize(0.60)
    clip02 = clip02.resize(0.60)

    final_clip = CompositeVideoClip([clip01.set_position(("left","center")), clip02.set_position(("right","center"))], size=(720, 460))
    final_clip.write_videofile(mixed_path)
Example #28
def process_video(filename, overwrite=False, max_width=1600, max_height=1600, max_file_size=5*1024**2, gifdir='gifs/'):

    gif_name = gifdir + filename + '.gif'

    if isfile(gif_name) and not overwrite:
        print("Skipping " + gif_name + " as it already exists.")
        return
    
    video_file = VideoFileClip(filename)

    try:
        assert_approx_equal(float(video_file.w)/float(video_file.h),16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except AssertionError:
        print("Not resizing video.")

    if video_file.h > max_height:
        video_file = video_file.resize(height=max_height)

    if video_file.w > max_width:
        video_file = video_file.resize(width=max_width)

    end_image = video_file.to_ImageClip(video_file.end-(1/video_file.fps)).set_duration(0.7)
    
    video_file = concatenate([video_file, end_image])
    fadein_video_file = CompositeVideoClip(
        [video_file,
         (video_file.to_ImageClip()
          .set_duration(0.7)
          .crossfadein(0.4)
          .set_start(video_file.duration-0.7)),
     ]
    )
    
    logo_size = video_file.h/6
    text = ImageClip(
        expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(
            video_file.duration).resize(width=logo_size).set_pos(
                (video_file.w-logo_size,video_file.h-logo_size))


    composite_video_file = CompositeVideoClip([fadein_video_file, text])
    composite_video_file.write_gif(gif_name,fps=20)

    fuzz_amt = 5
    commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'

    process = call(commands, shell=True)

    if getsize(gif_name) > max_file_size:
        process_video(filename,
                      max_height=video_file.h*0.95,
                      overwrite=True,
                      gifdir=gifdir,
                      max_file_size=max_file_size)
Example #29
def mergeVideos():
    clip1 = VideoFileClip("text.mp4").subclip(0, 15)
    print(clip1.duration)
    clip2 = VideoFileClip("mhls.mp4").subclip(0, 15).resize(0.5)
    video = CompositeVideoClip([clip1,
                                clip2
                                ])
    # Note: this second, side-by-side composite is built but never used.
    CompositeVideoClip([clip1.set_pos(("left", "center")),
                        clip2.set_pos(("right", "center"))],
                       size=(clip1.w + clip1.w, clip2.h))

    video.write_videofile("merge_video.mp4")
Example #30
def visualize(model_cls, input_data):
    os.environ["FFMPEG_BINARY"] = "ffmpeg"

    model = model_cls()
    output = model.encode(input_data)
    output = output.reshape(output.shape[0] * 512, 128)
    min_val = np.amin(output)
    max_val_normalized = np.amax(output) - min_val

    last_percentage = -1
    figures = []

    # (graph total duration / graph datapoint count) * (graph datapoint count / graph width)
    figure_snapshot_rate = 40
    tick_to_sample_ratio = 32.87890625  # This is still off sync with the audio, 2:53 becomes 2:58 for some reason
    frame_duration = (figure_snapshot_rate * tick_to_sample_ratio) / 44100
    for i in range(128):
        column = i % 16
        row = int(i / 16)
        figures.append(Figure(60, 60, row, column, frame_duration))

    print(f"Rendering output: {output.shape}")
    for index, entry in enumerate(output):
        should_snapshot = index % figure_snapshot_rate == 0

        for plot_index, plot in enumerate(figures):
            plot.push((entry[plot_index] - min_val) / max_val_normalized)

            if should_snapshot:
                plot.snapshot()

        percentage = int(index / len(output) * 100)
        if last_percentage != percentage:  # percentage % 1 == 0 is always true for ints
            last_percentage = percentage
            print(f"Capturing figures: {percentage}%...")

    print(f"{len(figures[0].figures)} figure frames rendered")
    clips = [FigureClip(figure) for figure in figures]

    audio_filename = "vis/output.wav"
    output = model.predict_output(input_data).flatten()
    write_wav(audio_filename, output)

    del model
    backend.clear_session()

    audio = AudioFileClip(audio_filename)
    audio = audio.set_start(0)
    audio = audio.set_duration(
        min(audio.duration, frame_duration * len(figures[0].figures)))

    result = CompositeVideoClip(clips, size=(16 * 66 + 12, 8 * 66 + 12))
    result = result.set_audio(audio)
    result.write_videofile("vis/output.mp4", fps=1 / frame_duration)
Example #31
def write_text(text_chunks, original_video, output, font_size=15):
    txt_clips = []
    for i in range(len(text_chunks)):
        txt_clips.append(TextClip(text_chunks[i], fontsize=font_size, color="yellow")
                         .set_position('bottom')
                         .set_duration(3)
                         .set_start(i * 3))
    clips = [original_video]
    clips.extend(txt_clips)
    result = CompositeVideoClip(clips)
    result.write_videofile(output)
Example #32
File: video.py Project: andyshinn/usb
    def thumbnail(self, time, dest, text):
        text_clip = VideoFile._generate_text(text)
        text_clip.duration = self.video_clip.duration
        video = CompositeVideoClip([self.video_clip, text_clip])
        video.duration = self.video_clip.duration

        try:
            video.save_frame(dest, t=(time + 1.0))
            logger.info("writing out thumbnail: {}", dest)
        except ValueError:
            logger.opt(
                exception=True).debug("Exception logged with debug level:")
Example #33
def build_title(vid, title_start, title_end, texts, text_i, conf):
    t = texts[text_i]
    t = t.replace('\\n', '\n')

    clip = vid.subclip(title_start, title_end)

    text = TextClip(f"{t}", fontsize=conf.titlesize, font=conf.titlefont, color=conf.titlecolor).set_pos(("center", "bottom"))

    comp_clip = CompositeVideoClip([clip, text])
    comp_clip.duration = clip.duration

    return comp_clip, text_i + 1
Example #34
def make_sentence(sayer, text):
    word_list = text.lower().split(" ")

    list_dir = os.listdir(s_dir)
    sentences = sorted(list_dir)
    wrote = 0
    cuts_dirs = []
    cuts = []
    not_found = []
    for single in word_list:
        print "Word = " + single
        count = 0
        flag = 0
        for sen in sentences:
            print "Searhing sentence #" + str(count)
            words = os.listdir(s_dir + "/" + sen + "/words")
            words_dir = s_dir + "/" + sen + "/words"
            for word in words:
                if single == word and (single in sen.lower()):
                    cut_single = words_dir + "/" + single + "/0.mp4"
                    cuts_dirs.append(cut_single)
                    flag = 1
                    break
            count += 1
            if flag == 1:
                break

        if flag == 0:
            not_found.append(single)
    if not_found:
        print("Keywords that are not found are: " + str(not_found))
        return
    video_cuts = []
    for file in cuts_dirs:
        print(file)
        video = VideoFileClip(file)
        video = video.resize((1280, 720))
        video_cuts.append(video)
    final = concatenate_videoclips(video_cuts)

    word_text = TextClip(text, fontsize=40, color="white",
                         bg_color="black").set_pos("bottom").set_duration(
                             final.duration)
    final = CompositeVideoClip([final, word_text])
    if not os.path.exists(sayer + "* " + text):
        os.makedirs(sayer + "* " + text, 0o777)
    final.write_videofile(sayer + "* " + text + "/" + text + ".mp4",
                          codec='libx264',
                          audio_codec='aac',
                          temp_audiofile=sayer + "* " + text + "/" + text +
                          ".m4a",
                          remove_temp=True)
Example #35
File: gb.py Project: aristotll/gifbook
    def generate(self,
                 start_sub=None, end_sub=None, resize=.5, compression=20):
        subs = pysrt.open(self._subtitles_file)

        for iter_, sub in enumerate(subs, start_sub if start_sub else 0):
            if end_sub and iter_ > end_sub:
                break

            gif_file_name = os.path.join(
                self._output_dir,
                '{clip}_{iter}.gif'.format(
                    clip=self._clip_file,
                    iter=iter_
                ))

            clip = (
                self._clip
                .subclip((sub.start.minutes, sub.start.seconds),
                         (sub.end.minutes, sub.end.seconds))
                .resize(resize)
            )

            compositions = [clip]

            subtitles_y_pos = self._subtitles_position_y
            for line in sub.text.split('\n'):
                subtitles_y_pos += 20

                text = (
                    TextClip(line,
                             fontsize=self.subtitles_font_size,
                             color=self.subtitles_color,
                             stroke_width=self.subtitles_stroke_width,
                             stroke_color=self.subtitles_stroke_color,
                             bg_color=self.subtitles_background_color,
                             font=self.subtitles_font_name)

                    .set_pos((self._subtitles_position_x,
                              subtitles_y_pos))

                    .set_duration(clip.duration))

                compositions.append(text)

            composition = CompositeVideoClip(compositions)

            composition.write_gif(gif_file_name, fuzz=compression)
Example #36
def filter_add_intertitle(
        video_clip,
        text,
        color,
        font,
        fontsize,
        position,
        duration,
        width,
        height):
    text_clip = generate_text_clip(
        text, width * TEXT_WIDTH_FACTOR,
        color=color, font=font, fontsize=fontsize)
    composite_clip = CompositeVideoClip(
        [text_clip.set_pos(position)],
        (width, height))
    intertitle_clip = composite_clip.subclip(0, duration)
    return concatenate_videoclips(
        [intertitle_clip, video_clip],
        method='compose')
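generate_text_clip and TEXT_WIDTH_FACTOR come from elsewhere in the source project; a minimal stand-in consistent with the intertitle code in Example #1:

TEXT_WIDTH_FACTOR = 0.9  # assumed value

def generate_text_clip(text, width, color, font, fontsize):
    # assumed helper mirroring the TextClip call in Example #1
    return TextClip(text.replace('|', '\n'), size=(width, None),
                    color=color, font=font, fontsize=fontsize,
                    method='caption', align='center')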
Example #37
    def build__videos(self, option, skipbuild, interval):
        # video output file name
        vid_out_file = self.vid_build_name(option, interval)
     
        # build the vine compilation
        if not skipbuild:
            clips = []
         
            # add the intro
            intro = self.add_intro()
            clips.append(intro)
            currloc = intro.duration
         
            # generate list of all available vids
            (tmpclips, currloc, totalcliptime) = self.generate_vid_list(
                option, currloc)

            # set the background image
            clip = self.add_background(intro, totalcliptime)
            clips.append(clip)
              
            # add list of individual vids
            clips.extend(tmpclips)
          
            # add the outro
            outro = self.add_outro(currloc)
            clips.append(outro)
              
            # add previous days best video to outro
            best_vid_clip = self.get_previous_bests_vid(option, currloc)
            clips.append(best_vid_clip)
          
            # finalize the video file
            final_clip = CompositeVideoClip(clips, size=(1920, 1080))
            final_clip.fps = 30
            final_clip.write_videofile(vid_out_file)

        return vid_out_file
Example #38
def process_video(filename, video_height=480, overwrite=False):

    gif_name = 'gifs/' + filename + '.gif'

    if isfile(gif_name) and not overwrite:
        print("Skipping " + gif_name + " as it already exists.")
        return
    
    video_file = VideoFileClip(filename)

    try:
        assert_approx_equal(float(video_file.w)/float(video_file.h),16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except AssertionError:
        print("Not resizing video.")


    video_file = video_file.resize(height=video_height)

    end_image = video_file.to_ImageClip(0).set_duration(0.7)
    
    video_file = concatenate([video_file, end_image])

    logo_size = video_height/6
    text = ImageClip(expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(video_file.duration).resize(width=logo_size).set_pos((video_file.w-logo_size,video_file.h-logo_size))


    composite_video_file = CompositeVideoClip([video_file, text])
    composite_video_file.write_gif(gif_name,fps=20)

    fuzz_amt = 5
    commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'

    process = call(commands, shell=True)

    if getsize(gif_name) > 5*1024**2:
        process_video(filename, video_height=video_height*0.75, overwrite=True)
Example #39
File: pitch.py Project: jaflo/misc
def poop(source, destination, midi_file, stretch, fadeout, rebuild, max_stack):
    """
    Create multiple pitchshifted versions of source video and arrange them to
    the pattern of the midi_file, also arrange the video if multiple notes play
    at the same time.
    """

    print "Reading input files"
    video = VideoFileClip(source, audio=False)
    """
    Non-main tracks are 30% the size of the main and have a white border and a
    margin around them.
    """
    smaller = video.resize(0.3)\
        .margin(mar=2, color=3*[255])\
        .margin(mar=8, opacity=0)
    audio = AudioFileClip(source, fps=44100)
    mid = MidiFile(midi_file)
    ignoredtracks = ["Percussion", "Bass"]

    print "Analysing MIDI file"
    notes = []   # the number of messages in each track
    lowest = 127 # will contain the lowest note
    highest = 0  # will contain the highest note
    for i, track in enumerate(mid.tracks):
        notes.append(0)
        #if track.name in ignoredtracks: continue
        for message in track:
            if message.type == "note_on":
                lowest = min(lowest, message.note)
                highest = max(highest, message.note)
                notes[-1] += 1
    """
    The main track is the one featured in the center. It is probably the one
    with the most notes. Also record the lowest, highest, and average note to
    generate the appropriate pitches.
    """
    maintrack = max(enumerate(notes), key=lambda x: x[1])[0]
    midpitch = int((lowest+highest)/2)
    print "Main track is probably", str(maintrack)+":", mid.tracks[maintrack].name
    mid.tracks.insert(0, mid.tracks.pop(maintrack)) # move main track to front
    notes.insert(0, notes.pop(maintrack)) # move main note count to front
    print(sum(notes), "notes ranging from", lowest, "to", highest, "centering around", midpitch)

    print "Transposing audio"
    sound = audio.to_soundarray(fps=44100) # source, original audio
    tones = range(lowest-midpitch, highest-midpitch) # the range of pitches we need
    pitches = [] # this will contain the final AudioFileClips
    if not os.path.exists("pitches/"):
        print "Creating folder for audio files"
        os.makedirs("pitches/")
    for n in tones:
        """
        Pitches only need to be generated if they do not already exist or if
        we force the creation of new ones. Save them in order in pitches.
        """
        name = "pitches/"+source+"_"+str(n)+".mp3"
        if not os.path.isfile(name) or rebuild:
            print "Transposing pitch", n
            splitshift(sound, n).write_audiofile(name)
        pitches.append(AudioFileClip(name, fps=44100))

    print "Adding video clips"
    clips = [video.set_duration(1)] # to set the video size
    positions = [("left", "bottom"), ("right", "bottom"), ("left", "top"),
        ("right", "top"), ("center", "bottom"), ("center", "top"),
        ("left", "center"), ("right", "center")] # non-main tracks
    """
    curpos is the current corner position on the screen and changes with each track.
    cache is used to make a unique file name whenever a new temporary file is created.
    endtime will be used at the end to set the end TextClip. It is the latest time any clip ends.
    """
    curpos = -2
    cache = endtime = 0
    for i, track in enumerate(mid.tracks):
        #if track.name in ignoredtracks: continue
        print("Processing {} notes: {}".format(notes[i], track.name))
        t = 1.0 # not 0 because we added one second of original video for size
        opennotes = [] # will contain all notes that are still playing
        curpos += 1
        for message in track:
            if not isinstance(message, MetaMessage):
                message.time *= stretch
                t += message.time
                if message.type == "note_on":
                    """
                    Add a video clip with the appropriate starting time and
                    pitch. Also add an entry to opennotes (we don't know when
                    the note ends yet).
                    """
                    part = video
                    mainvid = i == 0  # and len(opennotes) == 0
                    if not mainvid: part = smaller
                    part = part\
                        .set_audio(pitches[min(len(pitches)-1, max(0, message.note-lowest))])\
                        .set_start(t/1000)
                    opennotes.append((message.note, len(clips), t))
                    """
                    If this isn't the main track, the video will be smaller and
                    placed at the edge. We'll get a position for each track.
                    If there is more than one video playing in this track, it
                    will be placed slighly closer to the center.
                    """
                    if not mainvid:
                        stackheight = 6
                        part = part.set_position(positions[curpos % len(positions)])
                    clips.append(part)
                elif message.type == "note_off":
                    reference = message.note
                    index = 0
                    """
                    Find the note that ended in opennotes using the note.
                    Get the index and start time, remove it from opennotes.
                    """
                    for note in reversed(opennotes):
                        n, j, d = note
                        if n == reference:
                            index = j
                            opennotes.remove(note)
                            break
                    """
                    Get the clip for the open note, set its time to the
                    difference between time now and start time. Have it fade out
                    and update the endtime if needed.
                    """
                    clips[index] = clips[index].set_duration((t-d)/1000+fadeout)
                    clips[index] = clips[index].crossfadeout(fadeout)
                    endtime = max(endtime, t/1000+fadeout)
                if len(clips) == max_stack:
                    """
                    To save some memory, the clips in memory are emptied
                    whenever they reach a certain size. All clips that are closed
                    are merged into one file on disk.
                    """
                    upuntil = len(clips) # the first open note
                    if len(opennotes) > 0: _, upuntil, _ = opennotes[0]
                    stillopen = clips[upuntil:]
                    print "Stack reached", len(clips), "clips, merging", upuntil
                    """
                    Save a temporary file to disk with all clips we can safely
                    discard from clips.
                    """
                    newcache = destination+".temporary"+str(cache)+".mp4"
                    CompositeVideoClip(clips[:upuntil]).write_videofile(newcache)
                    cache += 1
                    """
                    Shift all opennotes' indices down by the number of clips
                    merged and saved to disk. Set clips to be the new, merged
                    clip and any leftover clips.
                    """
                    # use a separate index so the outer track index i is untouched
                    for k, note in enumerate(opennotes):
                        n, j, d = note
                        opennotes[k] = (n, j - upuntil + 1, d)
                    clips = [VideoFileClip(newcache)]+stillopen

    end = TextClip("pitch.py", font="Arial", color="white", fontsize=70)\
        .set_pos("center")\
        .set_duration(1)\
        .set_start(endtime)
    clips.append(end) # add an ending frame

    """
    Combine all leftover clips, write them to the final file and remove
    temporary files created before.
    """
    print "Combining", len(clips), "clips"
    final = CompositeVideoClip(clips).set_start(1)
    final.write_videofile(destination)
    clips = []
    if cache == 1:
        print("Removing one temporary file")
    elif cache > 1:
        print("Removing", cache, "temporary files")
    for i in range(0, cache):
        os.remove(destination+".temporary"+str(i)+".mp4")
Example #40
    job = fake.job()
    name = fake.name()
    if len(job) < 25:
        text.append((job, name))


def make_frame(t):
    """Draw text elements in each frame"""
    surface = gz.Surface(W, H, bg_color=(0,0,0))
    for i, line in enumerate(text):
        job, name = line
        ypos = LINE_HEIGHT * i - int(t * SCROLLSPEED) + BOTTOM_START
        
        txt = gz.text(job, "Amiri", TEXTSIZE, fontweight='bold', \
                      h_align='right', fill=(1,1,1))
        left = gz.Group([txt]).translate((LEFTCOL, ypos))
        left.draw(surface)
        txt = gz.text(name, "Amiri", TEXTSIZE, \
                      fontweight='normal',h_align='left', fill=(1,1,1))
        right = gz.Group([txt]).translate((RIGHTCOL, ypos))
        right.draw(surface)
    return surface.get_npimage()


clip = mpy.VideoClip(make_frame, duration=DURATION)

# mix text and logo together
final = CompositeVideoClip([clip, LOGO])
final.subclip(0, DURATION).write_videofile("abspann.avi", codec="h264", fps=24) 
#final.subclip(0, DURATION).write_videofile("abspann.mp4", codec="mpeg4", fps=24)