def create_videoclip(self, post_number, is_comment=False, comment_number=0):
    """Creates a video for the given post from its audio and image paths."""
    aud_path = f"audios/post_{post_number}.mp3"
    img_path = f"images/post_{post_number}.png"
    out_path = f"videos/post_{post_number}.mp4"
    if is_comment:
        aud_path = f"audios/post_{post_number}_comment_{comment_number}.mp3"
        img_path = f"images/post_{post_number}_comment_{comment_number}.png"
        out_path = f"videos/post_{post_number}_comment_{comment_number}.mp4"
    print(f"Creating video {out_path} from audio {aud_path} and image {img_path}\n")
    aud_clip = AudioFileClip(aud_path)
    vid_clip = ImageClip(img_path)
    vid_clip = vid_clip.set_audio(aud_clip).set_duration(aud_clip.duration)
    vid_clip.write_videofile(out_path, preset="medium",
                             temp_audiofile='temp-audio.m4a', remove_temp=True,
                             codec="mpeg4", audio_codec="aac", fps=24)
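# Usage sketch for create_videoclip above, assuming an owning class
# (hypothetically named VideoMaker) and pre-populated audios/ and images/
# directories; both names are illustrative, not from the source.
maker = VideoMaker()
maker.create_videoclip(1)                                      # the post itself
maker.create_videoclip(1, is_comment=True, comment_number=2)   # one comment clip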
def make_zoom(scale_func, path=im_path, cx=32, cy=32, scale=10,
              duration=5, fps=10, oversample=2.0):
    ic = ImageClip(path).resize(oversample)
    bg = ColorClip((ic.w, ic.h), (0xFF, 0xFF, 0xFF)).set_duration(duration)
    ic.duration = duration
    cx *= oversample
    cy *= oversample
    total_frames = int(duration * fps)

    def zoom_between_frames(startf, endf):
        scales = [scale_func(startf + f * (endf - startf) / total_frames)
                  for f in range(total_frames)]
        return make_zoom_movie(ic, scales, fps, (cx, cy))

    # We sometimes seem to get duplicate frames at the start and end.
    ret = CompositeVideoClip([
        bg,
        zoom_between_frames(total_frames, 2.0 * total_frames),
        zoom_between_frames(0, total_frames)
    ])
    ret.size = ic.size
    # ret.duration = duration
    return ret.resize(1.0 / oversample)
def vid_from_media(vid_path, media, song_path, titles, img_duration=8):
    clips = []
    print("sequencing media...")
    for m in media:
        print(m.path)
        if good_file(m.path):
            try:
                if is_img(m.path):
                    new_clip = ImageClip(m.path)
                    new_clip.fps = 1.0 / img_duration
                    new_clip.duration = img_duration
                else:
                    new_clip = VideoFileClip(m.path)
                text = m.title if titles else None
                new_clip = format_clip(new_clip, text)
                clips.append(new_clip)
            except Exception as err:
                print("COULDN'T CREATE CLIP BECAUSE: " + str(err))
        else:
            print('CORRUPT FILE FOUND: ' + m.path + ', skipping.')
    vid = concatenate_videoclips(clips)
    print(song_path)
    audio = AudioFileClip(song_path)
    audio_loops = int(vid.duration / audio.duration) + 1  # times to loop audio
    audio = concatenate_audioclips([audio] * audio_loops)
    print(audio.duration)
    print(vid.duration)
    audio = audio.set_duration(vid.duration)
    vid = vid.set_audio(audio)
    print("writing video...")
    vid.write_videofile(vid_path, progress_bar=False, preset='ultrafast')
    return abspath(vid_path)
def create_video_file(self):
    """
    Reads all the frames into a list along with the duration for each frame.
    Starts the clip list with the title and a transition, then appends each
    frame with its duration taken from the length dictionary. Finally
    combines all the clips in the list.
    """
    imgs = [img_file for img_file in glob.glob("temp_files/Images/*.png")]
    durations = [dur for dur in self.lendict.values()]
    transition_clip = VideoFileClip("transitions/TVColorBars.mp4")
    count = 0
    # adding title and transition clip
    title_img = [img_file for img_file in glob.glob("temp_files/title/*.png")][0]
    clips = [
        ImageClip(title_img).set_duration(self.title_dur + 0.5),
        transition_clip
    ]
    for comment_count, indiv in enumerate(imgs):
        comment_num = str(self.all_comments_names[comment_count].split('$')[1])
        clips.append(ImageClip(indiv).set_duration(durations[comment_count]))
        count += 1
        if count % self.num_comments_dict[comment_num] == 0:
            clips.append(transition_clip)
            count = 0
    self.concat_clip = concatenate_videoclips(clips, method="compose")
def main(url, output):
    driver = webdriver.Chrome()
    remote_url = url
    driver.get(remote_url)
    png = chrome_takeFullScreenshot(driver)
    with open("website_image.png", 'wb') as f:
        f.write(png)
    driver.close()

    clip = ImageClip('website_image.png')
    video_width = int(clip.size[0] + 800)
    video_height = int(video_width / 1.5)
    bg_clip = ColorClip(size=(video_width, video_height), color=[228, 220, 220])

    scroll_speed = 180
    total_duration = (clip.h - 800) / scroll_speed
    # Crop an 800-px-high window that slides down the screenshot over time
    fl = lambda gf, t: gf(t)[int(scroll_speed * t):int(scroll_speed * t) + 800, :]
    clip = clip.fl(fl, apply_to=['mask'])

    video = CompositeVideoClip([bg_clip, clip.set_pos("center")])
    video.duration = total_duration
    if not output.endswith('.mp4'):
        output += '.mp4'
    video.write_videofile(output, fps=26)
    os.remove('website_image.png')
def create_thumbnail(self, clip: Clip):
    logging.info("Creating yt thumbnail")
    thumbnail_base = self.get_thumbnail_base(clip)
    emoji = self.get_emoji()
    overlay = ImageClip(os.path.join(self.asset_path, "overlay_thumbnail.png")).set_opacity(0.8)
    number = self.get_number_textclip()
    try:
        logo = (
            ImageClip(os.path.join(self.asset_path, utils.get_valid_game_name(self.game), "game_logo.png"))
            .fx(resize, 1.3)
            .set_position((0.04, 0.6), relative=True)
        )
    except FileNotFoundError:
        logging.warning("No game_logo in associated asset folder -> thumbnail will be created without logo")
        logo = None
    thumbnail = [
        thumbnail_base.set_duration(None),
        emoji.set_duration(None),
        overlay.set_duration(None),
        number.set_duration(None),
    ]
    if logo:
        thumbnail.append(logo.set_duration(None))
    thumbnail_result = CompositeVideoClip(thumbnail, size=[1280, 720])
    thumbnail_result.save_frame(os.path.join(self.compilation_dir, "thumbnail.png"), t=0, withmask=True)
def composite_clips(self, clips: dict):
    try:
        watermark = ImageClip(self.overlay).set_position((0.7, 0.1), relative=True)
    except FileNotFoundError:
        logging.warning(
            "No watermark found -> video will be created without watermark"
        )
        watermark = None
    # Requires metadata about the clip
    txts = self.generate_clip_text(self.metadata)
    composite_clips = {}
    for clip_id, clip in clips.items():
        composition = []
        duration = clip.duration
        composition.append(clip)
        if watermark:
            composition.append(watermark.set_duration(duration))
        composition.append(txts[clip_id].set_duration(duration))
        composite_clips[clip_id] = CompositeVideoClip(composition, size=self.target_res)
    return composite_clips
def process_clip():
    clip = VideoFileClip(file_path, target_resolution=[720, 1280])
    # I WAS going to get the last 10 seconds, but nvm: a negative end time
    # counts back from the end, so -duration + 10 keeps the first 10 seconds.
    if clip.duration > 10:
        clip = clip.subclip(0, -clip.duration + 10)
    safe_duration = max(0, clip.duration - 0.1)

    # Freeze frame stuff
    freeze_frame_sound = AudioFileClip("assets/wellberightback/sound.mp3")
    freeze_frame = ImageClip(clip.get_frame(safe_duration))\
        .fx(vfx.painting, black=0.001)\
        .fx(vfx.colorx, factor=0.8).set_duration(freeze_frame_sound.duration)
    text = ImageClip("assets/wellberightback/text.png")\
        .set_pos(lambda t: (50, 50))
    freeze_compos = CompositeVideoClip([freeze_frame, text])\
        .set_duration(freeze_frame_sound.duration).set_audio(freeze_frame_sound)

    # Final clip
    final_clip = concatenate_videoclips([clip, freeze_compos])
    return final_clip, [clip, freeze_frame_sound, freeze_frame, text, freeze_compos]
def convert(name, outname):
    # Look for the video in the folder
    p = Path("videos")
    files = list(p.glob("*"))
    for f in files:
        fstr = str(f).lower()
        if f.is_file() and f.suffix.lower() in video_formats and name in fstr:
            video_path = str(f)
            break
    print(video_path)
    # Look for the cover slide
    p = Path("Slides_start_end")
    files = list(p.glob("*"))
    for f in files:
        fstr = str(f).lower()
        if (f.is_file() and "1440" in str(f) and "end_1440" not in str(f)
                and name in fstr):
            cover_path = str(f)
            break
    print(cover_path)
    # Look for the closing slide
    p = Path("Slides_start_end", "end_1440.png")
    end_path = str(p)
    print(end_path)
    # Get the width and height of the video
    clip = VideoFileClip(video_path)
    clip_part = clip.subclip(0, 5)
    clip_size = clip_part.size
    # Resize the cover and closing slides to the same size as the video
    im = Image.open(cover_path)
    out = im.resize(clip_size)
    arr_in = np.array(out)
    im = Image.open(end_path)
    out = im.resize(clip_size)
    arr_out = np.array(out)
    # Generate the intro
    clip = ImageClip(arr_in).set_duration(5)
    clip.write_videofile('video_start.mp4', fps=24)
    clip.close()
    # Generate the outro
    clip = ImageClip(arr_out).set_duration(5)
    clip.write_videofile('video_end.mp4', fps=24)
    clip.close()
    # Generate the final video
    clip1 = VideoFileClip('video_start.mp4')
    clip2 = VideoFileClip(video_path)
    clip3 = VideoFileClip('video_end.mp4')
    final = concatenate_videoclips([clip1, clip2, clip3], method="compose")
    final.write_videofile(f'{outname}_final.mp4')
    final.close()
def compose(resources, params):
    """Creates a video clip out of the videos and the images of the game,
    as well as the audio from the description."""
    # Track the running duration of the composed clip
    current_duration = 0
    # Set the limit parameters
    process_images = True
    process_videos = True
    process_audio = True
    # Decide whether the images should be processed
    if 'image_limit' in params:
        if params['image_limit'] == 0:
            process_images = False
    # Decide whether the videos should be processed
    if 'video_limit' in params:
        if params['video_limit'] == 0:
            process_videos = False
    # Decide whether audio should be processed
    if 'generate_audio' in params:
        process_audio = params['generate_audio']
    # Add the videos to the composed clip
    if process_videos:
        for video in range(len(resources['videos'])):
            # Set the start of each video
            resources['videos'][video] = VideoFileClip(resources['videos'][video]).set_start(current_duration)
            # Update the running duration of the clip
            current_duration += resources['videos'][video].duration
    # Add the images to the composed clip
    if process_images:
        for image in range(len(resources['images'])):
            # Keep the original path in a work variable
            tmp = resources['images'][image]
            # Create an image clip and set its start properly
            resources['images'][image] = ImageClip(resources['images'][image], duration=5).set_start(current_duration)
            # Set the name of the image clip
            resources['images'][image].filename = tmp
            # Update the running duration of the clip
            current_duration += resources['images'][image].duration
    # Add the audio to the video clip
    if process_audio:
        # Create the final clip with audio
        return CompositeVideoClip(resources['videos'] + resources['images']).set_audio(
            set_up_audio_clip(resources['audio']))
    # Create the final clip without audio
    return CompositeVideoClip(resources['videos'] + resources['images'])
def test_issue_285():
    clip_1, clip_2, clip_3 = (
        ImageClip("media/python_logo.png", duration=10),
        ImageClip("media/python_logo.png", duration=10),
        ImageClip("media/python_logo.png", duration=10),
    )
    merged_clip = concatenate_videoclips([clip_1, clip_2, clip_3])
    assert merged_clip.duration == 30
    close_all_clips(locals())
def convert(outname):
    # Look for the Python Doc Es video in the folder.
    # The original Python Docs Es video was vertical. I had to transform it
    # and save it in landscape (and as mp4) using:
    # ffmpeg -i videos/proyecto\ python-docs-es.mp4 -vf 'split[original][copy];[copy]scale=ih*16/9:-1,crop=h=iw*9/16,gblur=sigma=20[blurred];[blurred][original]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2' videos/proyecto\ python-docs-es_reformat.mp4
    # See https://www.junian.net/tech/ffmpeg-vertical-video-blur/
    p = Path("videos", "proyecto python-docs-es_reformat.mp4")
    video_path = str(p)
    print(video_path)
    # Look for the cover slide
    p = Path("Slides_start_end", "python_doc_es_1440.png")
    cover_path = str(p)
    print(cover_path)
    # Look for the closing slide
    p = Path("Slides_start_end", "end_1440.png")
    end_path = str(p)
    print(end_path)
    # Get the width and height of the video
    clip = VideoFileClip(video_path)
    clip_part = clip.subclip(0, 5)
    clip_size = clip_part.size
    # Resize the cover and closing slides to the same size as the video
    im = Image.open(cover_path)
    out = im.resize(clip_size)
    arr_in = np.array(out)
    im = Image.open(end_path)
    out = im.resize(clip_size)
    arr_out = np.array(out)
    # Generate the intro
    clip = ImageClip(arr_in).set_duration(5)
    clip.write_videofile('video_start.mp4', fps=24)
    clip.close()
    # Generate the outro
    clip = ImageClip(arr_out).set_duration(5)
    clip.write_videofile('video_end.mp4', fps=24)
    clip.close()
    # Generate the final video
    clip1 = VideoFileClip('video_start.mp4')
    clip2 = VideoFileClip(video_path)
    clip3 = VideoFileClip('video_end.mp4')
    final = concatenate_videoclips([clip1, clip2, clip3], method="compose")
    final.write_videofile(f'{outname}_final.mp4')
    final.close()
def generate_leaderboard_clip(working_dir: WorkingDir, duration: float = 5.0, frames_per_second: int = 60):
    if working_dir.leaderboard.exists():
        clip = ImageClip(str(working_dir.leaderboard)).set_duration(duration)
        clip.write_videofile(str(working_dir.leaderboard_clip), fps=frames_per_second)
        print('Successfully generated leaderboard clip.')
    else:
        print('No leaderboard has been generated yet.')
def water_mark(request):
    files = request.FILES
    vdo = VideoFileClip(files.get('vdo').temporary_file_path())
    logo = ImageClip(files.get('logo').temporary_file_path())
    wm = (logo.set_duration(vdo.duration)
          .resize(height=50)                      # if you need to resize...
          .margin(right=8, bottom=8, opacity=0)   # (optional) logo-border padding
          .set_pos(("right", "bottom")))
    CompositeVideoClip([vdo, wm]).write_videofile('media/res.mp4')
    return JsonResponse({'video': 'http://127.0.0.1:8000/media/res.mp4'})
def _read_move_assets(self, assets_path):
    self.move_assets = dict()
    for k in os.listdir(assets_path):
        assert k.endswith('.png')
        png_file = os.path.join(assets_path, k)
        clip = ImageClip(png_file)
        # Key the clip by the filename without its '.png' extension
        self.move_assets[k[:-4]] = clip
def photo(self, max_duration: int = 0):
    """
    Build a CompositeVideoClip from the source photo

    Parameters
    ----------
    max_duration: int, optional
        Duration of the clip; the default of 0 falls back to 15 seconds

    Returns
    -------
    StoryBuild
        An object of StoryBuild
    """
    with Image.open(self.path) as im:
        image_width, image_height = im.size
    width_reduction_percent = (self.width / float(image_width))
    height_in_ratio = int((float(image_height) * float(width_reduction_percent)))
    clip = ImageClip(str(self.path)).resize(width=self.width, height=height_in_ratio)
    return self.build_main(clip, max_duration or 15)
def main():
    clips = []
    with open("names.txt") as f:
        name = f.readlines()
    print(name)
    for i in name:
        i = i.split('\n')[0]
        clips.append(make(i))
    print(clips)
    concatenate_videoclips(clips).set_fps(30).write_videofile("飞跃起点理.mp4")
    exit()
    # The code below is unreachable after exit(); kept as a scratch example.
    clip1 = ImageClip("./images/2.jpg")
    txt = TextClip("吼哇!123ASDasd".encode("utf-8"), font="SimSun", color='white', fontsize=48)
    txt_col = txt.on_color(size=(clip1.w, txt.h + 10), color=(0, 0, 0),
                           pos=(6, 'center'), col_opacity=0.6).set_pos(lambda t: ((200), (800)))
    w, h = moviesize = clip1.size
    txt_mov = txt_col.set_pos(lambda t: (max(w / 30, int(w - 1 * w * t)),
                                         max(5 * h / 6, int(100 * t))))
    CompositeVideoClip([clip1, txt_mov]).set_duration(1).set_fps(30).write_videofile("my_concatenation.mp4")
    CompositeVideoClip([clip1, txt_mov]).set_duration(1).set_fps(30).save_frame("test.png", t="00:00:01")
def export_to_gif(self, frame_duration=0.1, loop_mode=0):
    from .state import LoopMode
    imgs = [
        frame.svg.copy().normalize().draw(do_display=False, return_png=True)
        for frame in self.frames
    ]
    if loop_mode == LoopMode.REVERSE:
        imgs = imgs[::-1]
    elif loop_mode == LoopMode.PINGPONG:
        imgs = imgs + imgs[::-1]
    clips = [
        ImageClip(np.array(img)).set_duration(frame_duration)
        for img in imgs
    ]
    clip = concatenate_videoclips(clips, method="compose", bg_color=(255, 255, 255))
    file_path = os.path.join(ROOT_DIR, f"{self.uid}.gif")
    clip.write_gif(file_path, fps=24, verbose=False, logger=None)
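# Usage sketch for export_to_gif above; `anim` stands in for a hypothetical
# instance of the owning class. PINGPONG plays the frames forwards then
# backwards, so a 10-frame animation yields 20 frames at 0.1 s each.
anim.export_to_gif(frame_duration=0.1, loop_mode=LoopMode.PINGPONG)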
def render_video(env, agents, video_path, n_steps=60, fps=1, seed=None):
    # Initialization
    if seed is not None:
        set_seed(env, seed=seed)
    states = env.reset()

    # Set greedy flag
    for key, agent in agents.items():
        agent.is_greedy = True

    # Run agents
    frames = []
    for _ in tqdm(range(n_steps), 'Running agents', unit='frame'):
        # Select actions based on current states
        actions = {key: agent.act(states[key]) for key, agent in agents.items()}
        # Perform the selected actions
        next_states, rewards, dones, _ = env.step(actions)
        states = next_states
        # Save frame
        frames.append(env.render(mode='rgb_array'))

    # Create video (note: each frame is shown for `fps` seconds)
    clips = [ImageClip(frame).set_duration(fps) for frame in frames]
    concat_clip = concatenate_videoclips(clips, method="compose")
    concat_clip.write_videofile(video_path, fps=24)
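# Usage sketch for render_video above; `env` and `agents` are assumed to come
# from the surrounding project. Renders 60 greedy steps to an illustrative
# output path, each captured frame shown for 1 second.
render_video(env, agents, "rollout.mp4", n_steps=60, fps=1, seed=42)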
def create_mtg_gif(name, id, border):
    if border == 'm':  # Modern (post-8th Ed)
        card_upper_corner = (19, 38)
        gif_width = 202 - card_upper_corner[0]
        gif_height = 172 - card_upper_corner[1]
    elif border == 'c':  # Current (post-Magic 2015)
        card_upper_corner = (17, 34)
        gif_width = 204 - card_upper_corner[0]
        gif_height = 173 - card_upper_corner[1]
    else:  # Old (pre-8th Ed)
        card_upper_corner = (25, 30)
        gif_width = 196 - card_upper_corner[0]
        gif_height = 168 - card_upper_corner[1]

    mtg_card = Image.open(BytesIO(requests.get(get_mtg_image(id)).content))
    mtg_card = ImageClip(np.asarray(mtg_card)).resize((222, 310))

    get_giphy_gif(name)
    giphy_gif = (VideoFileClip('giphy_gif.mp4',
                               target_resolution=(gif_height, gif_width))
                 .set_pos(card_upper_corner))
    if giphy_gif.duration < 2:
        giphy_gif = giphy_gif.fx(loop, n=1 + int(2 // giphy_gif.duration))

    mtg_gif = CompositeVideoClip([mtg_card, giphy_gif])
    mtg_gif = mtg_gif.set_start(0).set_duration(giphy_gif.duration)

    # mtg_gif.write_gif("mtg_gif.gif")
    mtg_gif.write_videofile("mtg_gif.mp4",
                            codec='libx264',
                            bitrate=str(np.power(10, 7)),
                            verbose=False,
                            progress_bar=False,
                            audio=False,
                            ffmpeg_params=['-pix_fmt', 'yuv420p'])
def create_movie(filter, configuration):
    file_filter = "%s_%s_%s" % (configuration.country, configuration.organization_type.name, filter)
    log.info("Creating movie for %s" % file_filter)

    log.debug("Loading filenames")
    files = [
        filename for filename in os.listdir(settings.TOOLS["firefox"]["screenshot_output_dir"])
        if filename.startswith(file_filter)
    ]

    # some filters may not result in any files
    if not files:
        log.debug("No suitable images could be found for this Configuration / filter. Did you make screenshots?")
        return

    files.sort(key=alphanum_key)
    files = reversed(files)

    log.debug("Creating clips")
    clips = [
        ImageClip(settings.TOOLS["firefox"]["screenshot_output_dir"] + file).set_duration(0.2)
        for file in files
    ]

    log.debug("Writing file")
    concat_clip = concatenate_videoclips(clips, method="compose")
    concat_clip.write_videofile(
        "%svideo_%s.mp4" % (settings.TOOLS["firefox"]["screenshot_output_dir"], file_filter),
        fps=30)
def get_final_clip(mask_clip: ClipType,
                   input_clip: ClipType,
                   background: Union[List[float], PathType],
                   **videofileclip_args) -> FinalClipType:
    """
    Apply the mask_clip to the input_clip over the given background, ready to
    be used with gse.save_to_file; if no background is given, the mask_clip
    is simply returned.

    E.g.
        final_clip = get_final_clip(mask_clip, input_clip, [0, 255, 0])
        save_to_file(final_clip, "path/video.mp4")

    :param mask_clip: got with gse.get_mask_clip
    :param input_clip: got with gse.get_input_clip
    :param background: color [R, G, B], or path to a video/image, or an empty
        string (in which case the mask_clip is directly returned)
    :param videofileclip_args: additional arguments for moviepy.video.io.VideoFileClip.__init__
    """
    if background != "":
        usable_mask = mask_clip.fx(resize, input_clip.size).to_mask()
        masked_clip = input_clip.set_mask(usable_mask)
        if type(background) == list:  # if color
            rgb = (background[0], background[1], background[2])
            print(f"Using the RGB color {rgb} as the background of {input_clip.filename}")
            to_return = masked_clip.on_color(color=rgb)
        elif is_image(background):
            print(f"Using {background} as image source for the background of {input_clip.filename}")
            background_clip = ImageClip(background, duration=masked_clip.duration)
            to_return = smooth_composite(background_clip, masked_clip)
        else:
            print(f"Using {background} as video source for the background of {input_clip.filename}")
            background_clip = VideoFileClip(background, **videofileclip_args) \
                .fx(loop, duration=masked_clip.duration).set_duration(input_clip.duration)
            to_return = smooth_composite(background_clip, masked_clip)
        to_return.filename = input_clip.filename
        return to_return
    else:
        print("No background selected, skipping compositing")
        return mask_clip
def make_image_clip(
    img_file_path: str,
    duration: int,
    fps: int,
    start_at: int,
    end_at: int,
    fade_time: tuple,
) -> ImageClip:
    """
    Create an ImageClip from an image path.

    :param img_file_path: path to the image
    :param duration: display duration
    :param fps: frame rate
    :param start_at: start position on the track
    :param end_at: end position on the track
    :param fade_time: fade-in/fade-out times, in seconds
    :return: an ImageClip
    """
    image_clip = (ImageClip(img_file_path, duration=duration * fps)
                  .set_fps(fps).set_start(start_at).set_end(end_at))
    image_clip = image_clip.set_pos("center")
    # Fade in and out
    image_clip = image_clip.fx(vfx.fadein, duration=fade_time[0])
    image_clip = image_clip.fx(vfx.fadeout, duration=fade_time[1])
    return image_clip
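# Usage sketch for make_image_clip above; "slide.png" is an illustrative file
# name, and the exact meaning of `duration` (it is multiplied by fps before
# being passed to ImageClip) follows the function as written. Places a
# centered slide on the track from t=2s to t=7s with half-second fades.
slide = make_image_clip("slide.png", duration=5, fps=24,
                        start_at=2, end_at=7, fade_time=(0.5, 0.5))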
def add_static_image_to_audio(image_path, audio_path, output_path):
    """Create and save a video file to `output_path` after combining a static
    image located at `image_path` with an audio file at `audio_path`"""
    # create the audio clip object
    audio_clip = AudioFileClip(audio_path)
    # create the image clip object
    image_clip = ImageClip(image_path)
    # use the set_audio method from the image clip to combine the audio with the image
    video_clip = image_clip.set_audio(audio_clip)
    # set the duration of the new clip to be the duration of the audio clip
    video_clip.duration = audio_clip.duration
    # set the FPS to 1
    video_clip.fps = 1
    # write the resulting video clip
    video_clip.write_videofile(output_path)
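# Usage sketch for add_static_image_to_audio above; the file names are
# illustrative. With fps set to 1, the render encodes a single frame per
# second, which keeps writing a static image reasonably fast.
add_static_image_to_audio("cover.png", "podcast.mp3", "episode.mp4")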
def make(resolution=(3840, 2160), blur=0, debug_mode=False, gradient_opacity=1, file_name=''):
    # opens art, then adds blur, then blows up
    art_image = Image.open('lib/temp/art.png')
    art_image = art_image.filter(ImageFilter.GaussianBlur(radius=blur))
    if resolution > art_image.size:
        if debug_mode:
            print('Art smaller than background needed')
        art_image = art_image.resize(
            (math.ceil(resolution[0] * 1.05), math.ceil(resolution[0] * 1.05)),
            Image.ANTIALIAS)
    else:
        if debug_mode:
            print('Art larger than background needed')
    if debug_mode:
        print('Background size before crop: ' + str(art_image.size))

    # cropping the blurred art
    width, height = art_image.size
    left = (width - resolution[0]) / 2
    top = (height - resolution[1]) / 2
    right = (width + resolution[0]) / 2
    bottom = (height + resolution[1]) / 2
    art_image = art_image.crop((left, top, right, bottom))
    art_image.save('lib/temp/' + file_name + '.png', 'PNG')
    art_image.close()

    # determines if the art is dark with is_dark
    if is_dark.calc(debug_mode=debug_mode):
        if debug_mode:
            print('Detected dark art; using white gradient')
        gradient_clip = ImageClip('themes/radio/gradient_white.png',
                                  transparent=True).set_opacity(gradient_opacity)
    else:  # it's light
        if debug_mode:
            print('Detected light art; using black gradient')
        gradient_clip = ImageClip('themes/radio/gradient_black.png',
                                  transparent=True).set_opacity(gradient_opacity)
    # resize() returns a new clip; the original discarded the result
    gradient_clip = gradient_clip.resize(resolution)

    art_clip = ImageClip('lib/temp/' + file_name + '.png', transparent=True)
    transparent = ImageClip('lib/transparent.png').resize(resolution)
    # again, the transparent clip needs to go first; this forces a given resolution
    background_clip = CompositeVideoClip([transparent, art_clip, gradient_clip])
    background_clip.save_frame('lib/temp/' + file_name + '.png')
def render_video(sentences, output_path, audio_path):
    print("Rendering video...")
    image_slides = []
    for key, sentence in enumerate(sentences):
        image_slide = ImageClip("{}{}".format(key, CONVERTED_IMAGE_SUFFIX)).set_duration(10)
        text_slide = ImageClip("{}{}{}".format(key, SENTENCE_IMAGE_TAG,
                                               CONVERTED_IMAGE_SUFFIX)).set_duration(10)
        slided_slide = text_slide.fx(transfx.slide_in, 1,
                                     get_slide_position_by_sentence_key(key))
        slides_video = CompositeVideoClip([image_slide, slided_slide])
        image_slides.append(slides_video)
    final_video = concatenate(image_slides)
    final_video.write_videofile(output_path, audio=audio_path, fps=DEFAULT_VIDEO_FPS)
def convert_to_video(imgs):
    fps = 30
    clips = [ImageClip(m).set_duration(1 / fps) for m in imgs]
    folder = "/".join(imgs[0].split("/")[:-1])
    video = concatenate(clips, method="compose")
    filename = '%s/video.mp4' % folder
    video.write_videofile(filename, fps=fps)
    return filename
def process_clip():
    clip = VideoFileClip("assets/theboys.mp4")
    picture = ImageClip(file_path, duration=clip.duration)\
        .fx(vfx.resize, newsize=[893, 288])\
        .set_pos(lambda t: (192, 432))
    final_clip = CompositeVideoClip([clip, picture])
    return final_clip, [clip, picture]
def get_emoji(self) -> ImageClip:
    emoji = random.choice(os.listdir(os.path.join(self.asset_path, "emojis")))
    return (
        ImageClip(os.path.join(self.asset_path, "emojis", emoji))
        .fx(resize, 2.0)
        .fx(rotate, -25)
        .set_position(("right", "top"), relative=True)
    )
def create_video(self, effect):
    image = ImageClip(self.files['png_' + effect])
    if effect == 'nightcore':
        sound = AudioFileClip(self.files['wav_speed'])
    if effect == 'chipmunks':
        sound = AudioFileClip(self.files['wav_pitch_up'])
    if effect == 'male':
        sound = AudioFileClip(self.files['wav_pitch_down'])
    if sound.duration > 600 or sound.duration < 60:
        return 'audio too short or too long'
    if effect == 'nightcore':
        image_pub = ImageClip('resources/backgrounds/nightcorizer.png')
        image_pub = image_pub.set_duration(20)
        image = image.set_duration(sound.duration - 20)
        final_video = concatenate_videoclips([image, image_pub], method="compose")
    else:
        image = image.set_duration(sound.duration)
        final_video = concatenate_videoclips([image], method="compose")
    final_video = final_video.set_audio(sound)
    final_video.write_videofile(self.create_file('_' + effect + '.mp4'),
                                fps=20,
                                preset='ultrafast',
                                threads=4,
                                progress_bar=False,
                                verbose=False)
    self.files['mp4_' + effect] = self.create_file('_' + effect + '.mp4')
def chart_highlights(week, position):
    gains = ['airplay_gain', 'stream_gain', 'digital_gain', 'highest_ranking_debut']
    top_offset = 0
    n_movies = 0
    gainer_movies = []
    for gain in gains:
        if getattr(week, gain) == position:
            top_offset = n_movies * 50
            n_movies += 1
            gainer_image = ImageClip(
                join(settings.IMAGE_ASSETS, "{}.png".format(gain))
            ).set_duration(duration)
            gainer_movies.append(gainer_image.set_pos(
                lambda t, top_offset=top_offset: (
                    min(10, -position_image_size['x'] + t * 400),
                    (1080 - 20 - position_image_size['y'] - 55 - top_offset,
                     int(1060 - position_image_size['y'] + 380 * t - 380 * 13))[t > 13])))
    return gainer_movies
def write_output(audio_clip, output_filename, background=None, verbose=False):
    if not background:
        audio_clip.write_audiofile(
            output_filename,
            fps=16000,
            nbytes=2,
            bitrate='16k',
            verbose=verbose
        )
    else:
        clip = ImageClip(background, duration=audio_clip.duration)
        clip = clip.set_audio(audio_clip)
        clip.write_videofile(
            output_filename,
            fps=1,
            audio_fps=16000,
            audio_nbytes=2,
            audio_bitrate='16k',
            verbose=verbose
        )
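# Usage sketch for write_output above; `my_audio_clip` and the file names are
# illustrative. Without a background it writes plain audio; with one, a
# still-image video carrying the same audio.
write_output(my_audio_clip, "speech.mp3")                        # audio only
write_output(my_audio_clip, "speech.mp4", background="bg.png")   # still-image video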
def temp_audio_file(open_data=True, duration=2, suffix='.mp3', delete=True):
    assert suffix in ['.mp3', '.mp4'], 'Invalid suffix type:%s' % suffix
    # logic taken from https://zulko.github.io/moviepy/ref/AudioClip.html?highlight=sin
    with utils.temp_file(suffix=suffix, delete=delete) as temp_file:
        audio_frames = lambda t: 2 * [np.sin(404 * 2 * np.pi * t)]
        audioclip = AudioClip(audio_frames, duration=duration)
        if suffix == '.mp3':
            audioclip.write_audiofile(temp_file, verbose=False)
        else:
            image = ImageClip(np.random.rand(30, 30, 3) * 255)
            videoclip = image.set_audio(audioclip)
            videoclip.duration = duration
            videoclip.fps = 24
            videoclip.write_videofile(temp_file, verbose=False)
        try:
            if not open_data:
                yield temp_file
            else:
                # read as bytes; mp3/mp4 data is not valid text
                with open(temp_file, 'rb') as f:
                    data = f.read()
                yield data
        finally:
            pass
variations = OrderedDict()
for p in permutations((0, 1, 2, 3, 4)):
    out = []
    for elem in p:
        if len(out) == 0:
            out = chunks[elem].copy()
        else:
            out = concatenate((out, chunks[elem].copy()))
    variations[str(p)] = out.copy()

cnt = 0
for p in list(variations.keys()):
    cnt += 1
    print(cnt, end=' ')
    p = str(p)

    # title clip
    title_clip = TextClip(p, color='white', fontsize=30).set_pos('center').set_duration(2)
    clips.append(CompositeVideoClip([title_clip], size=screensize))

    # generate output files
    display_audio_clip(variations[p], sample_freq, p)
    wavfile.write('sound.wav', sample_freq, variations[p].astype(int16))

    # load them with MoviePy
    aud_clip = AudioFileClip('sound.wav', fps=sample_freq)
    im_clip = ImageClip("tmp.png")
    im_clip = im_clip.set_audio(aud_clip)
    im_clip = im_clip.set_duration(aud_clip.duration)
    clips.append(CompositeVideoClip([im_clip], size=screensize))

video = concat_clips(clips)
video.to_videofile("SamsungVariations.avi", codec='mpeg4')
def create_photo_quality_video(request):
    # load images
    image1 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image2 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image3 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image4 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image5 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image6 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image7 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image8 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image9 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image10 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))

    # concatenate clips, play one clip after the other
    image_clips = concatenate_videoclips([image3.set_duration(2.5),
                                          image4.set_duration(2.5),
                                          image5.set_duration(2.5),
                                          image6.set_duration(2.5),
                                          image7.set_duration(2.5),
                                          image8.set_duration(2.5)])
    title_image_clips = concatenate_videoclips([image1.set_duration(2.5),
                                                image2.set_duration(2.5)])
    txt_title = (TextClip("Just Back From...Santiago, Chile", fontsize=80,
                          font="Century-Schoolbook-Roman", color="white")
                 .margin(top=5, opacity=0)
                 .set_duration(5)
                 .set_position(("center", "top")))
    title_clip = (CompositeVideoClip([title_image_clips, txt_title])
                  .fadein(0.5).fadeout(0.5))

    stats_image_clips = concatenate_videoclips([image9.set_duration(2.5),
                                                image10.set_duration(2.5)])
    txt_stats = (TextClip("See Santi's recent trip of 1,836 round trip miles, \n with stops..",
                          fontsize=80, font="Century-Schoolbook-Roman", color="white")
                 .margin(top=5, opacity=0)
                 .set_duration(5)
                 .set_position(("center", "top")))
    stats_clip = (CompositeVideoClip([stats_image_clips, txt_stats])
                  .fadein(.5).fadeout(.5))

    final_clip = concatenate_videoclips([title_clip, image_clips, stats_clip], method="compose")
    audio_clip = AudioFileClip("media/music.aac").subclip(0, final_clip.duration)
    final_clip = final_clip.set_audio(audio_clip).afx(afx.audio_fadeout, 1.0)
    final_clip.write_videofile('videos/randomBoastablepicsVideo.mp4', fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')
    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
def generate_video(test=True):
    video_list = []
    sub_video = subscribers_video()
    video_list.append(sub_video)
    week = Week.objects.all()[0]
    for i, position in enumerate(week.position_set.all()):
        if i == 2 and test:
            break
        if i == 50:
            break
        video = VideoFileClip(join(settings.VIDEO_ASSETS,
                                   "{} - {}.mp4".format(position.song.name,
                                                        position.song.artist))).set_duration(10)
        # video = audio_fadeout(video, 2)
        graph = (ImageClip(join(settings.IMAGES, "graph{}.png".format(position.position)))
                 .set_duration(duration))
        graph = graph.set_pos(lambda t: (
            max(1445, 1800 - t * 700),
            (5, int(20 - 400 * t + 400 * 13.2))[t > 13.2]))

        w, h = video.size
        position_image = ImageClip(
            join(settings.IMAGES, "pos{}.png".format(position.position))
        ).set_duration(duration)
        change_image = ImageClip(
            join(settings.IMAGES, "change{}.png".format(position.position))
        ).set_duration(duration)
        lower_third_image = ImageClip(
            join(settings.IMAGES, "lower_third{}.png".format(position.position))
        ).set_duration(duration)

        # I am *NOT* explaining the formula; understand it if you can/want to.
        # txt_mov = txt_col.set_pos(lambda t: (max(w/30, int(w-0.5*w*t)), max(5*h/6, int(100*t))))
        txt_mov = position_image.set_pos(
            lambda t: (min(0, -position_image_size['x'] + t * 400),
                       (1080 - 20 - position_image_size['y'],
                        int(1060 - position_image_size['y'] + 380 * t - 380 * 13))[t > 13]))
        change_image_mov = change_image.set_pos(
            lambda t: (min(change_image_size['x'], -position_image_size['x'] + t * 700),
                       (1080 - 20 - position_image_size['y'],
                        int(1060 - position_image_size['y'] + 400 * t - 400 * 13.2))[t > 13.2]))
        lower_third_mov = lower_third_image.set_pos(
            lambda t: (min(change_image_size['x'] + position_image_size['y'],
                           -lower_third_size['x'] + t * 2500),
                       (1080 - 20 - lower_third_size['y'],
                        int(1060 - lower_third_size['y'] + 430 * t - 430 * 13.4))[t > 13.4]))
        gainer_mov = chart_highlights(week, position.position)
        final = CompositeVideoClip([video, lower_third_mov, change_image_mov, txt_mov, graph]
                                   + gainer_mov, size=(1920, 1080)).fadeout(0.2)
        video_list.append(final)
    FINAL = concatenate_videoclips(list(reversed(video_list)))
    FINAL.write_videofile(join(settings.VIDEOS, "billboard_top_50_this_week.mp4"),
                          fps=24, codec='libx264')
def create_overall_quality_video(request):
    trip_stats = process_user_stats()

    # load images
    image1 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image2 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image3 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image4 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image5 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image6 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image7 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image8 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image9 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image10 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')

    # calculate max width and height
    images = []
    images.extend([image1, image2, image3, image4, image5,
                   image6, image7, image8, image9, image10])
    max_width = 0
    max_height = 0
    for img in images:
        if img.size[0] > max_width:
            max_width = img.size[0]
        if img.size[1] > max_height:
            max_height = img.size[1]

    # create blurred images
    image1 = CompositeVideoClip([image1.resize((max_width, max_height)).fl_image(blur), image1.resize(.95)])
    image2 = CompositeVideoClip([image2.resize((max_width, max_height)).fl_image(blur), image2.resize(.95)])
    image3 = CompositeVideoClip([image3.resize((max_width, max_height)).fl_image(blur), image3.resize(.95)])
    image4 = CompositeVideoClip([image4.resize((max_width, max_height)).fl_image(blur), image4.resize(.95)])
    image5 = CompositeVideoClip([image5.resize((max_width, max_height)).fl_image(blur), image5.resize(.95)])
    image6 = CompositeVideoClip([image6.resize((max_width, max_height)).fl_image(blur), image6.resize(.95)])
    image7 = CompositeVideoClip([image7.resize((max_width, max_height)).fl_image(blur), image7.resize(.95)])
    image8 = CompositeVideoClip([image8.resize((max_width, max_height)).fl_image(blur), image8.resize(.95)])
    image9 = CompositeVideoClip([image9.resize((max_width, max_height)).fl_image(blur), image9.resize(.95)])
    image10 = CompositeVideoClip([image10.resize((max_width, max_height)).fl_image(blur), image10.resize(.95)])

    # concatenate clips, play one clip after the other
    image_clips = concatenate_videoclips([image1.set_duration(2).fadein(.5).fadeout(.5),
                                          image2.set_duration(2).fadein(.5).fadeout(.5),
                                          image3.set_duration(2).fadein(.5).fadeout(.5),
                                          image4.set_duration(2).fadein(.5).fadeout(.5),
                                          image5.set_duration(2).fadein(.5).fadeout(.5),
                                          image6.set_duration(2).fadein(.5).fadeout(.5),
                                          image7.set_duration(2).fadein(.5).fadeout(.5),
                                          image8.set_duration(2).fadein(.5).fadeout(.5),
                                          image9.set_duration(2).fadein(.5).fadeout(.5),
                                          image10.set_duration(2).fadein(.5).fadeout(.5)])
    title_clip = (TextClip("Just Back From...", fontsize=35, font="Century-Schoolbook-Roman",
                           color="white", kerning=-2, interline=-1, bg_color='#e04400',
                           method='caption', align='center', size=(max_width, max_height))
                  .margin(top=5, opacity=0)
                  .set_duration(3).fadein(.5).fadeout(.5)
                  .set_position(("center", "top")))
    stats_clip = (TextClip("See Santi's recent trip of 1,836 round trip miles, with stops..",
                           fontsize=35, font="Century-Schoolbook-Roman", color="white",
                           kerning=-2, interline=-1, bg_color='#e04400', method='caption',
                           align='center', size=(max_width, max_height))
                  .margin(top=5, opacity=0)
                  .set_duration(4).fadein(.5).fadeout(.5)
                  .set_position(("center", "top")))
    final_clip = concatenate_videoclips([title_clip, image_clips, stats_clip],
                                        method="compose", padding=-1)
    audio_clip = AudioFileClip("media/music.aac").subclip(0, final_clip.duration)
    final_clip = final_clip.set_audio(audio_clip).afx(afx.audio_fadeout, 1.5)
    # write_videofile -> preset:
    # Sets the time that FFMPEG will spend optimizing the compression.
    # Choices are: ultrafast, superfast, fast, medium, slow, superslow.
    # Note that this does not impact the quality of the video, only the size
    # of the video file. So choose ultrafast when you are in a hurry and file
    # size does not matter.
    final_clip.write_videofile('videos/overallQualityVideo.mp4', fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')
    # final_clip.write_gif('videos/overallQuality.gif', fps=23)
    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
#!/usr/bin/env python
from moviepy.editor import AudioFileClip, ImageClip
from sys import argv

audio = AudioFileClip(argv[1])
clip = ImageClip(argv[2]).set_duration(audio.duration).set_audio(audio)
clip.write_videofile(argv[3], fps=24)
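# Example invocation of the script above (the script and file names are
# illustrative): audio path, image path, then output path.
#   python still_to_video.py narration.mp3 cover.png out.mp4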
DURATION = 25  # seconds
LEFTCOL = 450
RIGHTCOL = 490
TEXTSIZE = 28
LINE_HEIGHT = 50
SCROLLSPEED = 130
BOTTOM_START = H * 1.2
N_NAMES = 30

# load image that does not move at all
LOGO_POS = 950, 300
LOGO_SIZE = 235, 298
LOGO = ImageClip("panda.png").resize(LOGO_SIZE).set_pos(LOGO_POS)
LOGO.duration = DURATION

# create fake text
fake = Factory.create()
text = []
while len(text) < N_NAMES:
    job = fake.job()
    name = fake.name()
    if len(job) < 25:
        text.append((job, name))


def make_frame(t):
    """Draw text elements in each frame"""
def create_simple_video(request):
    # load images
    image1 = ImageClip("media/real pics/1.jpeg").set_duration(2)
    image2 = ImageClip("media/real pics/2.jpeg").set_duration(2)
    image3 = ImageClip("media/real pics/3.jpeg").set_duration(2)
    image4 = ImageClip("media/real pics/4.jpeg").set_duration(2)
    image5 = ImageClip("media/real pics/5.jpeg").set_duration(2)
    image6 = ImageClip("media/real pics/6.jpeg").set_duration(2)
    image7 = ImageClip("media/real pics/7.jpeg").set_duration(2)
    image8 = ImageClip("media/real pics/8.jpeg").set_duration(2)
    image9 = ImageClip("media/real pics/9.jpeg").set_duration(2)
    image10 = ImageClip("media/real pics/10.jpeg").set_duration(2)

    # concatenate clips, play one clip after the other
    image_clips = concatenate_videoclips([image1.fadein(.5).fadeout(.5),
                                          image2.fadein(.5).fadeout(.5),
                                          image3.fadein(.5).fadeout(.5),
                                          image4.fadein(.5).fadeout(.5),
                                          image5.fadein(.5).fadeout(.5),
                                          image6.fadein(.5).fadeout(.5),
                                          image7.fadein(.5).fadeout(.5),
                                          image8.fadein(.5).fadeout(.5),
                                          image9.fadein(.5).fadeout(.5),
                                          image10.fadein(.5).fadeout(.5)])
    title_clip = (TextClip("Just Back From...", fontsize=35, font="Century-Schoolbook-Roman",
                           color="white", kerning=-2, interline=-1, bg_color='#e04400',
                           method='caption', align='center', size=(image_clips.w, image_clips.h))
                  .margin(top=5, opacity=0)
                  .set_duration(3).fadein(.5).fadeout(.5)
                  .set_position(("center", "top")))
    stats_clip = (TextClip("See Santi's recent trip of 1,836 round trip miles, with stops..",
                           fontsize=35, font="Century-Schoolbook-Roman", color="white",
                           kerning=-2, interline=-1, bg_color='#e04400', method='caption',
                           align='center', size=(image_clips.w, image_clips.h))
                  .margin(top=5, opacity=0)
                  .set_duration(3).fadein(.5).fadeout(.5)
                  .set_position(("center", "top")))
    final_clip = concatenate_videoclips([title_clip, image_clips, stats_clip],
                                        method="compose", padding=-1)
    audio_clip = AudioFileClip("media/music.aac").subclip(0, final_clip.duration)
    final_clip = final_clip.set_audio(audio_clip).afx(afx.audio_fadeout, 1.0)
    final_clip.write_videofile('videos/myPicsVideo.mp4', fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')
    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)