def create_photo_quality_video(request):
    # load images
    image1 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image2 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image3 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image4 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image5 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image6 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image7 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image8 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image9 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))
    image10 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/")))

    # concatenate clips, play one clip after the other
    image_clips = concatenate_videoclips([image3.set_duration(2.5), image4.set_duration(2.5),
                                          image5.set_duration(2.5), image6.set_duration(2.5),
                                          image7.set_duration(2.5), image8.set_duration(2.5)])

    title_image_clips = concatenate_videoclips([image1.set_duration(2.5), image2.set_duration(2.5)])
    txt_title = (TextClip("Just Back From...Santiago, Chile", fontsize=80,
                          font="Century-Schoolbook-Roman", color="white")
                 .margin(top=5, opacity=0)
                 .set_duration(5)
                 .set_position(("center", "top")))
    title_clip = (CompositeVideoClip([title_image_clips, txt_title])
                  .fadein(0.5).fadeout(0.5))

    stats_image_clips = concatenate_videoclips([image9.set_duration(2.5), image10.set_duration(2.5)])
    txt_stats = (TextClip("See Santi's recent trip of 1,836 round trip miles, \n with stops..",
                          fontsize=80, font="Century-Schoolbook-Roman", color="white")
                 .margin(top=5, opacity=0)
                 .set_duration(5)
                 .set_position(("center", "top")))
    stats_clip = (CompositeVideoClip([stats_image_clips, txt_stats])
                  .fadein(.5).fadeout(.5))

    final_clip = concatenate_videoclips([title_clip, image_clips, stats_clip], method="compose")
    audio_clip = AudioFileClip("media/music.aac").subclip(0, final_clip.duration)
    final_clip = final_clip.set_audio(audio_clip).afx(afx.audio_fadeout, 1.0)
    final_clip.write_videofile('videos/randomBoastablepicsVideo.mp4', fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
def create_supercut(regions):
    subclips = []
    # tuple-unpacking lambdas are Python 2 only; a comprehension works on both 2 and 3
    filenames = set(filename for filename, _ in regions)
    video_files = {filename: VideoFileClip(filename) for filename in filenames}
    for filename, region in regions:
        subclip = video_files[filename].subclip(*region)
        subclips.append(subclip)
    if not subclips:
        return None
    return concatenate_videoclips(subclips)
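# Hedged usage sketch for create_supercut above. `regions` is assumed (from the code) to be a
# list of (filename, (start, end)) pairs with times in seconds; the file names below are
# placeholders, not files shipped with this project.
example_regions = [
    ("interview_a.mp4", (12.0, 18.5)),
    ("interview_b.mp4", (3.0, 9.0)),
    ("interview_a.mp4", (40.0, 44.0)),
]
supercut = create_supercut(example_regions)
if supercut is not None:
    supercut.write_videofile("supercut.mp4")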
def edit_racestart(cls, previous_file):
    """
    Updates the race start parameters in the configuration file
    and builds a test video.
    """
    try:
        print("Editing configuration file {}".format(previous_file))
        config = Configuration(previous_file)
        config.modify_racestart()

        print("Creating low-quality video as {}".format(config.output_video))
        print("If video trimming needs to be adjusted, run the ",
              "Project CARS Replay Enhancer with the `-t` option.")
        print("\n")
        print("To synchronize telemetry with video, run the ",
              "Project CARS Replay Enhancer with the `-r` option.")
        print("Set the synchronization offset to the value shown ",
              "on the Timer when the viewed car crosses the start ",
              "finish line to begin lap 2.")

        try:
            replay = cls(config.config_file)
        except ValueError as error:
            print("Invalid JSON in configuration file: {}".format(error))
        else:
            start_video = replay.build_default_video(False)
            end_video = replay.build_default_video(False)
            start_video = start_video.set_duration(
                start_video.duration).subclip(0, 185)
            if replay.show_champion:
                end_video = end_video.set_duration(
                    end_video.duration).subclip(end_video.duration - 120)
            else:
                end_video = end_video.set_duration(
                    end_video.duration).subclip(end_video.duration - 100)
            output = mpy.concatenate_videoclips([start_video, end_video])
            output.write_videofile(replay.output_video, fps=10, preset='superfast')
    except KeyboardInterrupt:
        raise
def cut_movie(movie_fol, movie_name, out_movie_name, subclips_times):
    from moviepy import editor
    # subclips_times [(3, 4), (6, 17), (38, 42)]
    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    subclips = []
    for from_t, to_t in subclips_times:
        clip = video.subclip(from_t, to_t)
        subclips.append(clip)
    final_clip = editor.concatenate_videoclips(subclips)
    final_clip.write_videofile(op.join(movie_fol, out_movie_name))
def writeClips(selectedSubtitles, outputBase):
    clips = []
    count = 0
    superclips = 0
    volumeFn = lambda array: np.sqrt(((1.0 * array) ** 2).mean())
    desiredVolume = 0.03
    desiredHeight = 720
    # create subtitle file to go along with it
    newSubs = pysrt.srtfile.SubRipFile()
    for CurrentSubtitle in selectedSubtitles:
        print(CurrentSubtitle)
        appendSubtitle(newSubs, CurrentSubtitle)
        movieFilename = getFilename(CurrentSubtitle.filename)
        clip = mpy.VideoFileClip(movieFilename).subclip(CurrentSubtitle.timeStart, CurrentSubtitle.timeEnd)
        volume = volumeFn(clip.audio.to_soundarray())
        clip.audio = clip.audio.fx(mpy.afx.volumex, desiredVolume / volume)
        clip = mpyfx.resize(clip, width=clip.w * desiredHeight / clip.h, height=desiredHeight)
        clips.append(clip)
        count += 1
        if count == NUM_SEGMENTS_MAX:
            superClip = mpy.concatenate_videoclips(clips, method="compose")
            superClip.write_videofile(outputBase + str(superclips) + ".mp4")
            superClip = None
            clips = []
            newSubs.save(outputBase + str(superclips) + ".srt")
            gc.collect()
            count = 0
            superclips += 1
            newSubs = pysrt.srtfile.SubRipFile()
    print("Concatenating clips")
    superClip = mpy.concatenate_videoclips(clips, method="compose")
    print("Writing final clip")
    if superclips > 0:
        superClip.write_videofile(outputBase + str(superclips) + ".mp4")
        newSubs.save(outputBase + str(superclips) + ".srt")
    else:
        superClip.write_videofile(outputBase + ".mp4")
        newSubs.save(outputBase + ".srt")
    superClip = None
    clips = []
    newSubs = None
    gc.collect()
def new_configuration(cls):
    """
    Creates a new configuration file and builds a test video.
    """
    try:
        print("No configuration file provided.")
        print("Creating new configuration file.")
        config = Configuration()
        config.new_configuration()

        print("Creating low-quality video as {}".format(config.output_video))
        print("If video trimming needs to be adjusted, run the ",
              "Project CARS Replay Enhancer with the `-t` option.")
        print("\n")
        print("To synchronize telemetry with video, run the ",
              "Project CARS Replay Enhancer with the `-r` option.")
        print("Set the synchronization offset to the value shown "
              "on the Timer when the viewed car crosses the start ",
              "finish line to begin lap 2.")
        print("Please wait. Telemetry being processed and ",
              "rendered. If this is the first time this data has ",
              "been used, it may take longer.")

        try:
            replay = cls(config.config_file)
        except ValueError as error:
            print("Invalid JSON in configuration file: {}".format(error))
        else:
            start_video = replay.build_default_video(False)
            end_video = replay.build_default_video(False)
            start_video = start_video.set_duration(
                start_video.duration).subclip(0, 185)
            if replay.show_champion:
                end_video = end_video.set_duration(
                    end_video.duration).subclip(end_video.duration - 120)
            else:
                end_video = end_video.set_duration(
                    end_video.duration).subclip(end_video.duration - 100)
            output = mpy.concatenate_videoclips([start_video, end_video])
            output.write_videofile(replay.output_video, fps=10, preset='superfast')
    except KeyboardInterrupt:
        raise
def create_compilation(filename, index):
    dims = get_video_dimensions(filename)
    subclips = []
    video_file = VideoFileClip(filename)
    for label in sorted(index.keys()):
        label_img_filename = create_title_frame(label_as_title(label), dims)
        label_clip = ImageClip(label_img_filename, duration=2)
        os.remove(label_img_filename)
        subclips.append(label_clip)
        for region in index[label]:
            subclip = video_file.subclip(*region)
            subclips.append(subclip)
    if not subclips:
        return None
    return concatenate_videoclips(subclips)
def add_text_to_movie(movie_fol, movie_name, out_movie_name, subs, fontsize=50,
                      txt_color='red', font='Xolonium-Bold'):
    # Should install ImageMagick
    # For centos6: https://www.vultr.com/docs/install-imagemagick-on-centos-6
    from moviepy import editor

    def annotate(clip, txt, txt_color=txt_color, fontsize=fontsize, font=font):
        """ Writes a text at the bottom of the clip. """
        txtclip = editor.TextClip(txt, fontsize=fontsize, font=font, color=txt_color)
        # txtclip = txtclip.on_color((clip.w, txtclip.h + 6), color=(0, 0, 255), pos=(6, 'center'))
        cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 'bottom'))])
        return cvc.set_duration(clip.duration)

    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    annotated_clips = [annotate(video.subclip(from_t, to_t), txt)
                       for (from_t, to_t), txt in subs]
    final_clip = editor.concatenate_videoclips(annotated_clips)
    final_clip.write_videofile(op.join(movie_fol, out_movie_name))
def create_summary(filename, regions):
    """ Join segments.

    Args:
        filename (str): path to the source video
        regions (list): (start, end) pairs, in seconds, to keep

    Returns:
        VideoFileClip: the joined subclips
    """
    subclips = []
    input_video = VideoFileClip(filename)
    last_end = 0
    for (start, end) in regions:
        subclip = input_video.subclip(start, end)
        subclips.append(subclip)
        last_end = end
    return concatenate_videoclips(subclips)
def filter_add_intertitle(video_clip, text, color, font, fontsize, position,
                          duration, width, height):
    text_clip = generate_text_clip(
        text, width * TEXT_WIDTH_FACTOR,
        color=color, font=font, fontsize=fontsize)
    composite_clip = CompositeVideoClip([text_clip.set_pos(position)], (width, height))
    intertitle_clip = composite_clip.subclip(0, duration)
    return concatenate_videoclips([intertitle_clip, video_clip], method='compose')
def excerpt_and_compile_video_file(src_path, dest_path, timestamps,
                                   left_padding=0.01, right_padding=0.01,
                                   video_codec=DEFAULT_VIDEO_CODEC,
                                   audio_codec=DEFAULT_VIDEO_AUDIO_CODEC):
    """
    Creates a new video compiled from cuts of `src_path`.

    timestamps (list): a sequence of (start, end) tuples, in seconds
    """
    video = VideoFileClip(src_path)
    max_ts = video.duration
    clips = []
    for ts in timestamps:
        x = max(0, ts[0] - left_padding)
        y = min(max_ts, ts[1] + right_padding)
        clips.append(video.subclip(x, y))
    allclips = concatenate_videoclips(clips)
    allclips.write_videofile(dest_path, codec=video_codec, audio_codec=audio_codec)
    return dest_path
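# A minimal, hypothetical call to excerpt_and_compile_video_file above: the paths and timestamps
# are placeholders, and the default codecs come from whatever DEFAULT_VIDEO_CODEC /
# DEFAULT_VIDEO_AUDIO_CODEC are defined as in the surrounding module.
excerpt_and_compile_video_file(
    "lecture.mp4",
    "lecture_highlights.mp4",
    timestamps=[(60.0, 75.0), (300.0, 330.0)],
    left_padding=0.5,
    right_padding=0.5,
)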
def gen_clip(self):
    def get_subclip(fn, offset, duration):
        clip = mp.VideoFileClip(fn)
        # print(clip.size)
        start = int(offset)
        end = int(offset) + int(duration)
        print("extracting frames {0} to {1}".format(start, end))
        newclip = clip.subclip(start, end).resize(width=self.cfg['video_params']['width'],
                                                  height=self.cfg['video_params']['height'])
        return newclip

    clips = [get_subclip(self.video_list[video]["path"],
                         self.video_list[video]["offset"],
                         self.video_list[video]["duration"])
             for video in self.video_list]

    if self.cfg['concat']:
        concat_clip = mp.concatenate_videoclips(clips, method="compose")
        # set_fps is a method; assigning to it would silently shadow it instead of setting the fps
        concat_clip = concat_clip.set_fps(self.cfg['video_params']['fps'])
        # concat_clip.set_audio(concat_audio_clip)
        concat_clip.write_videofile(output_path, fps=self.cfg['video_params']['fps'],
                                    codec=self.cfg['video_params']['vcodec'],
                                    audio_codec=self.cfg['video_params']['acodec'],
                                    bitrate="2000k")
        concat_clip.audio.write_audiofile(output_path + ".mp3")
    else:
        count = 0
        (a, b) = os.path.splitext(self.cfg['output'])
        for clip, video in zip(clips, self.video_list):
            output = "{}_{}{}".format(a, video, b)
            print("Writing {}".format(output))
            # try:
            vparam = []
            if self.cfg['video_params']['imovie_support']:
                vparam = ['-pix_fmt', 'yuv420p']
            clip.write_videofile(output, fps=self.cfg['video_params']['fps'],
                                 codec=self.cfg['video_params']['vcodec'],
                                 audio_codec=self.cfg['video_params']['acodec'],
                                 bitrate=self.cfg['video_params']['bitrate'],
                                 ffmpeg_params=vparam)
            if self.cfg['store_audio']:
                clip.audio.write_audiofile(output + "." + self.cfg['audio_extension'])
def randomize_video(videofile, segment_length):
    original_video = mp.VideoFileClip(videofile)
    duration = original_video.duration
    clips = []
    clip_start = 0
    while clip_start < duration:
        clip_end = clip_start + segment_length
        if clip_end > duration:
            clip_end = duration
        clip = original_video.subclip(clip_start, clip_end)
        clips.append(clip)
        clip_start = clip_end
    random.shuffle(clips)
    final_video = mp.concatenate_videoclips(clips)
    final_video.write_videofile('random.mp4', codec="libx264",
                                temp_audiofile='temp-audio.m4a', remove_temp=True,
                                audio_codec='aac')
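# Hypothetical call to randomize_video above (the source file name is a placeholder): cut the
# video into 2-second segments, shuffle them, and write the result to random.mp4 as hard-coded
# in the function.
randomize_video("source.mp4", segment_length=2)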
import moviepy.editor as mp

# `color` replaces the older `col` keyword, which is deprecated in recent MoviePy releases
clip1 = mp.ColorClip((1280, 720), color=(255, 0, 0))
clip1 = clip1.set_duration(5)

clip2 = mp.ImageClip('someimage.jpg')
clip2 = clip2.set_duration(10)

final_video = mp.concatenate_videoclips([clip1, clip2], method="compose")
final_video.write_videofile('composition.mp4', fps=24, codec="libx264",
                            temp_audiofile='temp-audio.m4a', remove_temp=True,
                            audio_codec='aac')
def makeCompilation(path="./", introName='', outroName='', totalVidLength=10 * 60,
                    maxClipLength=20, minClipLength=5, outputFile="output.mp4"):
    allVideos = []
    seenLengths = defaultdict(list)
    totalLength = 0
    for fileName in os.listdir(path):
        if isfile(join(path, fileName)) and fileName.endswith(".mp4"):
            print(fileName)
            filePath = os.path.join(path, fileName)  # Destination path
            clip = VideoFileClip(filePath)
            clip = clip.resize(width=1920)
            clip = clip.resize(height=1080)
            duration = clip.duration
            print(duration)
            if duration <= maxClipLength and duration >= minClipLength:
                allVideos.append(clip)
                seenLengths[duration].append(fileName)
                totalLength += duration

    print("Total Length: " + str(totalLength))
    random.shuffle(allVideos)
    duration = 0

    # Add intro vid
    videos = []
    if introName != '':
        introVid = VideoFileClip("./" + introName)
        videos.append(introVid)
        duration += introVid.duration

    description = ""
    # Create videos
    for clip in allVideos:
        timeRange = generateTimeRange(duration, clip.duration)
        acc = extractAcc(clip.filename)
        description += timeRange + " : @" + acc + "\n"
        duration += clip.duration
        videos.append(clip)
        print(duration)
        if duration >= totalVidLength:
            # Just make one video
            break

    # Add outro vid
    if outroName != '':
        outroVid = VideoFileClip("./" + outroName)
        videos.append(outroVid)

    finalClip = concatenate_videoclips(videos, method="compose")
    audio_path = "/tmp/temoaudiofile.m4a"
    # print(description)

    # Create compilation
    finalClip.write_videofile(outputFile, threads=8, temp_audiofile=audio_path,
                              remove_temp=True, codec="libx264", audio_codec="aac")
    return description
import moviepy.editor as mpy
import skimage.exposure as ske
import skimage.filters as skf

clip = mpy.VideoFileClip("sinc.gif")
gray = clip.fx(mpy.vfx.blackwhite).to_mask()


def apply_effect(effect, label, **kw):
    """ Returns a clip with the effect applied and a top label"""
    filtr = lambda im: effect(im, **kw)
    new_clip = gray.fl_image(filtr).to_RGB()
    txt = (mpy.TextClip(label, font="Amiri-Bold", fontsize=25,
                        bg_color='white', size=new_clip.size)
           .set_position(("center")).set_duration(1))
    return mpy.concatenate_videoclips([txt, new_clip])


equalized = apply_effect(ske.equalize_hist, "Equalized")
rescaled = apply_effect(ske.rescale_intensity, "Rescaled")
adjusted = apply_effect(ske.adjust_log, "Adjusted")
blurred = apply_effect(skf.gaussian_filter, "Blurred", sigma=4)

clips = [equalized, adjusted, blurred, rescaled]
animation = mpy.concatenate_videoclips(clips)
animation.write_gif("sinc_cat.gif", fps=15)
from moviepy.editor import VideoFileClip, concatenate_videoclips

clip_list = ["Watch Boruto- Naruto Next Generations Episode 160 Online Streaming Subbed & Dubbed.mp4", ]

for i in clip_list:
    clip1 = VideoFileClip(i)
    clip_array = []
    start = 5.0
    end = 8.90
    while end <= clip1.duration:
        clip_array.append(clip1.subclip(start, end))
        start += 11.1
        end += 11.1
    final_clip = concatenate_videoclips(clip_array)
    final_clip.write_videofile("Short_Video/" + i)
from moviepy.editor import VideoFileClip, concatenate_videoclips, CompositeVideoClip, AudioFileClip

audio = AudioFileClip("song.mp3")
audio = audio.subclip((0, 0), (0, 43))

video1 = VideoFileClip("outputM.mp4")
video2 = VideoFileClip("end.mp4")

final_clip = concatenate_videoclips([video1, video2])
final = final_clip.set_audio(audio)
i = 13
final.write_videofile("final.mp4", codec='mpeg4', audio_codec='libvorbis')
for z in v:
    unprocess.append(z)

vids = sorted(unprocess, reverse=False, key=date_key)

if len(vids) > 0:
    vid_list = []
    if len(vids) > 0:
        for vid in vids:
            clip = VideoFileClip(vid)
            vid_list.append(clip)
    if len(vid_list) > 0:
        full_vid = concatenate_videoclips(vid_list)
        out_file = f"/Users/davidmoore/Downloads/e2eSensorNetwork-master/archives/{now}_con.mp4"
        full_vid.write_videofile(out_file)
        for vid in vids:
            os.remove(vid)
        for vid in vids2:
            os.remove(vid)
        for vid in vids3:
            os.remove(vid)
        for vid in vids4:
            os.remove(vid)
video_clips = [VideoFileClip(clip) for clip in clips]
video_lens = [video_clip.duration for video_clip in video_clips]
print(min(video_lens))
min_video_len = min(video_lens)
min_video_len = args.vid_len / len(clips)

montage = []
for video_clip in video_clips:
    if video_clip.duration > min_video_len:
        clip_start = choice(range(0, int(video_clip.duration - min_video_len)))
    else:
        clip_start = 0
    clip_end = clip_start + min_video_len
    print(clip_start, clip_end)
    montage.append(video_clip.subclip(clip_start, clip_end))

final_clip = concatenate_videoclips(montage)
final_clip.write_videofile(f'{args.vid_dir}/output.mp4')

# while video_len < args.vid_len:
#     video_clip = VideoFileClip(clips[clips_ctr])
#     import pdb; pdb.set_trace();
#     clips_ctr = (clips_ctr + 1) % len(clips)
#     video_len +=
# concatenate_videoclips([VideoFileClip(clip) for clip in clips])
# clip1 = VideoFileClip("myvideo.mp4")
# clip2 = VideoFileClip("myvideo2.mp4").subclip(50,60)
# clip3 = VideoFileClip("myvideo3.mp4")
def to_videoClip(imgs, fps):
    imgs = gray2rgb(imgs)
    clips = [med.ImageClip(img).set_duration(1 / fps) for img in imgs]
    mov = med.concatenate_videoclips(clips, method='compose')
    mov = mov.set_fps(fps)
    return mov
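# Hedged usage sketch for to_videoClip above. It assumes `imgs` is a stack of grayscale frames
# (e.g. a NumPy array of shape (n_frames, height, width)) and that gray2rgb broadcasts over the
# frame axis, as skimage.color.gray2rgb does; the random noise frames below are purely illustrative.
import numpy as np

frames = np.random.randint(0, 256, size=(30, 120, 160), dtype=np.uint8)
movie = to_videoClip(frames, fps=15)
movie.write_videofile("noise.mp4")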
def test_issue_145():
    video = ColorClip((800, 600), color=(255, 0, 0)).set_duration(5)
    with pytest.raises(Exception):
        concatenate_videoclips([video], method="composite")
def create_text_clips(request):
    trip_stats = process_user_stats()

    # trip introduction
    # screensize = (720, 460)
    screensize = (1024, 780)
    txt_intro = TextClip('{0} Just Back From...'.format(trip_stats["username"]), color='white',
                         font="Amiri-Bold", kerning=2, fontsize=50).set_position((10, 80))
    for idx, d in enumerate(trip_stats["destinations"]):
        txt_clip = TextClip(d, color='white', font="Amiri-Bold", kerning=2,
                            fontsize=50).set_position((10, 120 + idx * 20))
        if idx == 0:
            txt_dest1 = txt_clip
        elif idx == 1:
            txt_dest2 = txt_clip
        elif idx == 2:
            txt_dest3 = txt_clip
        else:
            txt_dest4 = txt_clip
    txt_published_on = TextClip(trip_stats['published_date'], color='white', font="Amiri-Bold",
                                kerning=2, fontsize=50).set_position((10, 220))

    # final trip stats
    txt_trip_stats = TextClip('TRIP STATS', color='white', font="Amiri-Bold", kerning=2,
                              fontsize=50).set_position((10, 120))
    if trip_stats['via']:
        txt_via = TextClip('Via {0}'.format(trip_stats['via']), color='white', font="Amiri-Bold",
                           kerning=2, fontsize=50).set_position((40, 80))
    if trip_stats['miles']:
        txt_miles = TextClip('{0} Round-trip miles'.format(trip_stats['miles']), color='white',
                             font="Amiri-Bold", kerning=2, fontsize=50).set_position((40, 100))
    txt_is_international_trip = TextClip('International trip' if trip_stats['international'] else 'Domestic trip',
                                         color='white', font="Amiri-Bold", kerning=2,
                                         fontsize=50).set_position((40, 120))
    if trip_stats['cities_qty']:
        txt_cities_qty = TextClip('{0} cities'.format(trip_stats['cities_qty']), color='white',
                                  font="Amiri-Bold", kerning=2, fontsize=50).set_position((40, 140))
    if trip_stats['states_qty']:
        txt_states_qty = TextClip('{0} U.S. state'.format(trip_stats['states_qty']), color='white',
                                  font="Amiri-Bold", kerning=2, fontsize=50).set_position((40, 160))
    if trip_stats['foreign_countries_qty']:
        txt_foreign_countries_qty = TextClip('{0} foreign country'.format(trip_stats['foreign_countries_qty']),
                                             color='white', font="Amiri-Bold", kerning=2,
                                             fontsize=50).set_position((40, 180))
    if trip_stats['natpark']:
        txt_foreign_countries_qty = TextClip('{0} National parks'.format(trip_stats['natpark']),
                                             color='white', font="Amiri-Bold", kerning=2,
                                             fontsize=50).set_position((40, 200))
    if trip_stats['events_qty']:
        txt_foreign_countries_qty = TextClip('{0} events'.format(trip_stats['events_qty']),
                                             color='white', font="Amiri-Bold", kerning=2,
                                             fontsize=50).set_position((40, 220))

    # todo Last screen!
    # Boastable
    # Your life in travel
    # See Santi's whole trip
    # add the clips that actually exist, check!!

    cvc = CompositeVideoClip([txt_intro, txt_dest1, txt_dest2, txt_dest3, txt_dest4, txt_published_on],
                             size=screensize)

    # helper function
    rot_matrix = lambda a: np.array([[np.cos(a), np.sin(a)], [-np.sin(a), np.cos(a)]])

    def cascade(screenpos, i, nletters):
        v = np.array([0, -1])
        d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t ** 4))
        return lambda t: screenpos + v * 400 * d(t - 0.15 * i)

    def vortexout(screenpos, i, nletters):
        d = lambda t: max(0, t)  # damping
        a = i * np.pi / nletters  # angle of the movement
        v = rot_matrix(a).dot([-1, 0])
        if i % 2:
            v[1] = -v[1]
        return lambda t: screenpos + 400 * d(t - 0.1 * i) * rot_matrix(-0.2 * d(t) * a).dot(v)

    letters = findObjects(cvc)  # a list of ImageClips

    def moveLetters(letters, funcpos):
        return [letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
                for i, letter in enumerate(letters)]

    clips = [CompositeVideoClip(moveLetters(letters, funcpos), size=screensize).subclip(0, 3)
             for funcpos in [cascade, vortexout]]

    final_clip = concatenate_videoclips(clips)
    final_clip.write_videofile('videos/presentationVideo.mp4', fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
def build_default_video(self, process_data):
    """
    Builds a video with the default settings.
    """
    if self.source_video is None:
        video = mpy.ColorClip((1280, 720), duration=self.telemetry_data[-1][0][-1][-1])
    elif isinstance(self.video_skipstart, float) or isinstance(self.video_skipend, float):
        video = mpy.VideoFileClip(self.source_video).subclip(
            self.video_skipstart, self.video_skipend)
    else:
        raise ValueError("ValueError: Blackframe Detection disabled.")

    video_width, video_height = video.size

    if self.backdrop != "":
        backdrop = Image.open(self.backdrop).resize((video_width, video_height))
        if self.logo != "":
            logo = Image.open(self.logo).resize((self.logo_width, self.logo_height))
            backdrop.paste(
                logo,
                (backdrop.width - logo.width, backdrop.height - logo.height),
                logo)
    else:
        backdrop = Image.new('RGBA', (video_width, video_height), (0, 0, 0))
        if self.logo != "":
            logo = Image.open(self.logo).resize((self.logo_width, self.logo_height))
            backdrop.paste(
                logo,
                (backdrop.width - logo.width, backdrop.height - logo.height),
                logo)
    backdrop_clip = mpy.ImageClip(PIL_to_npimage(backdrop))

    title = mpy.ImageClip(Title(self).to_frame()).set_duration(5).set_position(
        ('center', 'center'))

    standing = UpdatedVideoClip(Standings(self, process_data=process_data))
    standing = standing.set_position((self.margin, self.margin)).set_duration(video.duration)
    standing_mask = mpy.ImageClip(
        Standings(self, process_data=process_data).make_mask(),
        ismask=True, duration=video.duration)
    standing = standing.set_mask(standing_mask)

    timer = UpdatedVideoClip(Timer(self, process_data=process_data))
    timer_width, _ = timer.size
    timer = timer.set_position(
        (video_width - timer_width - self.margin, self.margin)).set_duration(video.duration)
    timer_mask = mpy.ImageClip(
        Timer(self, process_data=process_data).make_mask(),
        ismask=True, duration=video.duration)
    timer = timer.set_mask(timer_mask)

    result = mpy.ImageClip(Results(self).to_frame()).set_duration(20).set_position(
        ('center', 'center')).add_mask()

    if self.point_structure is not None:
        result.mask = result.mask.fx(vfx.fadeout, 1)
        series_standings = mpy.ImageClip(SeriesStandings(self).to_frame()).set_start(
            20).set_duration(20).set_position(('center', 'center')).add_mask()
        if self.show_champion:
            series_standings.mask = series_standings.mask.fx(vfx.fadein, 1).fx(vfx.fadeout, 1)
            champion = mpy.ImageClip(Champion(self).to_frame()).set_start(
                40).set_duration(20).set_position(('center', 'center')).add_mask()
            champion.mask = champion.mask.fx(vfx.fadein, 1)
        else:
            series_standings.mask = series_standings.mask.fx(vfx.fadein, 1)
    else:
        if self.show_champion:
            result.mask = result.mask.fx(vfx.fadeout, 1)
            champion = mpy.ImageClip(Champion(self).to_frame()).set_start(
                20).set_duration(20).set_position(('center', 'center')).add_mask()
            champion.mask = champion.mask.fx(vfx.fadein, 1)

    intro = mpy.CompositeVideoClip([backdrop_clip, title]).set_duration(5).fx(vfx.fadeout, 1)
    mainevent = mpy.CompositeVideoClip([video, standing, timer]).set_duration(video.duration)

    outro_videos = [backdrop_clip, result]
    if self.point_structure is not None:
        outro_videos.append(series_standings)
    if self.show_champion:
        outro_videos.append(champion)

    outro = mpy.CompositeVideoClip(outro_videos).set_duration(
        sum([x.duration for x in outro_videos[1:]])).fx(vfx.fadein, 1)

    output = mpy.concatenate_videoclips([intro, mainevent, outro])
    return output
import sys
import moviepy.editor as mp

videos = []
for filename in sys.argv[1:]:
    video = mp.VideoFileClip(filename)
    # just get the first second
    video = video.subclip(0, 1)
    # make all videos the same size
    video = video.resize((1280, 720))
    videos.append(video)

final_video = mp.concatenate_videoclips(videos, method="compose")
final_video.write_videofile('composition.mp4', codec="libx264",
                            temp_audiofile='temp-audio.m4a', remove_temp=True,
                            audio_codec='aac')
# font='Amiri-Bold', fontsize=100, bg_color='white', color="grey20").set_duration(1)
final = concatenate_videoclips([sub1, sub2], method='compose')
final.write_videofile('bam-3.cut.mp4', fps=3, codec='mpeg4')

clip = VideoFileClip("bam-3.cut.mp4")
subs = [((0, 3.17), 'Load bam file'),
        ((3.17, 7.21), 'Go to region'),
        ((7.21, 12), 'Zoom in'),
        ((12, 15), 'Move forward'),
        ((15, 18), 'Zoom out'),
        ((18, 26), 'Filter reads')]

annotated_clips = [annotate(clip.subclip(from_t, to_t), txt, txt_color='blue')
                   for (from_t, to_t), txt in subs]
final_clip = editor.concatenate_videoclips(annotated_clips)
final_clip.write_videofile("bam-3.subs.mp4", fps=3)

# ---------------------- 8< ----------------------------------------------------

os.chdir("/Users/berald01/Desktop/asciigenome_demo/")
clip = VideoFileClip("bigWig-2.mov")
cat = [clip.subclip(20.00, 22.00),
       clip.subclip(24.00, 29.00),
       clip.subclip(30.00, 35.00),
       clip.subclip(41.00, 45),
       clip.subclip(46.5, 48),
       clip.subclip(51, 53),
       clip.subclip(56, 64),
       clip.subclip(80, 82),
timestamp_clip = TextClip(timestamp, color='white', font='arial', fontsize=24,
                          size=image_clip.size).set_duration(image_clip.duration).set_position((-500, 350))
clips += [CompositeVideoClip([image_clip, timestamp_clip])]
input_paths = [input_dir + x]
hour = x[17:19]
# print(clips)

# w,h = moviesize = clip.size

# build input file paths
# input_paths = [input_dir + x for x in input_files]

# set output file path
if args.date:
    output_path = output_dir + 'timelapse_' + str(input_files[0][8:16]) + '.mp4'
elif args.days:
    output_path = output_dir + 'timelapse_' + str(args.days) + 'd.mp4'
else:
    output_path = output_dir + 'timelapse_' + str(input_files[0][8:16]) + '-' + str(input_files[-1][8:16]) + '.mp4'

# write compiled video
final_clip = concatenate_videoclips(clips)
final_clip.write_videofile(output_path, audio=False)

# exit
logger.debug('Finished!')
sys.exit()
def stitch_video_clips(video_clips_times, peaks, song_path):
    audio = AudioFileClip(song_path)
    sorted_peaks = sorted([0.00] + peaks)

    # Get lengths of video clips and audio clips
    video_clips_times_and_lengths, audio_clip_lengths = format_videos_and_audio(video_clips_times, sorted_peaks)

    used_video_clips = set()
    video_order = []
    j = 0
    print('peaks', sorted_peaks)
    prefix = '/uploads'
    flag = True
    while len(used_video_clips) < len(video_clips_times_and_lengths) and flag:
        # Pick random video that we haven't used.
        i = random.randint(0, len(video_clips_times_and_lengths) - 1)
        if i not in used_video_clips:
            used_video_clips.add(i)
            video_clip = video_clips_times_and_lengths[i]
            print('start ', sorted_peaks[j])
            # Check for peak that is at end or right before
            for p in range(j, len(sorted_peaks)):
                if sorted_peaks[j] + video_clip['length'] > 30.00:
                    flag = False
                    new_length = 30.00 - sorted_peaks[j]
                    print('new_length', new_length)
                    break
                if sorted_peaks[p] > sorted_peaks[j] + video_clip['length']:
                    new_length = sorted_peaks[p - 1] - sorted_peaks[j]
                    print('new_length2', new_length)
                    j = p - 1
                    break
                elif sorted_peaks[p] == sorted_peaks[j] + video_clip['length']:
                    new_length = sorted_peaks[p] - sorted_peaks[j]
                    print('new_length3', new_length)
                    j = p
                    break
            # Truncate and create the video clip and add it to the order.
            if flag:
                if video_clip['video_name'][:len(prefix)] == prefix:
                    video_clip['video_name'] = video_clip['video_name'][len(prefix):]
                filename = app.config['UPLOAD_FOLDER'] + '/' + video_clip['video_name']
                v = VideoFileClip(filename, audio=True).subclip(video_clip['start'],
                                                                video_clip['start'] + new_length)
                print('add to video', video_clip['video_name'])
                video_order.append(v)

    final_video = concatenate_videoclips(video_order, method="compose")
    if not flag:
        print("clipping short", new_length)
        final_video = final_video.subclip(0, -new_length)
    else:
        final_video = final_video.subclip(0, 30)

    filename = str(random.randint(0, 1000000000000)) + '.mp4'
    final_video_path = 'final_videos/' + filename
    final_video.write_videofile(final_video_path, audio=song_path, codec='libx264', fps=30,
                                audio_fps=44100, preset='ultrafast', threads=2)
    return filename
def test_issue_416():
    # ColorClip has no fps attribute.
    green = ColorClip((640, 480), color=(0, 255, 0)).set_duration(2)
    video1 = concatenate_videoclips([green])
    assert video1.fps is None
def randomFilm(path_s,
               first_clip_option=False,
               result_path=None,
               result_name=None,
               text_overlays_on=True,
               text_path_s=None,
               text_overlays_num=None,
               text_method_s=['twitter', 'lecture', 'poem'],  # ,'translate'
               text_interval=(1, 30),
               fontsize_interval=(50, 300),
               duration_end=None,
               duration_interval=(1, 600),
               clip_interval=(1, 30),
               accepted_fps=None,
               accepted_input_types=['avi', 'mp4', 'MTS', 'mov', 'dv', 'mpg'],
               output_type='mp4',
               output_codec='libx264',
               verbose=True):
    """
    Returns none. Outputs a random film given a folder with video files.

    DEFAULTS:
    path_s               >>> (str/list) directory of video file(s) to be compiled into output file
    first_clip_option    >>> (T/F) True, first clip will be FIRST_CLIP imported above
    result_path          >>> (str) OUT_DIREC directory imported above, directory of output file
    result_name          >>> (str) randomly generated from text files, name of output file
    text_overlays_on     >>> (T/F) True, overlays are on
    text_path_s          >>> (str/list) TXT_DIREC directory imported above, directory of text file(s) for overlays
    text_overlays_num    >>> (int) random.randint(1, 1 + int(duration_end/8)), number of text overlays
    text_method          >>> (str/list) 'twitter', 'lecture', 'poem', 'translate', method(s) of generating text overlays
    text_interval        >>> (tuple) (1, 30), range of duration interval for text overlays
    fontsize_interval    >>> (tuple) (50, 300), range of font size for text overlays
    duration_end         >>> (int) random selection from duration interval, duration of output film
    duration_interval    >>> (tuple) (1, 600), range of duration interval for output film
    clip_interval        >>> (tuple) (1, 30), range of duration interval for clips for output film
    accepted_fps         >>> (int) all framerates, specifies a framerate for clips to be accepted for the film
    accepted_input_types >>> (list) ['avi', 'mp4', 'MTS', 'mov', 'dv', 'mpg'], list of accepted input file types
    output_type          >>> (str) 'mp4', output file type
    output_codec         >>> (str) 'libx264', output file codec
    """
    global OUT_DIREC, TXT_DIREC, FIRST_CLIP

    if not result_path:
        result_path = OUT_DIREC  # use OUT_DIREC if no directory is provided
    if not text_path_s:
        text_path_s = TXT_DIREC  # use TXT_DIREC if no directory is provided

    text_name_list = collectFiles(text_path_s, 'txt')
    clip_name_list = collectFiles(path_s, accepted_input_types, weighted=True)

    if not duration_end:
        duration_end = random.randint(duration_interval[0], duration_interval[1])
    verbosePrint(verbose, 'Duration of output film: ' + str(duration_end))

    if not text_overlays_on:
        text_overlays_num = 0
    elif not text_overlays_num:
        text_overlays_num = random.randint(1, 1 + int(duration_end / 8))

    if first_clip_option:
        first_clip_name = FIRST_CLIP
    else:
        first_clip_name = None

    tarkovsky = director.Director(first_clip_name, duration_end, text_overlays_num,
                                  text_name_list, clip_name_list, clip_interval, text_interval)

    while tarkovsky.getCurrDuration('clip') < duration_end:
        adj_clip_name_list, adj_clip_low, adj_clip_high = tarkovsky.adjustParams('clip')
        new_clip, new_clip_start, new_clip_name = generateClip(
            adj_clip_name_list,
            duration=random.randint(adj_clip_low, adj_clip_high),
            accepted_fps=accepted_fps,
            verbose=verbose)
        tarkovsky.addClip('clip', new_clip, new_clip_start, new_clip_name,
                          (adj_clip_low, adj_clip_high))
        verbosePrint(verbose, 'Current duration of film: ' + str(tarkovsky.getCurrDuration('clip')))

    output_clip = mpy.concatenate_videoclips(
        [clip for clip in tarkovsky.outclip_df['clip_object'] if clip])
    verbosePrint(verbose, 'Clips concatenated...')

    if text_overlays_on:
        verbosePrint(verbose, 'Number of text overlays: ' + str(text_overlays_num))
        while tarkovsky.getCurrDuration('text') < text_overlays_num:
            adj_text_name_list, adj_text_low, adj_text_high = tarkovsky.adjustParams('text')
            text_clip, text_clip_name = generateTextOverlay(
                adj_text_name_list,
                text_method_s=text_method_s,
                duration=random.randint(adj_text_low, adj_text_high),
                fontsize=random.randint(fontsize_interval[0], fontsize_interval[1]),
                verbose=verbose)
            if text_clip:
                text_clip_start = random.randint(1, max(int(duration_end - text_clip.duration), 1))
                verbosePrint(verbose, 'Text overlay will start at: ' + str(text_clip_start))
                text_clip = text_clip.set_start(text_clip_start)
            else:
                text_clip_start = 0
            tarkovsky.addClip('text', text_clip, text_clip_start, text_clip_name,
                              (adj_text_low, adj_text_high))

        text_clip_list = [clip for clip in tarkovsky.outtext_df['clip_object'] if clip]
        text_clip_list.insert(0, output_clip)
        output_clip = mpy.CompositeVideoClip(text_clip_list)
        verbosePrint(verbose, 'Text overlays added...')

    if output_clip.duration > duration_end:
        output_clip = output_clip.subclip(0, duration_end)
        verbosePrint(verbose, 'Film cut to duration end')

    try:
        verbosePrint(verbose, 'Output film framerate: ' + str(output_clip.fps))
    except AttributeError:
        verbosePrint(verbose, 'ERROR: the framerates of the clips did not all match, changed fps to 25')
        output_clip = output_clip.set_fps(25)

    if not result_name:
        result_name = generateTitle(text_name_list, verbose)

    destination, log_destination = createDestination(result_path, output_name=result_name,
                                                     output_type=output_type)
    tarkovsky.writeLog(log_destination)
    verbosePrint(verbose, 'Log file written to destination, ' + log_destination)

    if yesInput('>>Would you like to write this film?\n'):
        output_clip.write_videofile(destination, fps=output_clip.fps, codec=output_codec)
        verbosePrint(verbose, 'Video file written to destination, ' + destination)
duration = et - st
maxi /= (i - s)
heappush(blocks, (1 - maxi, st, et, duration))  # because it's min-heap
s = None
maxi = 0

# clips selection
selecteds = []
time_remain = int(args.sec)  # sec
while time_remain > 0:
    block = heappop(blocks)
    selected = (block[1], block[2], block[3], block[0])
    heappush(selecteds, selected)
    time_remain -= block[3]
selecteds.sort(key=get_key)

# make highlight
clips = []
for selected in selecteds:
    tname = 'tttmp\{}.mp4'.format(selected[0])
    ffmpeg_extract_subclip(hfilename, selected[0], selected[1], targetname=tname)
    clips.append(VideoFileClip(tname))
final_clip = concatenate_videoclips(clips)
final_clip.write_videofile(args.out)
for i, clip in enumerate(video_files):
    video_files[i] = VideoFileClip(clip)

print("Number of video files loaded: " + str(len(video_files)))

# title frontend
title = input("Enter Title Screen Text, leave blank and hit enter to skip: ")

# endscreen
endtext = input("Enter Endscreen Text, leave blank and hit enter to skip: ")
if endtext != "":
    video_files.append(TextClip(endtext, fontsize=50, color='white').set_duration(5))

# combine video clips
combined_clip = concatenate_videoclips(video_files, method="compose")

# title backend
if title != "":
    txt_clip = TextClip(title, fontsize=70, color='white')
    txt_clip = txt_clip.set_pos('bottom').set_duration(5)
    video = CompositeVideoClip([combined_clip, txt_clip])
else:
    video = combined_clip

# audio
audio_files = []
for i in os.listdir():
    if i.endswith(".mp3") or i.endswith(".wav"):
        audio_files.append(i)
from os import listdir
from os.path import isfile, join
from moviepy.editor import VideoFileClip, concatenate_videoclips

'''
mypath = "downloads"
files = listdir(mypath)
print(files)

#concatenation
clips = []
for filename in files:
    path = "downloads/" + filename
    print(path)
    clips.append(VideoFileClip(path))

final = concatenate_videoclips(clips)
final.write_videofile("downloads/final.mp4")
'''

clip1 = VideoFileClip("downloads/clip1.mp4")
clip2 = VideoFileClip("downloads/clip2.mp4")
clip12 = concatenate_videoclips([clip1, clip2])

clip3 = VideoFileClip("downloads/clip3.mp4")
clip4 = VideoFileClip("downloads/clip4.mp4")
clip5 = VideoFileClip("downloads/clip5.mp4")
clip345 = concatenate_videoclips([clip3, clip4, clip5])

final_clip = concatenate_videoclips([clip12, clip345])
final_clip.write_videofile("my_concatenation.mp4")
def main(args):
    VOD_ID = args.vodID
    SAMPLE_WINDOW = args.sample_window
    INTRO_TITLE = args.title
    SUSPENSE_FILE = args.suspense
    OUTPUT_PATH = args.output_path
    VOD_PATH = args.input_path
    EDIT_WINDOW = args.edit_window

    # Import Chat
    print("Formatting chat data")
    chat_path = f"{VOD_PATH}/{str(VOD_ID)}/chat.log"

    # Import VOD
    vod_file = f"{VOD_PATH}/{str(VOD_ID)}/vod.mkv"
    vod = mpy.VideoFileClip(vod_file)

    START_TIME_FILTER = "00:00:00"
    END_TIME = vod.duration
    END_TIME_FILTER = datetime.datetime.fromtimestamp(int(END_TIME)).strftime('%H:%M:%S')

    df_chat = gatherChat(chat_path, START_TIME_FILTER, END_TIME_FILTER)

    # sample bin window to group by
    df_sample_chat = (df_chat.set_index("timestamp")
                      .resample(str(SAMPLE_WINDOW) + "s")["message"]
                      .agg(["sum", "count"]))

    clips = []
    introClip = createIntroClip(INTRO_TITLE, vod.size)
    clips.append(introClip)

    # Gather pogs emotes
    emotes_interest = [s.strip() for s in args.emotes.split(",")]

    # time of interest
    for emote in emotes_interest:
        df_sample_chat[emote + "_count"] = (df_sample_chat["sum"].astype(str)
                                            .apply(lambda msg: len(re.findall(emote, msg))))

        # Gather clips
        print(f"Gathering pog moment: {emote}")

        # Sort the dataset by the highest emote count and grab the timestamps from it
        pogMomentTimes = []
        pogMomentTimes = df_sample_chat.sort_values([emote + "_count"]).tail(5)
        pogMomentTimes = pogMomentTimes.sort_values("timestamp")
        pogMomentTimes = pogMomentTimes.index.tolist()

        for moments in pogMomentTimes:
            # Clip based on vod, timestamp; EDIT_WINDOW = how much time before and after
            clip = clipIt(vod, moments, EDIT_WINDOW, VOD_ID, SUSPENSE_FILE)
            clips.append(clip)

    # deleting vod to free up mem
    del vod

    print("Editing vod clips")
    OUTPUT_PATH_VOD = f"{OUTPUT_PATH}/{str(VOD_ID)}"
    if not os.path.exists(OUTPUT_PATH_VOD):
        os.makedirs(OUTPUT_PATH_VOD)

    concatClip = mpy.concatenate_videoclips(clips)
    EXPORT_FILE_PATH = f"{OUTPUT_PATH_VOD}/previouslyClip.mp4"
    concatClip.write_videofile(EXPORT_FILE_PATH)
    print("Previously on clip saved to: ", EXPORT_FILE_PATH)
    del concatClip

    # exporting each clip
    print("Exporting clips")
    for clip, emote in zip(clips, emotes_interest):
        clip.write_videofile(f"{OUTPUT_PATH_VOD}/{emote}.mp4")
input_video = ffmpeg.input("../resources/video_with_audio.mp4")
added_audio = ffmpeg.input("../resources/dance_beat.ogg").audio.filter('adelay', "1500|1500")

merged_audio = ffmpeg.filter([input_video.audio, added_audio], 'amix')

(ffmpeg
 .concat(input_video, merged_audio, v=1, a=1)
 .output("mix_delayed_audio.mp4")
 .run(overwrite_output=True))


import moviepy.editor as mp

audio = mp.AudioFileClip(audioPath)
video = mp.VideoFileClip(videoPath)

final = mp.concatenate_videoclips([video])
# video = video.set_duration(audio)
# set_duration expects a time in seconds, not a clip
final = final.set_audio(audio.set_duration(final.duration))
# video1.set_audio(audio)
# video2 = mp.VideoFileClip("video2.mp4")
# final = mp.concatenate_videoclips([video1, video2]).set_audio(audio)
# final.write_videofile("output.mp4")
final.write_videofile("Report/Test-Back_2.mp4")


from moviepy.editor import VideoFileClip, AudioFileClip

clip_video = VideoFileClip("Test-Back_without_Audio.mp4")
clip_audio = AudioFileClip('Report/Brand_X_Music-Buccaneer_Island.mp3')
def repeat(self) -> None:
    """Concatenates a video or audio stream with itself to make a twice-as-long clip"""
    if self.has_video:
        self.clip = med.concatenate_videoclips([self.clip, self.clip])
    else:
        self.clip = med.concatenate_audioclips([self.clip, self.clip])
def create_presentation_video(request):
    # trip introduction
    # screensize = (720, 460)
    screensize = (1024, 780)
    txt_intro = TextClip('Just Back From...', color='white', font="Amiri-Bold",
                         kerning=2, fontsize=50).set_position((10, 80))
    txt_dest1 = TextClip('Seville, Spain', color='white', font="Amiri-Bold",
                         kerning=2, fontsize=50).set_position((10, 120))
    txt_dest2 = TextClip('Costa Brava, Spain', color='white', font="Amiri-Bold",
                         kerning=2, fontsize=50).set_position((10, 160))
    txt_dest3 = TextClip('Arles, France', color='white', font="Amiri-Bold",
                         kerning=2, fontsize=50).set_position((10, 200))
    txt_dest4 = TextClip('Eze, France', color='white', font="Amiri-Bold",
                         kerning=2, fontsize=50).set_position((10, 240))

    # title_clip = (TextClip("Just Back From...", fontsize=35,
    #                        font="Century-Schoolbook-Roman", color="white", kerning=-2, interline=-1,
    #                        bg_color='#e04400', method='caption', align='center',
    #                        size=(image_clips.w, image_clips.h))
    #               .margin(top=5, opacity=0)
    #               .set_duration(3).fadein(.5).fadeout(.5)
    #               .set_position(("center", "top")))

    # txt = "\n".join([
    #     "Just Back From...",
    #     "Seville, Spain",
    #     "Costa Brava, Spain",
    #     "Arles, France",
    #     "Eze, France"
    # ])
    # txt_clip1 = TextClip(txt, color='white', font="Amiri-Bold",
    #                      kerning=2, fontsize=50).set_position((10, 80))
    # clip_txt = TextClip(txt, color='white', align='West', fontsize=25,
    #                     font='Xolonium-Bold', method='label')
    # txt_clips = clips_array([[txt_clip1, txt_clip2]])
    # cvc = CompositeVideoClip([txt_clip1, txt_clip2, txt_clip3, txt_clip4, txt_clip5],

    cvc = CompositeVideoClip([txt_intro, txt_dest1], size=screensize)

    # helper function
    rot_matrix = lambda a: np.array([[np.cos(a), np.sin(a)], [-np.sin(a), np.cos(a)]])

    def cascade(screenpos, i, nletters):
        v = np.array([0, -1])
        d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t ** 4))
        return lambda t: screenpos + v * 400 * d(t - 0.15 * i)

    def vortexout(screenpos, i, nletters):
        d = lambda t: max(0, t)  # damping
        a = i * np.pi / nletters  # angle of the movement
        v = rot_matrix(a).dot([-1, 0])
        if i % 2:
            v[1] = -v[1]
        return lambda t: screenpos + 400 * d(t - 0.1 * i) * rot_matrix(-0.2 * d(t) * a).dot(v)

    letters = findObjects(cvc)  # a list of ImageClips

    def moveLetters(letters, funcpos):
        return [letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
                for i, letter in enumerate(letters)]

    clips = [CompositeVideoClip(moveLetters(letters, funcpos), size=screensize).subclip(0, 3)
             for funcpos in [cascade, vortexout]]

    final_clip = concatenate_videoclips(clips)
    final_clip.write_videofile('videos/presentationVideo.mp4', fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
def svg_to_mp4(svg_tree, audio_filename, config_filename, output_filename,
               begin_time=0.0, end_time=3.0, padding_duration=0.0, default_length=4.0):
    # clips = []
    image_paths = []

    if audio_filename:
        audio_clip = mp.AudioFileClip(audio_filename)
        end_time = audio_clip.duration

    if config_filename:
        config = load_json(config_filename)
        background_filename = config.get("bg-image", "")
        fps = config.get("fps", 30)
    else:
        background_filename = ""
        fps = 30

    frame_duration = 1.0 / fps
    start_time_floor = math.floor(begin_time * fps) / fps
    end_time_floor = math.floor(end_time * fps) / fps + padding_duration
    current_time = start_time_floor

    snapshot_svg = SnapshotSVG(svg_tree)

    small_chunk_paths = []
    large_chunk_paths = []

    rgb_str = config.get("bg-color", "rgb(0,0,0)")
    rgb_int = rgb_to_hex(rgb_str)

    frame_idx = 0
    while True:
        if len(image_paths) >= FRAMES_PER_CHUNK:
            small_chunk_path = write_small_chunk(image_paths, background_filename, fps)
            small_chunk_paths.append(small_chunk_path)
            image_paths = []
        if len(small_chunk_paths) >= CHUNKS_PER_LARGE_CHUNK:
            large_chunk_path = write_large_chunk(small_chunk_paths, fps)
            large_chunk_paths.append(large_chunk_path)
            small_chunk_paths = []

        frozen_svg = snapshot_svg[current_time]
        svg_path = f"temp/temp.svg"
        tiff_path = f"temp/temp.{frame_idx}.tiff"
        save_xml(svg_path, frozen_svg)
        drawing = svg2rlg(svg_path)
        renderPM.drawToFile(drawing, tiff_path, fmt="TIFF", bg=rgb_int,
                            configPIL={'transparent': toColor(rgb_str)})
        # imageClip = mp.ImageClip(tempfile_basename + ".png").set_duration(frame_duration)
        # maskClip = mp.ImageClip(tempfile_basename + ".png", ismask=True)
        # imageClip.set_mask(maskClip)
        # renderPM.drawToFile(drawing, tempfile_basename + ".gif", fmt="GIF", bg=0xffff00,
        #                     configPIL={'transparent': 0xffff00})
        # p = renderPM.drawToPIL(drawing, bg=0xffffff, configPIL={'transparent': 0xffffff})
        # p.save(tempfile_basename + ".png")
        # imageClip = mp.ImageClip(tempfile_basename + ".tiff", transparent=True).set_duration(frame_duration)
        image_paths.append(tiff_path)

        current_time += frame_duration
        frame_idx += 1
        # tiny adjustment to avoid doubling a frame due to floating point error
        if current_time >= end_time_floor - 0.000001:
            break

    if image_paths:
        small_chunk_path = write_small_chunk(image_paths, background_filename, fps)
        small_chunk_paths.append(small_chunk_path)
    if small_chunk_paths:
        large_chunk_path = write_large_chunk(small_chunk_paths, fps)
        large_chunk_paths.append(large_chunk_path)

    movie_chunks = [mp.VideoFileClip(c) for c in large_chunk_paths]
    result_clip = mp.concatenate_videoclips(movie_chunks, method="compose").set_duration(
        end_time_floor - start_time_floor)

    if audio_filename:
        result_clip.audio = audio_clip

    result_clip.write_videofile(output_filename, fps=fps)  # , codec="mpeg4")

    for clip in movie_chunks:
        clip.close()
    for path in large_chunk_paths:
        os.remove(path)
    result_clip.close()

    return output_filename
def makeVideoFromClipDetails(videoname="output", singleOutput=True):
    clipList = []
    for clipDetails in clipDetailsList:
        clipList.append(clipDetails.clip)
    final_clip = concatenate_videoclips(clipList, method="compose")
    final_clip.write_videofile(videoname + ".mp4")
intro_text = intro_text.set_duration(intro_duration)
intro_text = intro_text.set_fps(fps)
intro_text = intro_text.set_pos("center")

# to add audio to your intro:
intro_music = audio_clip.subclip(25, 30)
intro_text = intro_text.set_audio(intro_music)

watermark_size = 50
watermark_text = TextClip(watermark, fontsize=watermark_size, color='black',
                          align='East', size=(w, watermark_size))
watermark_text = watermark_text.set_fps(fps)
watermark_text = watermark_text.set_duration(video_clip.reader.duration)
watermark_text = watermark_text.margin(left=10, right=10, bottom=2, opacity=0)
watermark_text = watermark_text.set_position(("bottom"))

watermarked_clip = CompositeVideoClip([video_clip, watermark_text], size=video_clip.size)
watermarked_clip = watermarked_clip.set_duration(video_clip.reader.duration)
watermarked_clip = watermarked_clip.set_fps(fps)
watermarked_clip = watermarked_clip.set_audio(final_audio)

final_clip = concatenate_videoclips([intro_text, watermarked_clip])
final_clip.write_videofile(final_video_path, codec='libx264', audio_codec="aac")
def create_overall_quality_video(request):
    trip_stats = process_user_stats()

    # load images
    image1 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image2 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image3 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image4 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image5 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image6 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image7 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image8 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image9 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')
    image10 = ImageClip("media/real pics/" + random.choice(os.listdir("media/real pics/"))).set_pos('center')

    # calculate max width and height
    images = []
    images.extend([image1, image2, image3, image4, image5,
                   image6, image7, image8, image9, image10])
    max_width = 0
    max_height = 0
    for img in images:
        if img.size[0] > max_width:
            max_width = img.size[0]
        if img.size[1] > max_height:
            max_height = img.size[1]

    # create blurred images
    image1 = CompositeVideoClip([image1.resize((max_width, max_height)).fl_image(blur), image1.resize(.95)])
    image2 = CompositeVideoClip([image2.resize((max_width, max_height)).fl_image(blur), image2.resize(.95)])
    image3 = CompositeVideoClip([image3.resize((max_width, max_height)).fl_image(blur), image3.resize(.95)])
    image4 = CompositeVideoClip([image4.resize((max_width, max_height)).fl_image(blur), image4.resize(.95)])
    image5 = CompositeVideoClip([image5.resize((max_width, max_height)).fl_image(blur), image5.resize(.95)])
    image6 = CompositeVideoClip([image6.resize((max_width, max_height)).fl_image(blur), image6.resize(.95)])
    image7 = CompositeVideoClip([image7.resize((max_width, max_height)).fl_image(blur), image7.resize(.95)])
    image8 = CompositeVideoClip([image8.resize((max_width, max_height)).fl_image(blur), image8.resize(.95)])
    image9 = CompositeVideoClip([image9.resize((max_width, max_height)).fl_image(blur), image9.resize(.95)])
    image10 = CompositeVideoClip([image10.resize((max_width, max_height)).fl_image(blur), image10.resize(.95)])

    # concatenate clips, play one clip after the other
    image_clips = concatenate_videoclips([image1.set_duration(2).fadein(.5).fadeout(.5),
                                          image2.set_duration(2).fadein(.5).fadeout(.5),
                                          image3.set_duration(2).fadein(.5).fadeout(.5),
                                          image4.set_duration(2).fadein(.5).fadeout(.5),
                                          image5.set_duration(2).fadein(.5).fadeout(.5),
                                          image6.set_duration(2).fadein(.5).fadeout(.5),
                                          image7.set_duration(2).fadein(.5).fadeout(.5),
                                          image8.set_duration(2).fadein(.5).fadeout(.5),
                                          image9.set_duration(2).fadein(.5).fadeout(.5),
                                          image10.set_duration(2).fadein(.5).fadeout(.5)])

    title_clip = (TextClip("Just Back From...", fontsize=35, font="Century-Schoolbook-Roman",
                           color="white", kerning=-2, interline=-1, bg_color='#e04400',
                           method='caption', align='center', size=(max_width, max_height))
                  .margin(top=5, opacity=0)
                  .set_duration(3).fadein(.5).fadeout(.5)
                  .set_position(("center", "top")))

    stats_clip = (TextClip("See Santi's recent trip of 1,836 round trip miles, with stops..",
                           fontsize=35, font="Century-Schoolbook-Roman", color="white",
                           kerning=-2, interline=-1, bg_color='#e04400', method='caption',
                           align='center', size=(max_width, max_height))
                  .margin(top=5, opacity=0)
                  .set_duration(4).fadein(.5).fadeout(.5)
                  .set_position(("center", "top")))

    final_clip = concatenate_videoclips([title_clip, image_clips, stats_clip],
                                        method="compose", padding=-1)
    audio_clip = AudioFileClip("media/music.aac").subclip(0, final_clip.duration)
    final_clip = final_clip.set_audio(audio_clip).afx(afx.audio_fadeout, 1.5)

    # write_videofile -> preset:
    # Sets the time that FFMPEG will spend optimizing the compression.
    # Choices include: ultrafast, superfast, fast, medium, slow, veryslow.
    # Note that this does not impact the quality of the video, only the size of the video file.
    # So choose ultrafast when you are in a hurry and file size does not matter.
    final_clip.write_videofile('videos/overallQualityVideo.mp4', fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')
    # final_clip.write_gif('videos/overallQuality.gif', fps=23)

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
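# Hedged illustration of the write_videofile preset trade-off described in the comments above:
# the preset only changes how long FFMPEG spends compressing (and therefore the file size),
# not the visual quality. The clip and output paths are placeholders, and VideoFileClip is
# assumed to be imported from moviepy.editor.
clip = VideoFileClip("media/example.mp4")
clip.write_videofile("videos/example_fast.mp4", codec="libx264", preset="ultrafast")
clip.write_videofile("videos/example_small.mp4", codec="libx264", preset="veryslow")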
""" Using This Code Example ========================= The code examples are provided by Yasoob Khalid to help you reference Practical Python Projects book. Code samples follow PEP-0008, with exceptions made for the purposes of improving book formatting. Example code is provided "as is". Permissions ============ In general, you may use the code we've provided with this book in your programs . You do not need to contact us for permission unless you're reproducing a significant portion of the code and using it in educational distributions. Examples: * Writing an education program or book that uses several chunks of code from this course requires permission. * Selling or distributing a digital package from material taken from this book does require permission. * Answering a question by citing this book and quoting example code does not require permission. Attributions usually include the title, author, publisher and an ISBN. For example, "Practical Python Projects, by Yasoob Khalid. Copyright 2020 Yasoob." If you feel your use of code examples falls outside fair use of the permission given here, please contact me at [email protected]. """ from moviepy.editor import (VideoFileClip, concatenate_videoclips) clip1 = VideoFileClip('Woman Walks Ahead.Trailer.720p.mov') clip2 = VideoFileClip('Incredibles 2.Trailer.720p.mov') final_clip = concatenate_videoclips([clip1, clip2]) final_clip.write_videofile("combined trailers.mp4")
from moviepy.editor import VideoFileClip, concatenate_videoclips

clip1 = VideoFileClip("highlight1.mp4")
clip2 = VideoFileClip("highlight2.mp4")
clip3 = VideoFileClip("highlight3.mp4")
clip4 = VideoFileClip("highlight5.mp4")
clip5 = VideoFileClip("highlight6.mp4")
clip6 = VideoFileClip("highlight7.mp4")
clip7 = VideoFileClip("highlight8.mp4")
clip8 = VideoFileClip("highlight9.mp4")
clip9 = VideoFileClip("highlight10.mp4")
clip10 = VideoFileClip("highlight11.mp4")
clip11 = VideoFileClip("highlight12.mp4")
clip12 = VideoFileClip("highlight13.mp4")
clip13 = VideoFileClip("highlight14.mp4")
clip14 = VideoFileClip("highlight4.mp4")

final_clip = concatenate_videoclips([
    clip1, clip2, clip3, clip4, clip5, clip6, clip7, clip8,
    clip9, clip9, clip10, clip11, clip12, clip13, clip14
])
final_clip.write_videofile("final_highlights.mp4")
img1 = Image.new('RGB', (clip2.size[0], clip2.size[1]), color='black')
d_black = ImageDraw.Draw(img1)
img1.save(directory + '/black.png')

print(directory)
print(video)
cleanFname = re.search(directory + '\\' + '\(.*).mp4', video).group(1)
print(cleanFname)

# Concat all the clips together
clips = [ImageClip(directory + '/blue.png').set_duration(2),
         ImageClip(directory + '/black.png').set_duration(3),
         videoclip2,
         ImageClip(directory + '/black.png').set_duration(3)]
concat_clip = mp.concatenate_videoclips(clips)
concat_clip.write_videofile(oid_dir + str(oid) + cleanFname + '.mp4', audio_codec='libmp3lame')

# find the file's line in sheet and insert oid in col B
for rowx in range(1, sheet.nrows):
    if sheet.cell_value(rowx, 22) == cleanFname:
        ws.cell(column=1, row=rowx, value=oid)

# move original files to new folder
os.rename(video, origin_dir + cleanFname + '.mp4')
oid += 1
wb.save(filename=xlsx_file[0])
def create_video(request):
    screensize = (720, 460)
    txtClip = TextClip('Cool effect', color='white', font="Amiri-Bold",
                       kerning=5, fontsize=100)
    cvc = CompositeVideoClip([txtClip.set_pos('center')], size=screensize)

    # THE NEXT FOUR FUNCTIONS DEFINE FOUR WAYS OF MOVING THE LETTERS

    # helper function
    rotMatrix = lambda a: np.array([[np.cos(a), np.sin(a)], [-np.sin(a), np.cos(a)]])

    def vortex(screenpos, i, nletters):
        d = lambda t: 1.0 / (0.3 + t ** 8)  # damping
        a = i * np.pi / nletters  # angle of the movement
        v = rotMatrix(a).dot([-1, 0])
        if i % 2:
            v[1] = -v[1]
        return lambda t: screenpos + 400 * d(t) * rotMatrix(0.5 * d(t) * a).dot(v)

    def cascade(screenpos, i, nletters):
        v = np.array([0, -1])
        d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t ** 4))
        return lambda t: screenpos + v * 400 * d(t - 0.15 * i)

    def arrive(screenpos, i, nletters):
        v = np.array([-1, 0])
        d = lambda t: max(0, 3 - 3 * t)
        return lambda t: screenpos - 400 * v * d(t - 0.2 * i)

    def vortexout(screenpos, i, nletters):
        d = lambda t: max(0, t)  # damping
        a = i * np.pi / nletters  # angle of the movement
        v = rotMatrix(a).dot([-1, 0])
        if i % 2:
            v[1] = -v[1]
        return lambda t: screenpos + 400 * d(t - 0.1 * i) * rotMatrix(-0.2 * d(t) * a).dot(v)

    # WE USE THE PLUGIN findObjects TO LOCATE AND SEPARATE EACH LETTER
    letters = findObjects(cvc)  # a list of ImageClips

    # WE ANIMATE THE LETTERS
    def moveLetters(letters, funcpos):
        return [letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
                for i, letter in enumerate(letters)]

    clips = [CompositeVideoClip(moveLetters(letters, funcpos), size=screensize).subclip(0, 5)
             for funcpos in [vortex, cascade, arrive, vortexout]]

    # WE CONCATENATE EVERYTHING AND WRITE TO A FILE
    final_clip = concatenate_videoclips(clips)
    audio_clip = AudioFileClip("media/music.aac").subclip(0, final_clip.duration)
    final_clip = final_clip.set_audio(audio_clip).afx(afx.audio_fadeout, 1.0)
    # final_clip = vfx.resize(final_clip, (570, 570))
    final_clip.write_videofile('videos/coolTextEffects.mp4', fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')
    # final_clip.write_gif('videos/coolGif.gif', fps=23)

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)
def test_issue_334():
    # NOTE: this is horrible. Any simpler version ?
    last_move = None
    last_move1 = None

    lis = [
        (0.0, 113, 167, 47), (0.32, 138, 159, 47), (0.44, 152, 144, 47),
        (0.48, 193, 148, 47), (0.6, 193, 148, 47), (0.76, 205, 138, 55),
        (0.88, 204, 121, 63), (0.92, 190, 31, 127), (1.2, 183, 59, 127),
        (1.4, 137, 22, 127), (1.52, 137, 22, 127), (1.72, 129, 67, 127),
        (1.88, 123, 69, 127), (2.04, 131, 123, 63), (2.24, 130, 148, 63),
        (2.48, 130, 148, 63), (2.8, 138, 180, 63), (3.0, 138, 180, 63),
        (3.2, 146, 192, 63), (3.28, 105, 91, 151), (3.44, 105, 91, 151),
        (3.72, 11, 48, 151), (3.96, 5, 78, 151), (4.32, 4, 134, 1),
        (4.6, 149, 184, 48), (4.8, 145, 188, 48), (5.0, 154, 217, 48),
        (5.08, 163, 199, 48), (5.2, 163, 199, 48), (5.32, 164, 187, 48),
        (5.48, 163, 200, 48), (5.76, 163, 200, 48), (5.96, 173, 199, 48),
        (6.0, 133, 172, 48), (6.04, 128, 165, 48), (6.28, 128, 165, 48),
        (6.4, 129, 180, 48), (6.52, 133, 166, 48), (6.64, 133, 166, 48),
        (6.88, 144, 183, 48), (7.0, 153, 174, 48), (7.16, 153, 174, 48),
        (7.24, 153, 174, 48), (7.28, 253, 65, 104), (7.64, 253, 65, 104),
        (7.8, 279, 116, 80), (8.0, 290, 105, 80), (8.24, 288, 124, 80),
        (8.44, 243, 102, 80), (8.56, 243, 102, 80), (8.8, 202, 107, 80),
        (8.84, 164, 27, 104), (9.0, 164, 27, 104), (9.12, 121, 9, 104),
        (9.28, 77, 33, 104), (9.32, 52, 23, 104), (9.48, 52, 23, 104),
        (9.64, 33, 46, 104), (9.8, 93, 49, 104), (9.92, 93, 49, 104),
        (10.16, 173, 19, 104), (10.2, 226, 173, 48), (10.36, 226, 173, 48),
        (10.48, 211, 172, 48), (10.64, 208, 162, 48), (10.92, 220, 171, 48),
    ]
    # lis1 is an identical copy of lis; posi() consumes lis while size()
    # consumes lis1 independently.
    lis1 = list(lis)

    def posi(t):
        global last_move
        if len(lis) == 0:
            return (last_move[1], last_move[2])
        if t >= lis[0][0]:
            last_move = item = lis.pop(0)
            return (item[1], item[2])
        else:
            if len(lis) > 0:
                dura = lis[0][0] - last_move[0]
                now = t - last_move[0]
                w = (lis[0][1] - last_move[1]) * (now / dura)
                h = (lis[0][2] - last_move[2]) * (now / dura)
                # print t, last_move[1] + w, last_move[2] + h
                return (last_move[1] + w, last_move[2] + h)
            return (last_move[1], last_move[2])

    def size(t):
        global last_move1
        if len(lis1) == 0:
            return (last_move1[3], last_move1[3] * 1.33)
        if t >= lis1[0][0]:
            last_move1 = item = lis1.pop(0)
            return (item[3], item[3] * 1.33)
        else:
            if len(lis1) > 0:
                dura = lis1[0][0] - last_move1[0]
                now = t - last_move1[0]
                s = (lis1[0][3] - last_move1[3]) * (now / dura)
                nsw = last_move1[3] + s
                nsh = nsw * 1.33
                # print t, nsw, nsh
                return (nsw, nsh)
            return (last_move1[3], last_move1[3] * 1.33)

    avatar = VideoFileClip("media/big_buck_bunny_432_433.webm", has_mask=True)
    avatar.audio = None
    maskclip = ImageClip("media/afterimage.png", ismask=True, transparent=True)
    avatar.set_mask(maskclip)  # must set maskclip here..
    concatenated = concatenate_videoclips([avatar] * 3)

    tt = VideoFileClip("media/big_buck_bunny_0_30.webm").subclip(0, 3)
    # TODO: Setting mask here does not work:
    # .set_mask(maskclip).resize(size)])
    final = CompositeVideoClip([tt, concatenated.set_position(posi).resize(size)])
    final.duration = tt.duration
    final.write_videofile(os.path.join(TMP_DIR, "issue_334.mp4"), fps=10)
clip = mpy.ImageSequenceClip(frames, fps=fps)
clip_name = "%s/ants_%d.mp4" % (tmp_dir, vid_count)
clip.write_videofile(clip_name, fps=fps)
clips.append(clip_name)

# Make sure to remove references to the data we just loaded.
# Run the GC to clean up the frames we just created, now that they
# have been written to a video.
frames = []
clip = None
gc.collect()
# TODO: no reason to free then reallocate all the frames, just clear them
count = 0
vid_count += 1

# write any remaining frames
if len(frames) != 0:
    clip = mpy.ImageSequenceClip(frames, fps=fps)
    clip_name = "%s/ants_%d.mp4" % (tmp_dir, vid_count)
    clip.write_videofile(clip_name, fps=fps)
    clips.append(clip_name)

inpt.close()

# Load the intermediate clips back in and join them into the final video.
clips = [mpy.VideoFileClip(name) for name in clips]
final_clip = mpy.concatenate_videoclips(clips)
final_clip.write_videofile("%s/ants.mp4" % dump_dir, fps=fps)
def video_sequencify(song_name):
    # We take all the gif videos and put them into one dictionary in which the
    # keys are timestamps in tuple form, (begin, end), and the values are the
    # mp4 files for each key word.
    # We iterate through the keys and bound each video to its respective
    # time stamp. If the length of the timestamp exceeds the first gif-video,
    # we iterate to the second, then the third, before looping back to the
    # first. Based on this order, we build an array of these full and
    # semi-cropped videos, concatenate them, combine the result with the
    # audio and return it.
    gifvid_dict = {}
    PATH_TO_JSON = ('C:/Users/jaysh/Desktop/VandyHacks/Hack Prototype/aeneastest/output/'
                    + song_name + '.json')

    for file in os.listdir('C:/Users/jaysh/Desktop/VandyHacks/Hack Prototype/GIFS'):
        gifToMp4(file)
    os.chdir('C:/Users/jaysh/Desktop/VandyHacks/Hack Prototype/Videos')

    with open(PATH_TO_JSON, 'r') as jsonfile:
        json_content = json.load(jsonfile)  # this is now in memory! you can use it outside 'open'

    i = 0
    for json_entry in json_content['fragments']:
        key = (float(json_entry['begin']), float(json_entry['end']))
        value = []
        for file in os.listdir('C:/Users/jaysh/Desktop/VandyHacks/Hack Prototype/Videos'):
            if json_entry['key_word'] in file:
                # this is just storing file names, not mp4 files
                value.append(file)
        gifvid_dict[key] = value

    for key in gifvid_dict:
        clip0 = mp.VideoFileClip(gifvid_dict[key][0])
        if clip0.duration >= key[1] - key[0]:
            # The first gif already covers the whole span: trim it.
            ## gifvid_dict[key][0].duration = key[1] - key[0]
            new_name1 = gifvid_dict[key][0][:-4] + '3.mp4'
            ffmpeg_extract_subclip(gifvid_dict[key][0], 0.0, key[1] - key[0],
                                   targetname=new_name1)
            gifvid_dict[key] = gifvid_dict[key][0]
        elif clip0.duration < key[1] - key[0]:
            # Loop through the gifs until the span is filled, trimming the last one.
            temp_array = []
            difference = key[1] - key[0]
            i = 0
            while difference > 0:
                clipi = mp.VideoFileClip(gifvid_dict[key][i % 3])
                difference -= clipi.duration
                temp_array.append(gifvid_dict[key][i % 3])
                i += 1
            ## temp[i-1].duration += difference
            clipiminus = mp.VideoFileClip(temp_array[i - 1])
            new_name2 = temp_array[i - 1][:-4] + str(i) + '.mp4'
            ffmpeg_extract_subclip(temp_array[i - 1], 0.0,
                                   clipiminus.duration + difference,
                                   targetname=new_name2)
            # now we have to merge these clips
            close_clip(clipiminus)
            close_clip(clip0)
            mp4_array = []
            for file in temp_array:
                clip = mp.VideoFileClip(file)
                mp4_array.append(clip)
                close_clip(clip)
            final_clip = concatenate_videoclips(mp4_array, method='compose')
            final_clip.write_videofile(temp_array[0][:-5] + '.mp4')
            close_clip(final_clip)
            gifvid_dict[key] = temp_array[0][:-5] + '.mp4'

    mega_array = gifvid_dict.values()
    mega_mp4_array = []
    for file in mega_array:
        clip = mp.VideoFileClip(file)
        mega_mp4_array.append(clip)
        close_clip(clip)
    super_final_clip = concatenate_videoclips(mega_mp4_array, method='compose')
    super_final_clip.write_videofile('super_final_gif.mp4')
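# The span-filling logic above can also be expressed directly on moviepy
# clips. The sketch below is only an illustration of that idea (fill_span and
# clip_paths are hypothetical names, not part of the snippet above): keep
# appending clips until the span is covered, trimming the last one to fit.
import moviepy.editor as mp

def fill_span(clip_paths, span_start, span_end):
    remaining = span_end - span_start
    parts = []
    i = 0
    while remaining > 0:
        clip = mp.VideoFileClip(clip_paths[i % len(clip_paths)])
        if clip.duration > remaining:
            clip = clip.subclip(0, remaining)
        remaining -= clip.duration
        parts.append(clip)
        i += 1
    return mp.concatenate_videoclips(parts, method='compose')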
def generate_video(video_ids, options=None, cache='./cache'):
    # Define default options and override anything explicitly set
    opts = {
        'num_clips': 20,
        'min_clip_len': 0.04,
        'clip_len_var': 0.4,
        'width': 1920,
        'height': 1080,
        'fps': 30,
        'codec': 'libx264',
        'bitrate': '8000k',
        'audio_bitrate': '384k',
        'output': '',
    }
    if options is not None:
        opts.update(options)

    # Load the videos
    paths = [
        os.path.join(cache, 'videos', '{}.mp4'.format(video_id))
        for video_id in video_ids
    ]
    videos = [editor.VideoFileClip(path) for path in paths]

    # Segment the videos into random clips
    clips = []
    for iteration in xrange(opts['num_clips']):
        clip = sample(videos, 1)[0]
        length = random()*opts['clip_len_var'] + opts['min_clip_len']
        start_time = random()*(clip.duration - length)
        print 'Clip', iteration, ':',
        print 'Slicing video', video_ids[videos.index(clip)],
        print '(', start_time, 'to', start_time + length, ')'
        clips += [
            clip
            .subclip(start_time, start_time + length)
            .set_fps(opts['fps'])
            .resize((opts['width'], opts['height']))
        ]

    # Combine the clips
    result = editor.concatenate_videoclips(clips)

    # Output result to a file
    title = (
        'output/generated_{}.mp4'.format(
            md5.new(''.join(video_ids)).hexdigest()
        )
        if not len(opts['output'])
        else opts['output']
    )

    # Create the output dir if it doesn't exist
    output_dir = os.path.dirname(title)
    if output_dir and not os.path.exists(output_dir):
        os.mkdir(output_dir)

    print 'Saving result:', title
    result.write_videofile(
        title,
        fps=opts['fps'],
        codec=opts['codec'],
        bitrate=opts['bitrate'],
        audio_bitrate=opts['audio_bitrate']
    )
def Merge(videoList, endTime, gap, lastCount):
    endTime = gap * int(endTime / gap)
    videoMap = {}
    queue = []
    countMap = {}
    for i in range(0, endTime, gap):
        videoMap[i] = []

    # Map each gap-aligned time slot to the indices of the videos covering it.
    index = 0
    for vid in videoList:
        startKey = gap * math.ceil(vid.start_time / gap)
        endKey = gap * math.floor(vid.end_time / gap)
        for i in range(startKey, endKey, gap):
            videoMap[i].append(index)
        countMap[index] = 0
        index = index + 1
        print(startKey)
        print(endKey)
        print(vid.start_time)
        print(vid.end_time)
        print("---------------------------")
    print(videoMap)

    # Getting first video
    #*** availList = videoMap[0].copy()
    availList = copy.deepcopy(videoMap[0])
    vidSelectIndex = availList[randint(0, len(availList) - 1)]
    mergeVideo = VideoFileClip(videoList[vidSelectIndex].video_clip).subclip(0, gap).resize((1280, 720))
    #mergeAudio = mergeVideo.audio
    queue.append(vidSelectIndex)
    countMap[vidSelectIndex] = countMap[vidSelectIndex] + 1

    for time in range(gap, endTime, gap):
        print("time is ", str(time))
        if len(videoMap[time]) != 0:
            #*** availList = videoMap[time].copy()
            availList = copy.deepcopy(videoMap[time])
            # NEEDS TO BE CONVERTED TO BINARY SEARCH TO IMPROVE TIME EFFICIENCY
            inqueue = []
            for i in queue:
                for j in availList:
                    if i == j:
                        inqueue.append(i)
                        break
            #print("availList is ")
            #print(availList)
            #print("queue is")
            #print(queue)
            #print("Elements in queue and available list is")
            #print(inqueue)
            if len(inqueue) == len(availList):
                vidSelectIndex = inqueue[0]
                tempVideo = videoList[vidSelectIndex]
                tempClip = VideoFileClip(tempVideo.video_clip).subclip(
                    time - tempVideo.start_time,
                    time - tempVideo.start_time + gap).resize((1280, 720))
                #tempAudio = tempClip.audio
                #mergeAudio = concatenate_audioclips([mergeAudio, tempAudio])
                mergeVideo = concatenate_videoclips([mergeVideo, tempClip])
                #tempClip.close()
                #tempAudio.close()
                queue.remove(vidSelectIndex)
                queue.append(vidSelectIndex)
                countMap[vidSelectIndex] = countMap[vidSelectIndex] + 1
            else:
                for i in inqueue:
                    availList.remove(i)
                minOccur = countMap[availList[0]]
                for i in range(1, len(availList)):
                    if countMap[availList[i]] < minOccur:
                        minOccur = countMap[availList[i]]
                minOccurList = []
                for i in availList:
                    if countMap[i] == minOccur:
                        minOccurList.append(i)
                vidSelectIndex = minOccurList[randint(0, len(minOccurList) - 1)]
                tempVideo = videoList[vidSelectIndex]
                tempClip = VideoFileClip(tempVideo.video_clip).subclip(
                    time - tempVideo.start_time,
                    time - tempVideo.start_time + gap).resize((1280, 720))
                #tempAudio = tempClip.audio
                #mergeAudio = concatenate_audioclips([mergeAudio, tempAudio])
                mergeVideo = concatenate_videoclips([mergeVideo, tempClip])
                #tempClip.close()
                #tempAudio.close()
                if len(queue) == lastCount:
                    del queue[0]
                    queue.append(vidSelectIndex)
                else:
                    queue.append(vidSelectIndex)
                countMap[vidSelectIndex] = countMap[vidSelectIndex] + 1

    #tempAudio.close()
    #mergeVideo = mergeVideo.set_audio(mergeAudio)
    mergeVideo.write_videofile("mashup.mp4", audio=False)
    #return mergeVideo
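# The nested loop that builds `inqueue` above is quadratic in the queue and
# slot sizes, which the comment in the code already flags. As a hedged aside,
# a set-based membership test gives the same result in roughly linear time
# while preserving the queue order that the selection step relies on
# (clips_already_queued is an illustrative name, not part of the code above):
def clips_already_queued(queue, avail_list):
    avail = set(avail_list)
    return [i for i in queue if i in avail]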
cap = cv2.VideoCapture(sys.argv[1])
cap.set(cv2.CAP_PROP_POS_FRAMES, float(sys.argv[2]))
framenum = int(sys.argv[2])
vsize = (1504, 80)
out = cv2.VideoWriter(sys.argv[4], cv2.VideoWriter_fourcc(*'MP4V'), 60.0, vsize)

# startx/stopx and starty/stopy are the crop bounds defined earlier in the script.
while True:
    #dateTimeObj = datetime.now()
    #timestampStr = dateTimeObj.strftime("%H:%M:%S.%f")
    #print('start: ', timestampStr)
    ret, frame = cap.read()
    if not ret:
        break
    cropped = frame[starty:stopy, startx:stopx]
    out.write(cropped)
    print(framenum)
    framenum += 1
    print(framenum)
    if framenum > int(sys.argv[3]):
        break

cap.release()
out.release()
cv2.destroyAllWindows()

from moviepy.editor import VideoFileClip, concatenate_videoclips

clip_1 = VideoFileClip("0.mp4")
clip_2 = VideoFileClip("1.mp4")
final_clip = concatenate_videoclips([clip_1, clip_2])
final_clip.write_videofile("final.mp4")
l_list = lyrics.split()

# tempo_arr is four repetitions of the same 26-value pattern followed by a
# 3-value tail; note_length is four repetitions of a 26-value pattern followed
# by a 10-value tail. They are written that way here instead of as one long
# literal; the values are unchanged.
_tempo_block = [
    0.46488280533333337, 0.7943834453333334, 0.7903162773333334, 0.7736423893333334,
    1.4761018346666668, 0.7736423893333334, 1.4761018346666668, 1.153845056,
    0.9901127786666668, 0.3596191946666667, 1.0890205013333334, 0.7763538346666666,
    0.8760920000000001, 0.6905781120000001, 1.1793786666666666, 3.2308872213333335,
    0.5445102506666667, 0.7763538346666666, 0.8760920000000001, 0.6905781120000001,
    1.1793786666666666, 3.2308872213333335, 0.5987591680000001, 0.6562531484444445,
    0.5465413333333333, 0.9132993333333334,
]
tempo_arr = _tempo_block * 4 + [0.46488280533333337, 0.7943834453333334, 2.3709488320000003]

_note_block = [
    0.3997867803837953, 0.19989339019189764, 0.3997867803837953, 0.19989339019189764,
    0.19989339019189764, 0.19989339019189764, 0.19989339019189764, 0.19989339019189764,
    0.19989339019189764, 0.3997867803837953, 0.19989339019189764, 0.19989339019189764,
    0.19989339019189764, 0.19989339019189764, 0.19989339019189764, 0.19989339019189764,
    0.3997867803837953, 0.19989339019189764, 0.19989339019189764, 0.19989339019189764,
    0.19989339019189764, 0.19989339019189764, 0.19989339019189764, 0.29984008528784645,
    0.29984008528784645, 0.3997867803837953,
]
note_length = _note_block * 4 + [
    0.3997867803837953, 0.19989339019189764, 0.13326226012793177, 0.13326226012793177,
    0.13326226012793177, 0.13326226012793177, 0.13326226012793177, 0.13326226012793177,
    0.3997867803837953, 0.3997867803837953,
]

sourcepath = r'C:/ytpmv/morshu/'  # filepath
clips = []
print(len(tempo_arr))
print(len(note_length))
print(len(l_list))
for i in range(len(tempo_arr)):
    source_clip = mp.VideoFileClip(sourcepath + 'long/rtxvid/' + l_list[i] + '.mp4')
    sped_clip = source_clip.fx(vfx.speedx, tempo_arr[i])
    # NOTE: this reassignment discards the speed change applied above.
    sped_clip = source_clip
    clips.append(sped_clip.subclip(0, note_length[i]))
    '''
    if i%70==69:
        if i//70 < 5:
            concat_clip = mp.concatenate_videoclips(clips, method="compose")
            concat_clip.write_videofile(sourcepath+'concatted '+str(i//70)+'.mp4')
            clips=[]
    '''
concat_clip = mp.concatenate_videoclips(clips, method="compose")
concat_clip.write_videofile(sourcepath + 'verse4.mp4')
'''
print(len(clip_number))
print(len(tempo_arr))
'''
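# Holding a hundred-plus VideoFileClip objects open at once can exhaust memory
# and file handles; the commented-out block above hints at rendering the verse
# in batches. A hedged sketch of that idea (write_in_batches, the batch size,
# and the file names are illustrative only):
import moviepy.editor as mp

def write_in_batches(clips, out_prefix, batch_size=70):
    part_files = []
    for start in range(0, len(clips), batch_size):
        part = mp.concatenate_videoclips(clips[start:start + batch_size], method="compose")
        name = "{}_part{}.mp4".format(out_prefix, start // batch_size)
        part.write_videofile(name)
        part_files.append(name)
    # reload the rendered parts and join them into the final file
    parts = [mp.VideoFileClip(name) for name in part_files]
    mp.concatenate_videoclips(parts, method="compose").write_videofile(out_prefix + ".mp4")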
from moviepy.editor import VideoFileClip, concatenate_videoclips

video_1 = VideoFileClip("WandaVision 1.mp4")
video_2 = VideoFileClip("WandaVision 2.mp4")
final_video = concatenate_videoclips([video_1, video_2])
final_video.write_videofile("WandaVision.mp4")
def main():
    import argparse
    parser = argparse.ArgumentParser(
        description='TV Series Tools: Video'
    )
    parser.add_argument('--input', '-i', dest='inputfile', required=True,
                        help='file path to a file containing info on how to'
                        ' cut the clips')
    parser.add_argument('--clips', '-c', dest='clipsdir', required=True,
                        help='clips video files location')
    parser.add_argument('--output', '-o', dest='outputdir', required=True,
                        help='directory name inside --clips directory in which'
                        ' the cut clips will be rendered, or path to a single'
                        ' output video file if --join is set')
    parser.add_argument('--join', '-j', dest='join', action='store_true',
                        help='concat cut video clips')
    parser.add_argument('--video-fps', '-vf', dest='video_fps', type=int,
                        help='video fps, defaults to {}'.format(DEFAULT_FPS))
    parser.add_argument('--video-ext', '-ve', dest='video_ext',
                        help='video file extension, defaults to {}'.format(DEFAULT_EXT))
    parser.add_argument('--video-codec', '-vc', dest='video_codec',
                        help='video codec, defaults to not set, which means'
                        ' that moviepy will choose the codec automatically')
    parser.add_argument('--video-params', '-vp', dest='video_params',
                        help='additional parameters for FFmpeg,'
                        ' example: --video-params="-vf eq=gamma=1.5"')
    parser.add_argument('--resize-width', '-rw', dest='resize_width', type=int,
                        help='resize width; you must set both --resize-width'
                        ' and --resize-height')
    parser.add_argument('--resize-height', '-rh', dest='resize_height', type=int,
                        help='resize height; you must set both --resize-width'
                        ' and --resize-height')
    parser.add_argument('--limit', '-l', dest='limit', type=int,
                        default=DEFAULT_LIMIT,
                        help='process only first <limit> clips')
    parser.add_argument('--speed', '-sp', dest='speed', type=float,
                        help='speed of the composition; the standard speed'
                        ' will be multiplied by this number, hence'
                        ' 1 = normal speed, 0.5 = half the normal speed,'
                        ' 3 = three times as fast, etc.')
    parser.add_argument('--subtitles', '-sb', dest='subtitles', action='store_true',
                        help='render subtitles')
    parser.add_argument('--intertitles', '-it', dest='intertitles', action='store_true',
                        help='render intertitles')
    parser.add_argument('--intertitle-color', '-ic', dest='intertitle_color',
                        default=DEFAULT_TEXT_COLOR,
                        help='intertitle color; default \'{}\''.format(DEFAULT_TEXT_COLOR))
    parser.add_argument('--intertitle-font', '-if', dest='intertitle_font',
                        default=DEFAULT_TEXT_FONT,
                        help='intertitle font; default \'{}\''.format(DEFAULT_TEXT_FONT))
    parser.add_argument('--intertitle-fontsize', '-is', dest='intertitle_fontsize',
                        type=int, default=DEFAULT_INTERTITLE_FONTSIZE,
                        help='intertitle font size in px; default \'{}\''
                        .format(DEFAULT_INTERTITLE_FONTSIZE))
    parser.add_argument('--intertitle-position', '-ip', dest='intertitle_position',
                        default=DEFAULT_INTERTITLE_POSITION,
                        help='intertitle position; default \'{}\''
                        .format(DEFAULT_INTERTITLE_POSITION))
    parser.add_argument('--intertitle-duration', '-id', dest='intertitle_duration',
                        type=int, default=DEFAULT_INTERTITLE_DURATION,
                        help='intertitle duration in seconds; default \'{}\''
                        .format(DEFAULT_INTERTITLE_DURATION))
    parser.add_argument('--fadeout', '-fd', dest='fadeout', type=int,
                        help='duration in milliseconds of a fadeout after each'
                        ' clip; defaults to 0 meaning no fadeout')
    args = parser.parse_args()

    compositions = listio.read_map(args.inputfile)
    if not compositions:
        sys.exit(1)

    all_clips = []
    cache_video_clips = {}
    for i, composition in enumerate(compositions):
        if i == args.limit:
            print('LIMIT {} HIT'.format(args.limit))
            break
        file_path = os.path.join(args.clipsdir, composition[0])
        print('CLIP {} "{}"'.format(i, file_path))
        cut_start = parse_duration(composition[1])
        cut_end = parse_duration(composition[2])
        if not composition[1] or not composition[2]:
            print(' SKIP no cut defined')
            continue
        print(' CUT {} --> {}'.format(cut_start, cut_end))
        if composition[0] in DEBUG_SKIP:
            print(' SKIP clip found in DEBUG_SKIP list')
            continue
        if not os.path.isfile(file_path):
            print(' SKIP file not found')
            continue
        if not args.join:
            params = []
            if args.intertitles:
                params.append('i')
            clip_file_path = format_clip_file_path(
                file_path, args.outputdir, cut_start, cut_end,
                ext=args.video_ext, params=params)
            print(' OUTPUT "{}"'.format(clip_file_path))
            if os.path.isfile(clip_file_path):
                print(' SKIP output exists "{}"'.format(clip_file_path))
                continue
        if file_path not in cache_video_clips:
            cache_video_clips[file_path] = VideoFileClip(file_path)
        video_clip = cache_video_clips[file_path]
        if cut_start and cut_end:
            video_sub_clip = video_clip.subclip(cut_start, cut_end)
        else:
            video_sub_clip = video_clip
        if args.video_fps:
            video_sub_clip = video_sub_clip.set_fps(args.video_fps)
        composite_clip = video_sub_clip
        if args.resize_width and args.resize_height:
            composite_clip = filter_resize(
                composite_clip, args.resize_width, args.resize_height)
        if args.subtitles:
            raise NotImplementedError
            # TODO: Figure out what subtitles path should be.
            # composite_clip = filter_add_subtitles(
            #     composite_clip,
            #     subtitles_path)
        if args.intertitles:
            text = composition[3]
            print(' INTERTITLE {}'.format(text))
            if args.resize_width and args.resize_height:
                intertitle_size_w = args.resize_width
                intertitle_size_h = args.resize_height
            else:
                intertitle_size_w = composite_clip.w
                intertitle_size_h = composite_clip.h
            composite_clip = filter_add_intertitle(
                composite_clip, text, args.intertitle_color,
                args.intertitle_font, args.intertitle_fontsize,
                args.intertitle_position, args.intertitle_duration,
                intertitle_size_w, intertitle_size_h)
        if args.speed:
            composite_clip = filter_adjust_speed(composite_clip, args.speed)
        if args.fadeout:
            composite_clip = filter_fadeout(composite_clip, args.fadeout)
        if args.join:
            all_clips.append(composite_clip)
        else:
            render(
                composite_clip, clip_file_path,
                fps=args.video_fps, ext=args.video_ext,
                codec=args.video_codec, ffmpeg_params=args.video_params)

    if args.join:
        joined_clip = concatenate_videoclips(all_clips)
        render(
            joined_clip, args.outputdir,
            fps=args.video_fps, ext=args.video_ext,
            codec=args.video_codec, ffmpeg_params=args.video_params)
    sys.exit()
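# The loop above indexes each composition row as [file name, cut start,
# cut end, intertitle text]. As a hedged illustration of the core cut-and-join
# step it performs, the sketch below takes rows of (file name, start seconds,
# end seconds); parse_duration, render and the filter_* helpers are
# project-specific and are not reproduced here, and join_cuts is an
# illustrative name only.
import os
from moviepy.editor import VideoFileClip, concatenate_videoclips

def join_cuts(rows, clips_dir, output_path):
    cache = {}
    cuts = []
    for file_name, start, end in rows:
        path = os.path.join(clips_dir, file_name)
        if path not in cache:
            cache[path] = VideoFileClip(path)
        cuts.append(cache[path].subclip(start, end))
    concatenate_videoclips(cuts).write_videofile(output_path)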
def write_to_video(arr):
    # use for testing and making sure it outputs correctly
    images = [ImageClip(x).set_duration(0.05) for x in arr]
    concat_clip = concatenate_videoclips(images, method="compose")
    concat_clip.write_videofile('test.mp4', fps=15)
text = mpy.VideoClip(render_text, duration=DURATION)

# load audio files to add them to the video
silence_half = mpy.AudioFileClip("0.5-second-of-silence.mp3")
silence_sec = mpy.AudioFileClip("1-second-of-silence.mp3")
video_audio1 = mpy.AudioFileClip("untranslated.mp3")
video_audio2 = mpy.AudioFileClip("translated.mp3")
video_audio3 = mpy.AudioFileClip("untranslated_fast.mp3")

# put all sounds together
video_audio = mpy.concatenate_audioclips([
    silence_half, video_audio1, silence_half, video_audio2,
    silence_half, video_audio3, silence_sec
])

video = mpy.CompositeVideoClip([text.set_position('center')],
                               size=VIDEO_SIZE).on_color(
    color=WHITE, col_opacity=1).set_audio(video_audio)
video.write_videofile("video-" + str(v_counter) + ".mp4", fps=10)
v_counter = v_counter + 1

# combine all videos into one
clips = []
for i in range(1, len(word_list) + 1):
    clips.append(mpy.VideoFileClip("video-" + str(i) + ".mp4"))
final_clip = mpy.concatenate_videoclips(clips)
final_clip.write_videofile("final_clip.mp4")
def generate_video(test=True):
    video_list = []
    sub_video = subscribers_video()
    video_list.append(sub_video)
    week = Week.objects.all()[0]
    for i, position in enumerate(week.position_set.all()):
        if i == 2 and test:
            break
        if i == 50:
            break
        video = VideoFileClip(
            join(settings.VIDEO_ASSETS,
                 "{} - {}.mp4".format(position.song.name, position.song.artist))
        ).set_duration(10)
        # video = audio_fadeout(video, 2)
        graph = (ImageClip(join(settings.IMAGES, "graph{}.png".format(position.position)))
                 .set_duration(duration))
        graph = graph.set_pos(lambda t: (
            max(1445, 1800 - t * 700),
            (5, int(20 - 400*t + 400*13.2))[t > 13.2]))

        w, h = video.size
        position_image = ImageClip(
            join(settings.IMAGES, "pos{}.png".format(position.position))
        ).set_duration(duration)
        change_image = ImageClip(
            join(settings.IMAGES, "change{}.png".format(position.position))
        ).set_duration(duration)
        lower_third_image = ImageClip(
            join(settings.IMAGES, "lower_third{}.png".format(position.position))
        ).set_duration(duration)

        # I am *NOT* explaining the formula; work it out if you can/want.
        # txt_mov = txt_col.set_pos(lambda t: (max(w/30, int(w-0.5*w*t)), max(5*h/6, int(100*t))))
        txt_mov = position_image.set_pos(
            lambda t: (min(0, -position_image_size['x'] + t * 400),
                       (1080 - 20 - position_image_size['y'],
                        int(1060 - position_image_size['y'] + 380*t - 380*13))[t > 13]))
        change_image_mov = change_image.set_pos(
            lambda t: (min(change_image_size['x'], -position_image_size['x'] + t * 700),
                       (1080 - 20 - position_image_size['y'],
                        int(1060 - position_image_size['y'] + 400*t - 400*13.2))[t > 13.2]))
        lower_third_mov = lower_third_image.set_pos(
            lambda t: (min(change_image_size['x'] + position_image_size['y'],
                           -lower_third_size['x'] + t * 2500),
                       (1080 - 20 - lower_third_size['y'],
                        int(1060 - lower_third_size['y'] + 430*t - 430*13.4))[t > 13.4]))
        gainer_mov = chart_highlights(week, position.position)

        final = CompositeVideoClip(
            [video, lower_third_mov, change_image_mov, txt_mov, graph] + gainer_mov,
            size=(1920, 1080)).fadeout(0.2)
        video_list.append(final)

    FINAL = concatenate_videoclips(list(reversed(video_list)))
    FINAL.write_videofile(join(settings.VIDEOS, "billboard_top_50_this_week.mp4"),
                          fps=24, codec='libx264')
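# The set_pos lambdas above rely on the `(a, b)[condition]` tuple-indexing
# idiom to keep an overlay parked until a fixed time and then switch to a
# slide-out formula. A hedged, simplified sketch of the same idiom for a
# lower-third that slides in from the left and drops out of frame after
# exit_t seconds (all numbers and the function name are illustrative only):
def slide_in_then_drop(t, park_x=0, park_y=980, speed=2500, exit_t=13.4, drop_speed=430):
    # x: slide in from off-screen left, then stay parked at park_x
    x = min(park_x, -1920 + t * speed)
    # y: parked until exit_t, then drop below the frame
    y = (park_y, int(park_y + drop_speed * (t - exit_t)))[t > exit_t]
    return (x, y)

# usage: lower_third_image.set_pos(slide_in_then_drop)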
def render_vines(data, channel=None):
    '''
    Individually renders all of the vines specified in data with the username,
    description, order, and optionally channel icon. Vines that have already
    been rendered and exist in render/ get skipped. Vines are intercut with a
    random second of a longer static source video, as well as a second of a
    static WAV file.

    data     Pandas DataFrame: contains the vine metadata
    channel  channel name used to identify the icon
    '''
    # verify files exist in the cache folder
    datav = exists(data, 'cache')
    # files already rendered get skipped
    datavrid = list(exists(data, 'render')['id'].astype(str))
    # adds data so that the order of the videos can be printed on screen
    datav['order'] = datav.index.values

    for i, row in datav.iterrows():
        # replaces all instances of NaN with a blank string
        row = row.replace(np.nan, '', regex=True)
        vineid = row['id']
        if vineid not in datavrid:
            vine = (vfc_from_file(vineid, 'cache')
                    .on_color(size=(854, 480), color=(20, 20, 25), pos='center')
                    .resize((1280, 720)))

            # encodes text as ascii for textclip creation
            user = enc_str(row['username']).upper()
            user = re.sub('[_]+', ' ', user)
            user = re.sub('[()]+', '', user)
            desc = enc_str(row['description']).upper()
            desc = re.sub(' #[a-zA-Z0-9]+', '', desc)
            desc = re.sub('\W{2,}', ' ', desc)
            user = '******' + user

            # lambda to create a text clip
            tc = lambda text, size, xline: (mpe.TextClip(txt=text, size=(270, 720),
                                                         method='caption',
                                                         align='center',
                                                         font='Heroic-Condensed-Bold',
                                                         fontsize=size,
                                                         color='white',
                                                         interline=xline)
                                            .set_duration(vine.duration))
            user_osd = tc(user, 85, 11).set_position((0, 25))
            desc_osd = tc(desc, 60, 0).set_position('right')

            # gets the icon if it exists
            channel_icon_path = ap('meta/icons/' + channel + '.png')
            channel_icon_size = (144, 144)
            channel_icon = ''
            if osp.isfile(channel_icon_path):
                channel_icon = mpe.ImageClip(str(channel_icon_path), transparent=True)
                channel_icon = (channel_icon.set_duration(vine.duration)
                                .resize(channel_icon_size)
                                .set_position((0, 5)))

            # vine order number within the video, autoscaling text size when the
            # order hits 3 digits. the order is zero-based, and we need to make
            # it one-based, which is why we increment the rendered order
            # and why the check is for greater than 98
            order_text_size = 125
            if row['order'] > 98:
                order_text_size = 100
            order = (mpe.TextClip(txt=str(row['order'] + 1), size=channel_icon_size,
                                  font='Heroic-Condensed-Bold',
                                  fontsize=order_text_size,
                                  align='center', color='red')
                     .set_position((140, 20))
                     .set_duration(vine.duration))

            # grabs a random second from our static video sourced from
            # http://www.videezy.com/elements-and-effects/242-tv-static-hd-stock-video
            static_v = vfc_from_file('static', '').resize(vine.size)
            randsec = random.randint(0, int(static_v.duration) - 2)
            static_v = static_v.subclip(randsec, randsec + 1)
            # grab the audio for the static and set it to the video
            static_a = mpe.AudioFileClip(ap('static.wav')).volumex(0.3)
            static = static_v.set_audio(static_a)

            parts = [vine, user_osd, desc_osd, order]
            if channel_icon:
                parts.append(channel_icon)

            # composite the parts on the sides of the video,
            # then concatenate with the static intercut
            comp = mpe.CompositeVideoClip(parts)
            comp = mpe.concatenate_videoclips([comp, static])

            if not osp.isdir(ap('render/')):
                os.mkdir(ap('render/'))

            # start the render
            path = ap('render/' + vineid + '.mp4')
            write_x264(comp, path)
            # comp.save_frame(path)
        else:
            print('skipping ' + vineid)
def __init__(self, sessID, inD, workD, outD):
    self.sessID = sessID
    self.inD = inD
    self.workD = workD
    vidList = self.vidList(inD)
    spreadCalc = DEFAULT_REACH_ITER
    self.chulenms = DEFAULT_PERIOD
    self.chuLenS = self.chulenms / 1000
    apList = list()
    videoChunks = list()
    tmpVideoChunks = list()
    length = len(vidList)

    import concurrent
    import concurrent.futures
    import threading

    #print("extracting audio")
    # create chunk lists
    #self.chunkList = list()
    #self.tmpCounter = 0
    #print("chunking audio")
    #for i in range(length):
    #    self.extractAudio(i, vidList)
    #    self.chunkAudio(vidList[i])

    # run code once chunks are ready
    print(f"vidList [s={len(vidList)}] = {vidList}")

    UUID = list()
    for i in range(len(vidList)):
        UUID.append(randomString(7))

    # make audio
    audioFileList = list()  # [None] * len(vidList)
    #executor = concurrent.futures.ProcessPoolExecutor(61)
    #futures = [executor.submit(self.makeAudio, i, vidList, audioFileList, UUID[i])
    #           for i in range(len(vidList))]
    ## run code in the meantime
    #concurrent.futures.wait(futures)
    for i in range(len(vidList)):
        self.makeAudio(i, vidList, audioFileList, UUID[i])
    #print(f"audio file list: {audioFileList}")

    # make chunks
    #totalLength = 0.0
    #for video in vidList:
    #    totalLength += 1000.0 * (video.getFullVideo().duration)
    #totalLength = int(0.5 + (1 + (totalLength / self.chulenms)))
    audioChunksList = list()  # [None] * totalLength
    #audioSegs = list()
    #for audioFile in audioFileList:
    #    tmp = AudioSegment.from_mp3(audioFile.aPath())
    #    audioSegs.append(tmp)
    #executor = concurrent.futures.ProcessPoolExecutor(61)
    #futures = [executor.submit(self.convertMP3toChunks, i, audioFileList, audioChunksList, UUID[i])
    #           for i in range(len(audioFileList))]
    ## run code in the meantime
    #concurrent.futures.wait(futures)
    for i in range(len(audioFileList)):
        self.convertMP3toChunks(i, audioFileList, audioChunksList, UUID[i])
    #print(f"audio chunk list: {audioChunksList}")
    ######exit(1)

    # floor values
    executor = concurrent.futures.ProcessPoolExecutor(61)
    futures = [executor.submit(self.floorChunks, i, audioChunksList)
               for i in range(len(audioChunksList))]
    # run code in the meantime
    concurrent.futures.wait(futures)

    # calculate sv
    #executor = concurrent.futures.ProcessPoolExecutor(61)
    #futures = [executor.submit(self.calculateSV, audioChunksList)
    #           for i in range(len(audioChunksList))]
    ## run code in the meantime
    #concurrent.futures.wait(futures)
    #print(f"length of audioChunks = {len(audioChunksList)}")
    #executor = concurrent.futures.ProcessPoolExecutor(10)
    #futures = [executor.submit(try_multiple_operations, group)
    #           for group in grouper(5, items)]
    #concurrent.futures.wait(futures)

    print(f"calculating spread value: {0}/{len(audioChunksList)}\r", end="")
    audioDataList = [None] * len(audioChunksList)
    for i in range(len(audioChunksList)):
        self.helperCalculateSV(i, audioChunksList, audioDataList, -500)
    print(f"\r\n")

    # filter chunks: keep a chunk if either its own level or its reach level
    # is above the threshold
    #filteredChunksList = list(map(lambda x: (True, x) if x[2] > DEFAULT_THRESHOLD or x[3] > DEFAULT_REACH_THRESH else (False, x), audioChunksList))
    #####d = dict()
    #####d.values() = audioDataList
    ######print(f"{audioDataList[0]}\n{audioDataList[10]}")
    #####exit()
    filteredChunksList = list(
        map(lambda x: (True, *x) if x[2] > -400 or x[5] > -400 else (False, *x),
            audioDataList))
    #print(f"length of filtered = {len(filteredChunksList)}")

    # combine video
    print(f"{audioDataList[0]}\n{audioDataList[10]}")
    videoChunksList = list()  # [None] * len(filteredChunksList)
    #executor = concurrent.futures.ProcessPoolExecutor(61)
    #futures = [executor.submit(self.generateSubclips, i, filteredChunksList, videoChunksList, UUID[i])
    #           for i in range(len(audioChunksList))]
    ## run code in the meantime
    #concurrent.futures.wait(futures)
    i = 0
    while i < len(filteredChunksList):
        #print(f"{filteredChunksList[i]}")
        if filteredChunksList[i][0]:
            # extend j to the end of this run of kept chunks from the same video
            j = i
            while (filteredChunksList[j][0]
                   and filteredChunksList[i][1] == filteredChunksList[j][1]
                   and j < len(filteredChunksList) - 1):
                #print(f"{filteredChunksList[j]}")
                j += 1
            #print(f"{i} {j} {len(filteredChunksList)}")
            video = vidList[filteredChunksList[i][1]].getFullVideo()
            a = max(0, filteredChunksList[i][3] / 1000)
            b = min(video.duration, filteredChunksList[j][4] / 1000)
            # min/max ordering fixes a moviepy bug with frame rounding up
            videoChunksList.append(video.subclip(min(a, b), max(a, b)))
            i = j
        i += 1

    # final video
    print(f"creating final video... ({len(videoChunksList)}) {videoChunksList[0:3]}")
    if len(videoChunksList) > 1:
        outputMovie = mpye.concatenate_videoclips(videoChunksList, method='compose')
    else:
        outputMovie = videoChunksList[0]
    #outputMovie = mpye.concatenate_videoclips(videoChunksList)
    outputMovie.write_videofile(
        outD.append(f'{randomString(3)} -- output.mp4').aPath(),
        codec='libx265',
        audio_codec='libmp3lame',
        audio_bitrate='96k',
        preset='fast',
        threads=16)

    if cleanup:
        fSet = list(os.listdir(k.aPath()))
        for file in fSet:
            file.delete()
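# The constructor above is, at its core, an automatic silence cut: measure
# loudness per fixed-length chunk, keep chunks above a threshold, merge runs
# of kept chunks into subclips, and concatenate. A hedged, self-contained
# sketch of that idea using pydub and moviepy; the chunk length, threshold,
# and the cut_silence name are illustrative only and the original uses its
# own helpers rather than this code.
from pydub import AudioSegment
import moviepy.editor as mpye

def cut_silence(video_path, audio_path, out_path, chunk_ms=250, thresh_dbfs=-40):
    audio = AudioSegment.from_file(audio_path)
    video = mpye.VideoFileClip(video_path)
    # loudness test per fixed-length chunk
    keep = [audio[i:i + chunk_ms].dBFS > thresh_dbfs
            for i in range(0, len(audio), chunk_ms)]
    # merge consecutive kept chunks into (start, end) ranges in seconds
    spans, start = [], None
    for idx, kept in enumerate(keep):
        if kept and start is None:
            start = idx * chunk_ms / 1000
        elif not kept and start is not None:
            spans.append((start, idx * chunk_ms / 1000))
            start = None
    if start is not None:
        spans.append((start, video.duration))
    parts = [video.subclip(a, min(b, video.duration)) for a, b in spans]
    mpye.concatenate_videoclips(parts, method='compose').write_videofile(out_path)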
def create_simple_video(request):
    # load images
    image1 = ImageClip("media/real pics/1.jpeg").set_duration(2)
    image2 = ImageClip("media/real pics/2.jpeg").set_duration(2)
    image3 = ImageClip("media/real pics/3.jpeg").set_duration(2)
    image4 = ImageClip("media/real pics/4.jpeg").set_duration(2)
    image5 = ImageClip("media/real pics/5.jpeg").set_duration(2)
    image6 = ImageClip("media/real pics/6.jpeg").set_duration(2)
    image7 = ImageClip("media/real pics/7.jpeg").set_duration(2)
    image8 = ImageClip("media/real pics/8.jpeg").set_duration(2)
    image9 = ImageClip("media/real pics/9.jpeg").set_duration(2)
    image10 = ImageClip("media/real pics/10.jpeg").set_duration(2)

    # concatenate clips, play one clip after the other
    image_clips = concatenate_videoclips([image1.fadein(.5).fadeout(.5),
                                          image2.fadein(.5).fadeout(.5),
                                          image3.fadein(.5).fadeout(.5),
                                          image4.fadein(.5).fadeout(.5),
                                          image5.fadein(.5).fadeout(.5),
                                          image6.fadein(.5).fadeout(.5),
                                          image7.fadein(.5).fadeout(.5),
                                          image8.fadein(.5).fadeout(.5),
                                          image9.fadein(.5).fadeout(.5),
                                          image10.fadein(.5).fadeout(.5)])

    title_clip = (TextClip("Just Back From...", fontsize=35,
                           font="Century-Schoolbook-Roman", color="white",
                           kerning=-2, interline=-1, bg_color='#e04400',
                           method='caption', align='center',
                           size=(image_clips.w, image_clips.h))
                  .margin(top=5, opacity=0)
                  .set_duration(3).fadein(.5).fadeout(.5)
                  .set_position(("center", "top")))

    stats_clip = (TextClip("See Santi's recent trip of 1,836 round trip miles, with stops..",
                           fontsize=35, font="Century-Schoolbook-Roman", color="white",
                           kerning=-2, interline=-1, bg_color='#e04400',
                           method='caption', align='center',
                           size=(image_clips.w, image_clips.h))
                  .margin(top=5, opacity=0)
                  .set_duration(3).fadein(.5).fadeout(.5)
                  .set_position(("center", "top")))

    final_clip = concatenate_videoclips([title_clip, image_clips, stats_clip],
                                        method="compose", padding=-1)
    audio_clip = AudioFileClip("media/music.aac").subclip(0, final_clip.duration)
    final_clip = final_clip.set_audio(audio_clip).afx(afx.audio_fadeout, 1.0)
    final_clip.write_videofile('videos/myPicsVideo.mp4', fps=23, codec='libx264',
                               audio_bitrate='1000k', bitrate='4000k')

    html = "<html><body><div>Video successfully created<div><a href='http://localhost:8000'><button>Back</button></a></body></html>"
    return HttpResponse(html)