def generate(self, avatars, text, usernames, kwargs):
    """Render `text` onto the Kowalski GIF and send it as the HTTP response.

    :param avatars: unused here — presumably part of a shared generator
        interface; TODO confirm against the dispatching caller.
    :param text: caption rendered onto the clipboard area of the GIF.
    :param usernames: unused (see `avatars`).
    :param kwargs: unused (see `avatars`).
    :return: Flask response produced by `send_file` (mimetype image/gif).
    """
    # Unique temp filename so concurrent requests don't clobber each other.
    name = uuid.uuid4().hex + '.gif'

    @after_this_request
    def remove(response):  # pylint: disable=W0612
        # Best-effort cleanup once the response has been sent.
        try:
            os.remove(name)
        except OSError:
            # FileNotFoundError and PermissionError are OSError subclasses,
            # so a single OSError handler covers the original tuple.
            pass
        return response

    clip = VideoFileClip("assets/kowalski/kowalski.gif")
    caption = TextClip(text, fontsize=36, method='caption', size=(245, None),
                       align='West', color='black', stroke_color='black',
                       stroke_width=1, font='Verdana').set_duration(clip.duration)
    caption = caption.set_position((340, 65)).set_duration(clip.duration)
    # Slight tilt so the caption follows the angle of the clipboard prop.
    caption = rotate(caption, angle=10, resample='bilinear')
    video = CompositeVideoClip([clip, caption]).set_duration(clip.duration)
    video.write_gif(name)
    # Release the ffmpeg readers promptly instead of waiting for GC.
    clip.close()
    video.close()
    return send_file(name, mimetype='image/gif')
def gifEngine(starttime, endtime, videofileloc, srtfileloc, outfileloc,
              logger='gifEngine.log'):
    """Cut [starttime, endtime] out of a video, burn in subtitles, write a GIF.

    :param starttime: subclip start (seconds or a moviepy time spec)
    :param endtime: subclip end
    :param videofileloc: path of the source video
    :param srtfileloc: path of the .srt subtitle file
    :param outfileloc: path the GIF is written to
    :param logger: log *file name* handed to logging.basicConfig
    :return: 0 on success, or the caught IOError/OSError instance on
        failure — callers must check the return value; nothing is raised.
    """
    logging.basicConfig(filename=logger, level=logging.DEBUG)
    # Distinct name: the original rebound `logger`, shadowing the
    # file-name parameter with a logging.Logger object.
    log = logging.getLogger(__name__)
    prolog.basic_config()
    try:
        generator = lambda txt: TextClip(
            txt, font='Impact', fontsize=28, color='white')
        video = VideoFileClip(videofileloc)
        sub = SubtitlesClip(srtfileloc, generator).set_position(
            ("center", "bottom"), relative=True)
        composite = CompositeVideoClip([video, sub])
        composite = composite.subclip(starttime, endtime)
        # NOTE(review): moviepy's `logger=` normally expects a proglog
        # logger; passing a logging.Logger mirrors the original behaviour
        # — confirm this is what moviepy accepts here.
        composite.write_gif(outfileloc, program='ffmpeg', opt='palettegen',
                            logger=log, verbose=True)  # palettegen opt
        return 0
    except (IOError, OSError) as err:
        return err
def process_video(filename, overwrite=False, max_width=1600, max_height=1600,
                  max_file_size=5*1024**2, gifdir='gifs/'):
    """Convert a video file into an optimized, logo-stamped GIF.

    The clip is center-cropped when the source is ~16:9, capped at
    max_width/max_height, given a freeze-frame cross-fade ending and a
    Twitter-logo overlay, written as a GIF, then shrunk with
    gifsicle/convert.  If the result still exceeds max_file_size the
    function recurses with a 5% smaller height.

    :param filename: path of the source video
    :param overwrite: regenerate the GIF even if it already exists
    :param max_width: maximum output width in pixels
    :param max_height: maximum output height in pixels
    :param max_file_size: target upper bound for the GIF, in bytes
    :param gifdir: directory prefix for the output file
    """
    gif_name = gifdir + filename + '.gif'
    if isfile(gif_name) and not overwrite:
        print("Skipping " + gif_name + " as it already exists.")
        return
    video_file = VideoFileClip(filename)
    try:
        # Only crop the center when the source is approximately 16:9.
        assert_approx_equal(float(video_file.w)/float(video_file.h), 16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except AssertionError:
        # Narrowed from a bare except: only the ratio check may fail here.
        print("Not resizing video.")
    if video_file.h > max_height:
        video_file = video_file.resize(height=max_height)
    if video_file.w > max_width:
        video_file = video_file.resize(width=max_width)
    # Hold the last frame for 0.7 s ...
    end_image = video_file.to_ImageClip(
        video_file.end-(1/video_file.fps)).set_duration(0.7)
    video_file = concatenate([video_file, end_image])
    # ... and cross-fade into it for a smoother ending.
    fadein_video_file = CompositeVideoClip(
        [video_file,
         (video_file.to_ImageClip()
          .set_duration(0.7)
          .crossfadein(0.4)
          .set_start(video_file.duration-0.7)),
         ])
    logo_size = video_file.h/6
    text = ImageClip(
        expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(
        video_file.duration).resize(width=logo_size).set_pos(
        (video_file.w-logo_size, video_file.h-logo_size))
    composite_video_file = CompositeVideoClip([fadein_video_file, text])
    composite_video_file.write_gif(gif_name, fps=20)
    fuzz_amt = 5
    # NOTE(review): shell pipeline built by string concatenation — fine
    # for trusted local paths, unsafe if `filename` is untrusted input.
    commands = ('gifsicle "' + gif_name + '" -O3 | convert -fuzz '
                + str(fuzz_amt) + '% - -ordered-dither o8x8,16'
                ' -layers optimize-transparency "' + gif_name + '"')
    call(commands, shell=True)
    if getsize(gif_name) > max_file_size:
        # Still too big: retry 5% shorter (terminates as size shrinks).
        process_video(filename, max_height=video_file.h*0.95, overwrite=True,
                      gifdir=gifdir, max_file_size=max_file_size)
def generate(self, start_sub=None, end_sub=None, resize=.5, compression=20):
    """Write one GIF per subtitle entry of the clip, with burnt-in text.

    :param start_sub: index of the first subtitle to render (default 0)
    :param end_sub: index of the last subtitle to render (default: all)
    :param resize: scaling factor applied to the clip
    :param compression: `fuzz` percentage forwarded to write_gif
    """
    subs = pysrt.open(self._subtitles_file)
    first = start_sub if start_sub else 0
    for iter_, sub in enumerate(subs):
        # Bug fix: the old enumerate(subs, start_sub) only *renumbered*
        # the output files from start_sub while still rendering from the
        # first subtitle; now we actually skip ahead to start_sub.
        if iter_ < first:
            continue
        if end_sub and iter_ > end_sub:
            break
        gif_file_name = os.path.join(
            self._output_dir,
            '{clip}_{iter}.gif'.format(clip=self._clip_file, iter=iter_))
        # Bug fix: use millisecond-accurate times via SubRipTime.ordinal
        # (total milliseconds); the old (minutes, seconds) tuples dropped
        # both the hours and the milliseconds components.
        clip = (
            self._clip
            .subclip(sub.start.ordinal / 1000.0, sub.end.ordinal / 1000.0)
            .resize(resize)
        )
        compositions = [clip]
        subtitles_y_pos = self._subtitles_position_y
        for line in sub.text.split('\n'):
            subtitles_y_pos += 20  # stack multi-line subtitles downwards
            text = (
                TextClip(line,
                         fontsize=self.subtitles_font_size,
                         color=self.subtitles_color,
                         stroke_width=self.subtitles_stroke_width,
                         stroke_color=self.subtitles_stroke_color,
                         bg_color=self.subtitles_background_color,
                         font=self.subtitles_font_name)
                .set_pos((self._subtitles_position_x, subtitles_y_pos))
                .set_duration(clip.duration))
            compositions.append(text)
        composition = CompositeVideoClip(compositions)
        composition.write_gif(gif_file_name, fuzz=compression)
def process_video(filename, video_height=480, overwrite=False):
    """Convert a video into a logo-stamped GIF no larger than 5 MiB.

    The clip is center-cropped when the source is ~16:9, resized to
    video_height, given a 0.7 s freeze-frame of its *first* frame at the
    end and a Twitter-logo overlay, written as a GIF, then shrunk with
    gifsicle/convert.  If the result exceeds 5 MiB the function recurses
    at 75% of the height.

    :param filename: path of the source video
    :param video_height: output height in pixels
    :param overwrite: regenerate the GIF even if it already exists
    """
    gif_name = 'gifs/' + filename + '.gif'
    if isfile(gif_name) and not overwrite:
        print("Skipping " + gif_name + " as it already exists.")
        return
    video_file = VideoFileClip(filename)
    try:
        # Only crop the center when the source is approximately 16:9.
        assert_approx_equal(float(video_file.w)/float(video_file.h), 16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except AssertionError:
        # Narrowed from a bare except: only the ratio check may fail here.
        print("Not resizing video.")
    video_file = video_file.resize(height=video_height)
    # Hold the first frame for 0.7 s at the end of the GIF.
    end_image = video_file.to_ImageClip(0).set_duration(0.7)
    video_file = concatenate([video_file, end_image])
    logo_size = video_height/6
    text = ImageClip(
        expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(
        video_file.duration).resize(width=logo_size).set_pos(
        (video_file.w-logo_size, video_file.h-logo_size))
    composite_video_file = CompositeVideoClip([video_file, text])
    composite_video_file.write_gif(gif_name, fps=20)
    fuzz_amt = 5
    # NOTE(review): shell pipeline built by string concatenation — fine
    # for trusted local paths, unsafe if `filename` is untrusted input.
    commands = ('gifsicle "' + gif_name + '" -O3 | convert -fuzz '
                + str(fuzz_amt) + '% - -ordered-dither o8x8,16'
                ' -layers optimize-transparency "' + gif_name + '"')
    call(commands, shell=True)
    if getsize(gif_name) > 5*1024**2:
        # Still too big: retry at 75% height (terminates as size shrinks).
        process_video(filename, video_height=video_height*0.75, overwrite=True)
class BaseClip:
    """Thin wrapper around a moviepy clip that chains common edit steps.

    The wrapped clip is stored as a CompositeVideoClip in ``self.clip``;
    every editing method rebinds ``self.clip`` to the edited result.
    """

    def __init__(self, clip):
        self.clip = CompositeVideoClip(clips=[clip])
        # Duration captured at construction time (none of the operations
        # below change the clip's duration).
        self.duration = self.clip.duration

    def resize(self, new_size):
        """
        Uses moviepy.video.fx.all.resize module
        :param new_size: Can be either (width, height) in pixels, a float
        scaling factor (like 0.5), or a function of time returning one of
        these.
        """
        self.clip = self.clip.resize(new_size)

    def crop(self, aspectRatio=None, x1=None, y1=None, x2=None, y2=None,
             width=None, height=None, x_center=None, y_center=None):
        """
        Uses moviepy.video.fx.crop module.

        Returns a new clip in which just a rectangular subregion of the
        original clip is conserved. x1,y1 is the top left corner and x2,y2
        the lower right corner of the cropped region. All coordinates are
        in pixels; floats are accepted.

        :param aspectRatio: optional preset name/ratio string (e.g.
            "square", "16:9", "2.35:1"); when given, the other geometry
            parameters except x_center/y_center are ignored.
        :param x1: top left corner x-axis
        :param y1: top left corner y-axis
        :param x2: bottom right corner x-axis
        :param y2: bottom right corner y-axis
        :param width: width of rectangle
        :param height: height of rectangle
        :param x_center: x-axis center
        :param y_center: y-axis center
        :raises AttributeError: if aspectRatio is not a known preset.
        """
        if aspectRatio:
            # Default the crop window to the clip's center.
            if not x_center:
                x_center = self.clip.w / 2
            if not y_center:
                y_center = self.clip.h / 2
            # Preset table: aliases -> (crop_w, crop_h) as a function of
            # the clip size.  Ratios narrower than the frame keep the full
            # height; wider ratios keep the full width — exactly as the
            # original if/elif chain computed them.
            presets = {
                ("vertical", "9:16", "phone"):
                    lambda w, h: (h * 9 / 16, h),
                ("square", "1:1"):
                    lambda w, h: (h, h),
                ("4:3", "1.33:1", "letterbox"):
                    lambda w, h: (h * 1.33, h),
                ("16:9", "widescreen", "1.77:1"):
                    lambda w, h: (w, w / 1.77),
                ("cinemascope", "21:9", "2.33:1"):
                    lambda w, h: (w, w / 2.33),
                ("anamorphic", "2.35:1"):
                    lambda w, h: (w, w / 2.35),
                ("DCI", "2.39:1"):
                    lambda w, h: (w, w / 2.39),
                ("Digital IMAX", "2.9:1"):
                    lambda w, h: (w, w / 2.9),
            }
            for aliases, size in presets.items():
                if aspectRatio in aliases:
                    crop_w, crop_h = size(self.clip.w, self.clip.h)
                    self.clip = self.clip.crop(width=crop_w, height=crop_h,
                                               x_center=x_center,
                                               y_center=y_center)
                    break
            else:
                # Unknown preset: same exception and message as before.
                raise AttributeError("Invalid Aspect Ratio specified: '"
                                     + str(aspectRatio) + "'")
        else:
            # No preset selected: forward the raw crop geometry.
            self.clip = self.clip.crop(x1=x1, y1=y1, x2=x2, y2=y2,
                                       width=width, height=height,
                                       x_center=x_center, y_center=y_center)

    def add_text(self, text, font_size, color, font, interline, posString,
                 duration):
        """Overlay a text layer on the current clip.

        :param text: the string to render
        :param font_size: font size in points
        :param color: text color
        :param font: font name
        :param interline: spacing between lines
        :param posString: one of 'top', 'left', 'bottom', 'right',
            'top-left', 'top-right', 'bottom-left', 'bottom-right';
            anything else centers the text.
        :param duration: how long the text stays on screen
        """
        w, h = self.clip.w, self.clip.h
        # NOTE(review): expressions like h / (h - 0.5) evaluate to ~1
        # pixel, i.e. flush against the edge — preserved as-is from the
        # original; confirm these are the intended positions.
        positions = {
            'top': (w / 2, h / (h - 0.5)),
            'left': (w / (w - 0.5), h / 2),
            'bottom': (w / 2, h / 1.1),
            'right': (w / 1.1, h / 2),
            'top-left': (w / (w - 0.5), h / (h - 0.5)),
            'top-right': (w / 1.1, h / (h - 0.5)),
            'bottom-left': (w / (w - 0.5), h / 1.1),
            'bottom-right': (w / 1.1, h / 1.1),
        }
        pos = positions.get(posString, (w / 2, h / 2))
        overlay = TextClip(
            text, fontsize=font_size, color=color, font=font,
            interline=interline).set_pos(pos).set_duration(duration)
        self.clip = CompositeVideoClip([self.clip, overlay])

    def addAudioFromFile(self, audio, start_time, end_time):
        """
        Uses moviepy.audio.io.AudioFileClip module. From the docs: an
        audio clip read from a sound file, or an array. The whole file is
        not loaded in memory; only a portion is read and stored at a time.

        :param audio: audio file taken from directory (mp3, wav, etc)
        :return: adds audio to the clip being worked on (self.clip)

        This method works with the clip stored on self.clip, i.e. it
        alters the clip already being made, not a new external clip. This
        avoids discrepancies when making new clips with or without
        overlay audio.
        """
        self._overlay_audio(audio, start_time, end_time)

    def addAudioFromClip(self, clipToExtract, start_time, end_time):
        """
        Instead of using an audio file, takes another video such as an
        mp4 file, rips the audio out of it, converts it into an
        AudioClip, and overlays it on the clip currently being worked on.

        ****This DOES NOT work with clips made through the
        VideoFileClip() method, since they have been processed as a
        different file type and already have their own audio attribute.
        To access that, call 'clip'.audio, clip being your target clip
        for audio extraction.

        :param clipToExtract: video from directory (mp4, etc)
        :return: adds audio to the clip being worked on (self.clip)
        """
        self._overlay_audio(clipToExtract, start_time, end_time)

    def _overlay_audio(self, source, start_time, end_time):
        # Shared body of the two addAudio* methods (they were identical):
        # AudioFileClip reads audio from both audio files and video
        # containers, so one implementation covers both entry points.
        audio = AudioFileClip(source)
        self.clip = self.clip.set_audio(audio.subclip(start_time, end_time))

    def writeVideo(self, filename):
        """
        Write the video to a file.
        :param filename: name and format of output file.
        """
        self.clip.write_videofile(filename)

    def create_gif(self, filename):
        # TODO: gif that loops fluidly
        self.clip.write_gif(filename)