def load_clip(index):
    image = self.sequence[index]
    text = titles[index]
    if text.startswith("W:"):
        text = text[2:]
        show_full_height = True
    else:
        show_full_height = False
    if height is None and width is None:
        clip = ImageClip(image, duration=image_duration)
    else:
        if zoom_images:
            clip = ImageClip(image, duration=image_duration).fx(
                image_effect, screensize=(width, height),
                duration=20, show_full_height=show_full_height)
        elif show_full_height:
            clip = ImageClip(image, duration=image_duration).fx(
                resize, height=height).set_position(('center', 'center'))
            clip = CompositeVideoClip([clip], size=(width, height))
        else:
            clip = ImageClip(image, duration=image_duration).fx(
                resize, height=height, width=width)
    # Adds text label etc. on the clip
    clip = make_clip(clip, text, height, width, font, font_color, fontsize)
    return clip
def __init__(self, img, ismask=False, transparent=True, fromalpha=False,
             duration=None, dpi=96, width=None, height=None):
    self.svg = svgutils.transform.fromfile(img)
    svg_bytestring = self.svg.to_str()
    png_file = io.BytesIO()
    if width is not None and height is not None:
        current_width = float(self.svg.width.replace("px", ""))
        current_height = float(self.svg.height.replace("px", ""))
        scale = max(height / current_height, width / current_width)
        cairosvg.svg2png(bytestring=svg_bytestring, write_to=png_file,
                         parent_width=width, parent_height=height, scale=scale)
    else:
        # Converts to png and saves to bytestring
        cairosvg.svg2png(bytestring=svg_bytestring, write_to=png_file)
    # np_img = svg_to_npim(svg_bytestring, dpi)
    np_img = imread(png_file.getvalue())
    ImageClip.__init__(self, np_img, ismask=ismask, transparent=transparent,
                       fromalpha=fromalpha, duration=duration)
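# Usage sketch (assumption: the __init__ above belongs to an SVG-backed
# ImageClip subclass whose name is not shown; "SvgClip" is a hypothetical
# stand-in for that class and "logo.svg" a hypothetical file). Rasterises
# the SVG at 1280x720 and shows it for three seconds.
logo = SvgClip("logo.svg", width=1280, height=720, duration=3)
logo.write_videofile("logo.mp4", fps=24)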
def render():
    if img1 is None:
        alert = QMessageBox()
        alert.setText('Please select a 64-bit image.')
        alert.exec_()
        return
    if not pixelized:
        alert = QMessageBox()
        alert.setText('Please pixelize the image first.')
        alert.exec_()
        return
    if img2 is None:
        alert = QMessageBox()
        alert.setText('Please select a 128-bit image.')
        alert.exec_()
        return
    image = Image.open(img2)
    resize(image).save('./resource/128bit.png')
    clips = [ImageClip(f'./resource/{m}').set_duration(1)
             for m in ['1bit.png', '2bit.png', '4bit.png', '8bit.png']]
    clips.append(ImageClip('./resource/16bit.png').set_duration(1.6))
    clips.append(ImageClip('./resource/32bit.png').set_duration(1.8))
    clips.append(ImageClip('./resource/64bit.png').set_duration(2))
    clips.append(ImageClip('./resource/128bit.png').set_duration(1))
    concat_clip = concatenate_videoclips(clips, method="compose")
    concat_clip.audio = AudioFileClip(r"./resource/audio.mp3")
    concat_clip.write_videofile("result.mp4", fps=24)
    alert = QMessageBox()
    alert.setText('Rendering complete: result.mp4 has been created.')
    alert.exec_()
def test_issue_285():
    clip_1, clip_2, clip_3 = (
        ImageClip("media/python_logo.png", duration=10),
        ImageClip("media/python_logo.png", duration=10),
        ImageClip("media/python_logo.png", duration=10),
    )
    merged_clip = concatenate_videoclips([clip_1, clip_2, clip_3])
    assert merged_clip.duration == 30
def addlogo(file_dir, img="", time=20, X=30, Y=30):
    clip = VideoFileClip(file_dir)
    img_clip = ImageClip(img)
    # Position of the logo overlay
    img_clip = img_clip.set_pos((X, Y)).set_duration(time)
    clip = CompositeVideoClip([clip, img_clip])
    filen = os.path.basename(file_dir)
    clip.write_videofile(work_path + "\\" + filen)
    clip.close()
def find_objects(clip, size_threshold=500, preview=False):
    """Returns a list of ImageClips representing each a separate object on
    the screen.

    size_threshold : all objects found with size < size_threshold will be
    considered false positives and will be removed
    """
    image = clip.get_frame(0)
    if not clip.mask:
        clip = clip.add_mask()

    mask = clip.mask.get_frame(0)
    labelled, num_features = ndi.measurements.label(image[:, :, 0])

    # find the objects
    slices = []
    for obj in ndi.find_objects(labelled):
        if mask[obj[0], obj[1]].mean() <= 0.2:
            # remove letter holes (in o, e, a, etc.)
            continue
        if image[obj[0], obj[1]].size <= size_threshold:
            # remove very small slices
            continue
        slices.append(obj)
    indexed_slices = sorted(enumerate(slices),
                            key=lambda slice: slice[1][1].start)

    letters = []
    for i, (sy, sx) in indexed_slices:
        # crop each letter separately
        sy = slice(sy.start - 1, sy.stop + 1)
        sx = slice(sx.start - 1, sx.stop + 1)
        letter = image[sy, sx]
        labletter = labelled[sy, sx]
        maskletter = (labletter == (i + 1)) * mask[sy, sx]
        letter = ImageClip(image[sy, sx])
        letter.mask = ImageClip(maskletter, is_mask=True)
        letter.screenpos = np.array((sx.start, sy.start))
        letters.append(letter)

    if preview:
        import matplotlib.pyplot as plt

        print(f"Found {num_features} objects")
        fig, ax = plt.subplots(2)
        ax[0].axis("off")
        ax[0].imshow(labelled)
        ax[1].imshow([range(num_features)], interpolation="nearest")
        ax[1].set_yticks([])
        plt.show()

    return letters
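# Usage sketch for find_objects above (hypothetical file name; uses the
# MoviePy 2.x API that the function itself relies on): split a rendered
# title image into per-letter clips and composite them back at their
# original screen positions.
from moviepy import CompositeVideoClip, ImageClip

title = ImageClip("media/big_text.png")  # hypothetical image of rendered text
letters = find_objects(title, size_threshold=500)
scene = CompositeVideoClip(
    [letter.with_position(tuple(letter.screenpos)) for letter in letters],
    size=title.size,
).with_duration(5)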
def test_exifrotate():
    image_file = 'media/balloons_portrait.jpg'
    with ImageClip(image_file, duration=1) as clip:
        assert clip.img.meta['EXIF_MAIN']['ExifImageWidth'] == 4032
        assert clip.img.meta['EXIF_MAIN']['ExifImageHeight'] == 3024
        assert clip.img.meta['EXIF_MAIN']['Orientation'] == 6
        assert clip.size == (3024, 4032)
    with ImageClip(image_file, duration=1,
                   imageio_params={'exifrotate': False}) as clip:
        assert clip.size == (4032, 3024)
def _make_circle(radius, color=(0, 255, 0)):
    circle_clip = ImageClip(
        circle((2 * radius, 2 * radius), (radius, radius), radius,
               color, (0, 0, 0)))
    # Make mask from it (channel 1 - green) since it's single color
    circle_mask = ImageClip(
        circle((2 * radius, 2 * radius), (radius, radius), radius, 1, 0),
        ismask=True)
    # And use it as a mask
    circle_clip = circle_clip.set_mask(circle_mask)
    return circle_clip, radius
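# Usage sketch (sizes and duration are arbitrary): composite the masked
# green disc returned by _make_circle over a white background, using the
# v1-style API (ismask/set_mask) that the helper itself uses.
disc, r = _make_circle(50)
background = ColorClip((4 * r, 4 * r), color=(255, 255, 255))
scene = CompositeVideoClip([background, disc.set_position('center')]) \
    .set_duration(2)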
def test_issue_782():
    clip = ImageClip('flower.png')
    clip_2 = clip.fx(vfx.mirror_y)
    assert clip.image_transform != clip_2.image_transform
    clip_3 = clip.fx(vfx.mirror_x)
    assert clip.image_transform != clip_3.image_transform
def findObjects(clip, rem_thr=500, preview=False):
    """Returns a list of ImageClips representing each a separate object on
    the screen.

    rem_thr : all objects found with size < rem_thr will be considered false
    positives and will be removed
    """
    image = clip.get_frame(0)
    if clip.mask == None:
        clip = clip.add_mask()

    mask = clip.mask.get_frame(0)
    labelled, num_features = ndi.measurements.label(image[:, :, 0])

    # find the objects
    slices = ndi.find_objects(labelled)
    # cool trick to remove letter holes (in o, e, a, etc.)
    slices = [e for e in slices if mask[e[0], e[1]].mean() > 0.2]
    # remove very small slices
    slices = [e for e in slices if image[e[0], e[1]].size > rem_thr]
    # Sort the slices from left to right
    islices = sorted(enumerate(slices), key=lambda s: s[1][1].start)

    letters = []
    for i, (ind, (sy, sx)) in enumerate(islices):
        # crop each letter separately
        sy = slice(sy.start - 1, sy.stop + 1)
        sx = slice(sx.start - 1, sx.stop + 1)
        letter = image[sy, sx]
        labletter = labelled[sy, sx]
        maskletter = (labletter == (ind + 1)) * mask[sy, sx]
        letter = ImageClip(image[sy, sx])
        letter.mask = ImageClip(maskletter, ismask=True)
        letter.screenpos = np.array((sx.start, sy.start))
        letters.append(letter)

    if preview:
        import matplotlib.pyplot as plt

        print "found %d objects" % (num_features)
        fig, ax = plt.subplots(2)
        ax[0].axis("off")
        ax[0].imshow(labelled)
        ax[1].imshow([range(num_features)], interpolation="nearest")
        ax[1].set_yticks([])
        plt.show()

    return letters
def findObjects(clip, rem_thr=500, preview=False):
    """Returns a list of ImageClips representing each a separate object on
    the screen.

    rem_thr : all objects found with size < rem_thr will be considered false
    positives and will be removed
    """
    image = clip.get_frame(0)
    if clip.mask is None:
        clip = clip.add_mask()

    mask = clip.mask.get_frame(0)
    labelled, num_features = ndi.measurements.label(image[:, :, 0])

    # find the objects
    slices = ndi.find_objects(labelled)
    # cool trick to remove letter holes (in o, e, a, etc.)
    slices = [e for e in slices if mask[e[0], e[1]].mean() > 0.2]
    # remove very small slices
    slices = [e for e in slices if image[e[0], e[1]].size > rem_thr]
    # Sort the slices from left to right
    islices = sorted(enumerate(slices), key=lambda s: s[1][1].start)

    letters = []
    for i, (ind, (sy, sx)) in enumerate(islices):
        # crop each letter separately
        sy = slice(sy.start - 1, sy.stop + 1)
        sx = slice(sx.start - 1, sx.stop + 1)
        letter = image[sy, sx]
        labletter = labelled[sy, sx]
        maskletter = (labletter == (ind + 1)) * mask[sy, sx]
        letter = ImageClip(image[sy, sx])
        letter.mask = ImageClip(maskletter, ismask=True)
        letter.screenpos = np.array((sx.start, sy.start))
        letters.append(letter)

    if preview:
        import matplotlib.pyplot as plt

        print("found %d objects" % (num_features))
        fig, ax = plt.subplots(2)
        ax[0].axis('off')
        ax[0].imshow(labelled)
        ax[1].imshow([range(num_features)], interpolation='nearest')
        ax[1].set_yticks([])
        plt.show()

    return letters
def _render_clip(self, frames):
    logger = logging.getLogger('logger')
    logger.info("Rendering video...")
    clips = []
    clip_duration = 1 / self.frame_rate
    for frame in frames:
        clip = ImageClip(frame.img)
        clip = clip.set_duration(clip_duration)
        clips.append(clip)
    final_clip = concatenate_videoclips(clips, method="chain")
    final_clip = final_clip.set_audio(AudioFileClip(self.audio.path))
    final_clip = final_clip.set_fps(self.frame_rate)
    return final_clip
def make_clip(self, value, width=None, height=None):
    """Value to show on the gauge.

    Width and height can also be changed since it's SVG.
    """
    # Calculates the angle to which the cursor must be turned, based on speed
    angle = self.map_speed(value)
    # Turns the cursor. We need to set the attribute directly instead of using
    # the rotate function since we need to forget previous transformations
    self.cursor.root.set("transform",
                         "rotate(%f %f %f)" % (angle, self.cursor_x,
                                               self.cursor_y))
    svg_bytestring = self.svg.to_str()
    png_file = io.BytesIO()
    if width is not None and height is not None:
        current_width = float(self.svg.width)
        current_height = float(self.svg.height)
        scale = max(height / current_height, width / current_width)
        cairosvg.svg2png(bytestring=svg_bytestring, write_to=png_file,
                         parent_width=width, parent_height=height, scale=scale)
    else:
        # Converts to png and saves to bytestring
        cairosvg.svg2png(bytestring=svg_bytestring, write_to=png_file)
    # Reads as numpy image
    # TODO: does transparency work?
    return ImageClip(imread(png_file.getvalue()), transparent=self.transparent)
def test_PR_528(util):
    with ImageClip("media/vacation_2017.jpg") as clip:
        new_clip = scroll(clip, w=1000, x_speed=50)
        new_clip = new_clip.with_duration(0.2)
        new_clip.fps = 24
        new_clip.write_videofile(os.path.join(util.TMP_DIR, "pano.mp4"),
                                 logger=None)
def freeze_at_end(clip, freeze_duration=None, total_duration=None):
    """Makes the clip freeze on its last frame.

    With ``freeze_duration`` you can specify the duration of the freeze. With
    ``total_duration`` you can specify the total duration of the clip and the
    freeze (i.e. the duration of the freeze is automatically calculated). If
    neither is provided, the freeze will have an infinite length.
    """
    freezed_clip = ImageClip(clip.get_frame(clip.end))
    if total_duration:
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        freezed_clip = freezed_clip.set_duration(freeze_duration)
    return CompositeVideoClip([clip, freezed_clip.set_start(clip.end)])
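# Usage sketch (hypothetical file name): extend a clip by holding its last
# frame for two extra seconds, either directly or via the target length.
clip = VideoFileClip("video.mp4")
extended = freeze_at_end(clip, freeze_duration=2)
extended = freeze_at_end(clip, total_duration=clip.duration + 2)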
def chose2():
    print("This takes some time...")
    time.sleep(2)
    for (dirpath, dirnames, filenames) in walk(os.getcwd()):
        files.extend(filenames)
        break
    for i in files:
        if i[-3:] == "jpg":
            images.append(ImageClip(i).resize([640, 1136]).set_duration(3))
        if i[-3:] == "mp4":
            videos.append(VideoFileClip(i))
    final_clip = concatenate_videoclips(videos + images, method="compose")
    final_clip.write_videofile("input.mp4")
    time.sleep(2)
    stream = os.popen(
        r'ffmpeg -i input.mp4 -lavfi "[0:v]scale=1920*2:1080*2,boxblur=luma_radius=min(h\,w)/20:luma_power=1:chroma_radius=min(cw\,ch)/20:chroma_power=1[bg];[0:v]scale=-1:1080[ov];[bg][ov]overlay=(W-w)/2:(H-h)/2,crop=w=1920:h=1080" Output.mp4'
    )
    output = stream.read()
    os.rename(os.getcwd() + r"\input.mp4",
              os.getcwd() + r"\withBlackBorders.mp4")
    print("\nFinished! Enjoy Output.mp4")
def test_afterimage():
    ai = ImageClip("media/afterimage.png")
    masked_clip = mask_color(ai, color=[0, 255, 1])  # for green
    some_background_clip = ColorClip((800, 600), color=(255, 255, 255))
    final_clip = CompositeVideoClip([some_background_clip, masked_clip],
                                    use_bgclip=True).with_duration(0.2)
    final_clip.write_videofile(os.path.join(TMP_DIR, "afterimage.mp4"), fps=30)
def freeze_at_start(clip, freeze_duration=None, total_duration=None):
    """Makes the clip freeze on its first frame.

    With ``freeze_duration`` you can specify the duration of the freeze. With
    ``total_duration`` you can specify the total duration of the clip and the
    freeze (i.e. the duration of the freeze is automatically calculated). If
    neither is provided, the freeze will have an infinite length.
    """
    freezed_clip = ImageClip(clip.get_frame(0))
    if total_duration:
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        freezed_clip = freezed_clip.set_duration(freeze_duration)
    return concatenate([freezed_clip, clip])
def freeze_at_start(clip, freeze_duration=None, total_duration=None):
    """Momentarily freeze the clip on its first frame.

    With ``freeze_duration`` you can specify the duration of the freeze. With
    ``total_duration`` you can specify the total duration of the clip and the
    freeze (i.e. the duration of the freeze is automatically calculated). If
    neither is provided, the freeze will have an infinite length.
    """
    freezed_clip = ImageClip(clip.get_frame(0))
    if total_duration:
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        freezed_clip = freezed_clip.set_duration(freeze_duration)
    return concatenate([freezed_clip, clip])
def define_image(op, ext_duration=None):
    """Define a static image clip from a source file.

    source - absolute path to the file
    duration - duration in seconds
    ext_duration - clip to obtain the duration from

    Mainly used for intro/outro and static background.
    """
    if ext_duration:
        # clip = ImageClip(op.source, duration=find_video_period(ext_duration))
        clip = ImageClip(op.source, duration=ext_duration.duration)
    else:
        clip = ImageClip(op.source, duration=op.duration)
    return clip
def test_find_objects(filename, expected_screenpos):
    clip = ImageClip(filename)
    objects = find_objects(clip)
    assert len(objects) == len(expected_screenpos)
    for i, object_ in enumerate(objects):
        assert np.array_equal(object_.screenpos,
                              np.array(expected_screenpos[i]))
def create_image_clip(image_filename):
    # having some encoding issues in moviepy
    if isinstance(image_filename, unicode):
        image_filename = image_filename.encode(sys.getdefaultencoding())
    image_filename = os.path.abspath(image_filename)
    assert os.path.exists(image_filename)
    return ImageClip(image_filename)
def freeze_at_end(clip, freeze_duration=None, total_duration=None, delta=0.05):
    """Makes the clip freeze on its last frame.

    With ``freeze_duration`` you can specify the duration of the freeze. With
    ``total_duration`` you can specify the total duration of the clip and the
    freeze (i.e. the duration of the freeze is automatically calculated). If
    neither is provided, the freeze will have an infinite length.

    The clip is frozen on the frame at time (clip.duration - delta).
    """
    freezed_clip = ImageClip(clip.get_frame(clip.end - delta))
    if total_duration:
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        freezed_clip = freezed_clip.set_duration(freeze_duration)
    return CompositeVideoClip([clip, freezed_clip.set_start(clip.end)])
def generate_intro():
    logger.info('Generating intro...')
    color = (255, 255, 255)
    size = (1280, 720)
    clip = ColorClip(size, color, duration=3)
    logo = ImageClip(config.LOGO_PATH).set_duration(clip.duration) \
        .resize(width=400, height=200) \
        .set_pos(('center', 'center'))
    return CompositeVideoClip([clip, logo])
def insert_image_into_frame(frame, face_image):
    im_insert = ImageClip(face_image)
    im_insert = painting.painting(im_insert)
    frame_mask = frame.mask.get_frame(0)
    size = frame.mask.size
    black_pixels = np.where(frame_mask == 0.0)
    black_start = (black_pixels[1][0], black_pixels[0][0])
    black_end = (black_pixels[1][-1], black_pixels[0][-1])
    black_size = (black_end[0] - black_start[0], black_end[1] - black_start[1])
    comp_clip = im_insert.resize(black_size)
    comp_clip = comp_clip.set_pos(black_start)
    composite = CompositeVideoClip([frame, comp_clip], frame.size)
    standardize_clip(composite)
    return composite
def clip_from_path(path):
    if path == '':
        return None
    elif path.suffix.lower() in IMAGE_EXTENSIONS:
        from moviepy.video.VideoClip import ImageClip
        return ImageClip(str(path))
    else:
        from moviepy.video.io.VideoFileClip import VideoFileClip
        return VideoFileClip(str(path))
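# Usage sketch: IMAGE_EXTENSIONS is assumed to be defined elsewhere in the
# module (e.g. {'.png', '.jpg', '.gif'}); both paths are hypothetical.
from pathlib import Path

still = clip_from_path(Path("slides/intro.png"))    # -> ImageClip
footage = clip_from_path(Path("takes/scene1.mp4"))  # -> VideoFileClip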
def add_pics(self, clip):
    logger.info('Adding pics...')
    pic_clip = [clip]
    for i in range(self.pic_num):
        x_pos, y_pos, x_size, y_size = self.generate_coordinates(clip)
        pic_path = config.PIC_PATH + str(i) + '.jpg'
        pic = ImageClip(pic_path).set_duration(clip.duration) \
            .resize((x_size, y_size)) \
            .set_pos((x_pos, y_pos)).add_mask() \
            .rotate(random.randint(-180, 180))
        pic_clip.append(pic)
    return CompositeVideoClip(pic_clip)
def test_afterimage():
    ai = ImageClip("media/afterimage.png")
    masked_clip = mask_color(ai, color=[0, 255, 1])  # for green
    some_background_clip = ColorClip((800, 600), color=(255, 255, 255))
    final_clip = CompositeVideoClip([some_background_clip, masked_clip],
                                    use_bgclip=True)
    final_clip.duration = 5
    final_clip.write_videofile("/tmp/afterimage.mp4", fps=30)
def createVideo(folder, file_movie_name):
    list_img = []
    for img in os.listdir(folder):
        list_img.append(folder + "/" + img)
    list_img.sort()
    clips = [ImageClip(m).set_duration(3) for m in list_img]
    concat_clip = concatenate_videoclips(clips, method="compose")
    concat_clip.write_videofile(file_movie_name, fps=24)
def create_silent_video(self, youtube=None, preview=False):
    log("Creating silent video", prnt=True)
    base_path = str(self.podcast.id) + '/' + str(self.episode.id)
    video_path = base_path + '.mp4'
    with NamedTemporaryFile(suffix='.mp4') as tf:
        with NamedTemporaryFile(suffix='.mp3') as _audio:
            self.videosyncepisode.update_sync_status({
                'event': 'DOWNLOAD_AUDIO',
                'timestamp': arrow.utcnow().isoformat()
            })
            r = requests.get(self.audio_url, stream=True)
            for chunk in r.iter_content(chunk_size=4096):
                _audio.write(chunk)
            _audio.seek(0)
            song = AudioSegment.from_file(File(_audio), format='mp3')
            sound_length = math.ceil(len(song) / 1000)
            self.videosyncepisode.update_sync_status({
                'event': 'CREATE_VIDEO',
                'timestamp': arrow.utcnow().isoformat()
            })
            clip = ImageClip(self.videosyncepisode.artwork_path,
                             duration=sound_length)
            clip.write_videofile(tf.name, fps=1, audio=_audio.name)
            self.videosyncepisode.update_sync_status({
                'event': 'UPLOAD_VIDEO',
                'timestamp': arrow.utcnow().isoformat()
            })
            video_id = self.upload_video(youtube, tf.name)
            return video_id
    return None
def generate_text_clip(text, number):
    filename = "tmp/" + name + "/clips/" + name + number + ".mp4"
    if not os.path.exists(filename):
        audio_filename = make_tts(text, number)
        audio = AudioFileClip(audio_filename)
        image = ImageClip(background_image).set_fps(30)
        video = image.set_duration(audio.duration)
        withaudio = video.set_audio(audio)
        fontsize = (len(text) + 10) / withaudio.w
        text_clip = TextClip(text, fontsize=fontsize,
                             size=(withaudio.w, withaudio.h)).set_pos("center")
        final_clip = CompositeVideoClip(
            [withaudio, text_clip.set_duration(video.duration)])
        final_clip.write_videofile(filename)
    return filename
def output_mp4(name, data, durations, max_workers=None, method='chain'):
    # sanity check the images
    sizes = {frame.shape for frame in data}
    assert method == 'compose' or len(sizes) == 1, sizes
    # turn the images into clips
    clips = [ImageClip(frame, duration=d) for (frame, d) in zip(data, durations)]
    # save the mp4
    movie = concatenate_videoclips(clips, method=method)
    movie.write_videofile(str(output_path / (name + '.mp4')), fps=24,
                          threads=max_workers or cpu_count(),
                          bitrate='10M')
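# Usage sketch (assumes `output_path` is a pathlib.Path defined elsewhere in
# the module): three solid-grey frames of the same size, one second each.
import numpy as np

frames = [np.full((120, 160, 3), value, dtype=np.uint8)
          for value in (0, 128, 255)]
output_mp4('fade', frames, [1, 1, 1])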
def create_thumbnail(self):
    logger.info('Creating thumbnail...')
    color = (255, 255, 255)
    size = (1280, 720)
    background = ColorClip(size, color)
    logo = ImageClip(config.LOGO_PATH).set_duration(1) \
        .resize(width=400, height=200) \
        .set_pos(('center', 'center'))
    text = TextClip(txt=str(self.id), size=(500, 500)) \
        .set_position(('center', 'bottom'))
    CompositeVideoClip([background, logo, text]).save_frame(config.THUMB_PATH)
    logger.info('Thumbnail saved...')
def clip(self, frames_and_durations):
    if self.dynamic:
        clip = super().clip(frames_and_durations)
    else:
        clip = ImageClip(
            imageio.imread(next((output_path / self.dirname).glob('*.png'))),
            duration=sum(frames_and_durations.values()))
    if self.margin:
        params = self.margin.copy()
        params['mar'] = params.pop('margin')
        params['color'] = white
        clip = margin(clip, **params)
    return clip
def credits1(creditfile, width, stretch=30, color='white',
             stroke_color='black', stroke_width=2,
             font='Impact-Normal', fontsize=60, gap=0):
    """

    Parameters
    -----------

    creditfile
      A text file whose content must be as follows: ::

        # This is a comment
        # The next line says : leave 4 blank lines
        .blank 4

        ..Executive Story Editor
        MARCEL DURAND

        ..Associate Producers
        MARTIN MARCEL
        DIDIER MARTIN

        ..Music Supervisor
        JEAN DIDIER

    width
      Total width of the credits text in pixels

    gap
      Horizontal gap in pixels between the jobs and the names

    color
      Color of the text. See ``TextClip.list('color')`` for a list of
      acceptable names.

    font
      Name of the font to use. See ``TextClip.list('font')`` for the list
      of fonts you can use on your computer.

    fontsize
      Size of font to use

    stroke_color
      Color of the stroke (=contour line) of the text. If ``None``, there
      will be no stroke.

    stroke_width
      Width of the stroke, in pixels. Can be a float, like 1.5.

    Returns
    ---------

    image
      An ImageClip instance that looks like this and can be scrolled to make
      some credits:

        Executive Story Editor    MARCEL DURAND
           Associate Producers    MARTIN MARCEL
                                  DIDIER MARTIN
              Music Supervisor    JEAN DIDIER

    """

    # PARSE THE TXT FILE
    with open(creditfile) as f:
        lines = f.readlines()

    lines = filter(lambda x: not x.startswith('\n'), lines)
    texts = []
    oneline = True
    for l in lines:
        if not l.startswith('#'):
            if l.startswith('.blank'):
                for i in range(int(l.split(' ')[1])):
                    texts.append(['\n', '\n'])
            elif l.startswith('..'):
                texts.append([l[2:], ''])
                oneline = True
            else:
                if oneline:
                    texts.append(['', l])
                    oneline = False
                else:
                    texts.append(['\n', l])

    left, right = ["".join(l) for l in zip(*texts)]

    # MAKE TWO COLUMNS FOR THE CREDITS
    left, right = [TextClip(txt, color=color, stroke_color=stroke_color,
                            stroke_width=stroke_width, font=font,
                            fontsize=fontsize, align=al)
                   for txt, al in [(left, 'East'), (right, 'West')]]
    cc = CompositeVideoClip([left, right.set_pos((left.w + gap, 0))],
                            size=(left.w + right.w + gap, right.h),
                            bg_color=None)

    # SCALE TO THE REQUIRED SIZE
    scaled = resize(cc, width=width)

    # TRANSFORM THE WHOLE CREDIT CLIP INTO AN ImageClip
    imclip = ImageClip(scaled.get_frame(0))
    amask = ImageClip(scaled.mask.get_frame(0), ismask=True)

    return imclip.set_mask(amask)
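# Usage sketch (hypothetical file name and timings): turn the static credits
# image into scrolling credits, as the docstring suggests, with the scroll
# effect from moviepy.video.fx.scroll.
credits_image = credits1("credits.txt", width=720)
scrolling_credits = scroll(credits_image.set_duration(20),
                           w=credits_image.w, h=360, y_speed=30)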
def create_movie(face_images):
    # ... (the beginning of this function is not shown in the source)
    face_index = 0
    movie_clip = concatenate_videoclips(scenes)

    # Place the sound
    sound_path = pick_random_entry_from_dir('sources/soundtrack/')
    soundtrack = AudioFileClip(sound_path)
    soundtrack = soundtrack.subclip(0, 30)
    movie_clip = movie_clip.set_audio(soundtrack)
    return movie_clip


movie = create_movie(face_images)

gradient = color_gradient(size=(1280, 720), p1=(640, 60), p2=(640, 660),
                          col1=np.array([195, 75, 99], 'uint8'),
                          col2=np.array([127, 251, 19], 'uint8'),
                          shape='linear')
gradient_clip = ImageClip(gradient)
gradient_clip = gradient_clip.set_opacity(0.20)
standardize_clip(gradient_clip)

movie = CompositeVideoClip([movie, gradient_clip], movie.size)
movie.duration = 30
movie.save_frame('frame.jpg')
# movie.preview(fps=10, audio=False)
# movie.duration = 1
movie.write_videofile('composite.mp4')
def credits1(creditfile, width, stretch=30, color='white',
             stroke_color='black', stroke_width=2,
             font='Impact-Normal', fontsize=60):
    """The first credits I imagined. They take as argument a file like: ::

        # This is a comment
        # The next line says : leave 4 blank lines
        .blank 4

        ..Executive Story Editor
        MARCEL DURAND

        ..Associate Producers
        MARTIN MARCEL
        DIDIER MARTIN

        ..Music Supervisor
        JEAN DIDIER

    And produce an ImageClip that looks like:

        Executive Story Editor    MARCEL DURAND
           Associate Producers    MARTIN MARCEL
                                  DIDIER MARTIN
              Music Supervisor    JEAN DIDIER

    :param width: total width of the credits text
    :param stretch: stretch in pixels between the jobs and the names.

    The other keywords are passed to the ``TextClip``s.
    """

    # PARSE THE TXT FILE
    with open(creditfile) as f:
        lines = f.readlines()

    lines = filter(lambda x: not x.startswith('\n'), lines)
    texts = []
    oneline = True
    for l in lines:
        if not l.startswith('#'):
            if l.startswith('.blank'):
                for i in range(int(l.split(' ')[1])):
                    texts.append(['\n', '\n'])
            elif l.startswith('..'):
                texts.append([l[2:], ''])
                oneline = True
            else:
                if oneline:
                    texts.append(['', l])
                    oneline = False
                else:
                    texts.append(['\n', l])

    left, right = ["".join(l) for l in zip(*texts)]

    # MAKE TWO COLUMNS FOR THE CREDITS
    left, right = [TextClip(txt, color=color, stroke_color=stroke_color,
                            stroke_width=stroke_width, font=font,
                            fontsize=fontsize, align=al)
                   for txt, al in [(left, 'East'), (right, 'West')]]
    cc = CompositeVideoClip([left, right.set_pos((left.w + stretch, 0))],
                            size=(left.w + right.w + stretch, right.h),
                            transparent=True)

    # SCALE TO THE REQUIRED SIZE
    scaled = cc.fx(resize, width=width)

    # TRANSFORM THE WHOLE CREDIT CLIP INTO AN ImageClip
    imclip = ImageClip(scaled.get_frame(0))
    amask = ImageClip(scaled.mask.get_frame(0), ismask=True)

    return imclip.set_mask(amask)
def credits1(creditfile, width, gap=30, color='white',
             stroke_color='black', stroke_width=2,
             font='Impact-Normal', fontsize=60):
    """

    Parameters
    -----------

    creditfile
      A text file whose content must be as follows: ::

        # This is a comment
        # The next line says : leave 4 blank lines
        .blank 4

        ..Executive Story Editor
        MARCEL DURAND

        ..Associate Producers
        MARTIN MARCEL
        DIDIER MARTIN

        ..Music Supervisor
        JEAN DIDIER

    width
      Total width of the credits text in pixels

    gap
      Gap in pixels between the jobs and the names.

    color, stroke_color, stroke_width, font, fontsize
      Text-styling arguments passed to the ``TextClip``s.

    Returns
    ---------

    image
      An ImageClip instance that looks like this and can be scrolled to make
      some credits:

        Executive Story Editor    MARCEL DURAND
           Associate Producers    MARTIN MARCEL
                                  DIDIER MARTIN
              Music Supervisor    JEAN DIDIER

    """

    # PARSE THE TXT FILE
    with open(creditfile) as f:
        lines = f.readlines()

    lines = filter(lambda x: not x.startswith('\n'), lines)
    texts = []
    oneline = True
    for l in lines:
        if not l.startswith('#'):
            if l.startswith('.blank'):
                for i in range(int(l.split(' ')[1])):
                    texts.append(['\n', '\n'])
            elif l.startswith('..'):
                texts.append([l[2:], ''])
                oneline = True
            else:
                if oneline:
                    texts.append(['', l])
                    oneline = False
                else:
                    texts.append(['\n', l])

    left, right = ["".join(l) for l in zip(*texts)]

    # MAKE TWO COLUMNS FOR THE CREDITS
    left, right = [TextClip(txt, color=color, stroke_color=stroke_color,
                            stroke_width=stroke_width, font=font,
                            fontsize=fontsize, align=al)
                   for txt, al in [(left, 'East'), (right, 'West')]]
    cc = CompositeVideoClip([left, right.set_pos((left.w + gap, 0))],
                            size=(left.w + right.w + gap, right.h),
                            transparent=True)

    # SCALE TO THE REQUIRED SIZE
    scaled = cc.fx(resize, width=width)

    # TRANSFORM THE WHOLE CREDIT CLIP INTO AN ImageClip
    imclip = ImageClip(scaled.get_frame(0))
    amask = ImageClip(scaled.mask.get_frame(0), ismask=True)

    return imclip.set_mask(amask)