def make_zoom(scale_func, path=im_path, cx=32, cy=32, scale=10, duration=5, fps=10, oversample=2.0):
    """Build a zooming clip of the image at *path*, centred on (cx, cy).

    The image is loaded at ``oversample`` resolution, two consecutive zoom
    segments (driven by ``scale_func`` over frame indices) are composited
    over a white background, and the result is scaled back down to the
    original size.
    """
    base = ImageClip(path).resize(oversample)
    backdrop = ColorClip((base.w, base.h), (0xFF, 0xFF, 0xFF)).set_duration(duration)
    base.duration = duration
    # Work in oversampled pixel coordinates from here on.
    cx *= oversample
    cy *= oversample
    frame_count = int(duration * fps)

    def segment(start_frame, end_frame):
        # Sample scale_func uniformly across [start_frame, end_frame).
        scale_values = [
            scale_func(start_frame + i * (end_frame - start_frame) / frame_count)
            for i in range(frame_count)
        ]
        return make_zoom_movie(base, scale_values, fps, (cx, cy))

    # we seem to get two multiple frames at the start... and end sometimes
    composed = CompositeVideoClip([
        backdrop,
        segment(frame_count, 2.0 * frame_count),
        segment(0, frame_count),
    ])
    composed.size = base.size
    # ret.duration = duration
    return composed.resize(1.0 / oversample)
def addAudioToVideo(name):
    """Mux the generated audio onto the generated video for dataset *name*.

    Reads ``dataset/<name>/audio.mp3`` and ``dataset/<name>/mygeneratedvideo.mp4``
    (relative to ``settings.BASE_DIR``), trims the video to the audio length,
    speeds it up slightly, overlays a looping chroma-keyed anchor clip at the
    bottom, and writes ``test.mp4`` into ``settings.BASE_DIR``.

    Best-effort: any failure is logged and swallowed, matching the original
    behavior (callers are never interrupted).
    """
    try:
        os.chdir(os.path.join(settings.BASE_DIR, r"dataset/" + name))
        print(os.listdir())
        audiofile = AudioFileClip('audio.mp3')
        videoclip = VideoFileClip("mygeneratedvideo.mp4")
        # NOTE(review): a background-music mix (CompositeAudioClip of audio.mp3
        # + bg.mp3) was previously built here but never applied to the video;
        # that dead code has been removed.
        videoclip = videoclip.set_audio(audiofile)
        videoclip = videoclip.subclip(0, audiofile.duration)
        videoclip = videoclip.speedx(factor=1.1)
        clip = VideoFileClip(
            'https://github.com/mashuk999/green-screen-video-python/blob/main/greenscreen.mp4?raw=true'
        )
        # Repeat the green-screen anchor clip enough times to cover the video.
        maskedclipDurationMultiplier = int(videoclip.duration // clip.duration)
        # NOTE(review): if the video is shorter than one anchor loop the
        # multiplier is 0, so concatenate_videoclips receives an empty list
        # and raises (caught below) -- confirm whether that is intended.
        maskedClipList = [clip] * maskedclipDurationMultiplier
        # Adding Anchor
        clip = concatenate_videoclips(maskedClipList)
        # Key out the green background of the anchor clip.
        masked_clip = clip.fx(vfx.mask_color, color=[109, 246, 16], thr=80, s=5)
        masked_clip = masked_clip.resize(videoclip.size).set_pos(
            ('center', 'bottom'))
        final_clip = CompositeVideoClip([videoclip, masked_clip])
        final_clip = final_clip.resize((460, 720))
        os.chdir(os.path.join(settings.BASE_DIR, ""))
        final_clip.write_videofile("test" + ".mp4")
    except Exception as e:
        # Deliberate best-effort: log and continue.
        print('addaudioto video m.v.')
        print(e)
class BaseClip:
    """Thin wrapper around a moviepy clip exposing chainable editing helpers.

    The wrapped clip is normalized into a CompositeVideoClip so that every
    helper below can operate on a uniform type; all mutators rebind
    ``self.clip`` in place.
    """

    def __init__(self, clip):
        # Wrap the incoming clip in a single-layer composite.
        self.clip = CompositeVideoClip(clips=[clip])
        # Snapshot of the duration at wrap time; NOTE(review): later edits
        # (crop/subclip) do not refresh this attribute.
        self.duration = self.clip.duration

    def resize(self, new_size):
        """
        Uses moviepy.video.fx.all.resize module
        :param new_size: Can be either (width, height) in pixels, or a float
        scaling factor (like 0.5), or a function of time returning one of these.
        """
        self.clip = self.clip.resize(new_size)

    def crop(self,
             aspectRatio=None,
             x1=None,
             y1=None,
             x2=None,
             y2=None,
             width=None,
             height=None,
             x_center=None,
             y_center=None):
        """
        Uses moviepy.video.fx.crop module. From documentation:
        Returns a new clip in which just a rectangular subregion of the
        original clip is conserved. x1,y1 indicates the top left corner and
        x2,y2 is the lower right corner of the cropped region. All
        coordinates are in pixels. Float numbers are accepted.

        If ``aspectRatio`` is given, it takes precedence and the rectangle
        is derived from the clip's own dimensions; otherwise the explicit
        coordinates below are forwarded to moviepy unchanged.

        :param aspectRatio: preset name/ratio string (e.g. "16:9", "square")
        :param x1: top left corner x-axis
        :param y1: top left corner y-axis
        :param x2: bottom right corner x-axis
        :param y2: bottom right corner y-axis
        :param width: width of rectangle
        :param height: height of rectangle
        :param x_center: x-axis center
        :param y_center: y-axis center
        :raises AttributeError: if ``aspectRatio`` is not a known preset
        """
        # If a preselected aspect ratio was selected.
        if aspectRatio:
            # Default the crop window to the clip's centre.
            # NOTE(review): a falsy 0 coordinate is also replaced here.
            if not x_center:
                x_center = self.clip.w / 2
            if not y_center:
                y_center = self.clip.h / 2
            # Vertical/Phone ratio (width derived from height).
            if aspectRatio == "vertical" or aspectRatio == "9:16" or aspectRatio == "phone":
                self.clip = self.clip.crop(width=self.clip.h * 9 / 16,
                                           height=self.clip.h,
                                           x_center=x_center,
                                           y_center=y_center)
            # Square ratio
            elif aspectRatio == "square" or aspectRatio == "1:1":
                self.clip = self.clip.crop(width=self.clip.h,
                                           height=self.clip.h,
                                           x_center=x_center,
                                           y_center=y_center)
            # 4:3/Letterbox ratio (approximated as 1.33)
            elif aspectRatio == "4:3" or aspectRatio == "1.33:1" or aspectRatio == "letterbox":
                self.clip = self.clip.crop(width=self.clip.h * 1.33,
                                           height=self.clip.h,
                                           x_center=x_center,
                                           y_center=y_center)
            # 16:9/Widescreen ratio (height derived from width, approx 1.77)
            elif aspectRatio == "16:9" or aspectRatio == "widescreen" or aspectRatio == "1.77:1":
                self.clip = self.clip.crop(width=self.clip.w,
                                           height=self.clip.w / 1.77,
                                           x_center=x_center,
                                           y_center=y_center)
            # 21:9/Cinemascope ratio
            elif aspectRatio == "cinemascope" or aspectRatio == "21:9" or aspectRatio == "2.33:1":
                self.clip = self.clip.crop(width=self.clip.w,
                                           height=self.clip.w / 2.33,
                                           x_center=x_center,
                                           y_center=y_center)
            # 2.35:1/Anamorphic ratio
            elif aspectRatio == "anamorphic" or aspectRatio == "2.35:1":
                self.clip = self.clip.crop(width=self.clip.w,
                                           height=self.clip.w / 2.35,
                                           x_center=x_center,
                                           y_center=y_center)
            # 2.39:1/DCI ratio
            elif aspectRatio == "DCI" or aspectRatio == "2.39:1":
                self.clip = self.clip.crop(width=self.clip.w,
                                           height=self.clip.w / 2.39,
                                           x_center=x_center,
                                           y_center=y_center)
            # 2.9:1/Digital IMAX ratio
            elif aspectRatio == "Digital IMAX" or aspectRatio == "2.9:1":
                self.clip = self.clip.crop(width=self.clip.w,
                                           height=self.clip.w / 2.9,
                                           x_center=x_center,
                                           y_center=y_center)
            # If an invalid aspect ratio was specified, raise an exception.
            else:
                raise AttributeError("Invalid Aspect Ratio specified: '" +
                                     str(aspectRatio) + "'")
        # If no preset ratio was selected, use other crop parameters.
        else:
            self.clip = self.clip.crop(x1=x1,
                                       y1=y1,
                                       x2=x2,
                                       y2=y2,
                                       width=width,
                                       height=height,
                                       x_center=x_center,
                                       y_center=y_center)

    def add_text(self, text, font_size, color, font, interline, posString,
                 duration):
        """
        Add a layer of text over the selected clip.

        :param text: text string to render
        :param font_size: font size in points (TextClip ``fontsize``)
        :param color: text color
        :param font: font name
        :param interline: spacing between lines
        :param posString: one of 'top', 'left', 'bottom', 'right',
            'top-left', 'top-right', 'bottom-left', 'bottom-right';
            anything else centres the text
        :param duration: how long (seconds) the text layer is shown
        """
        # Default: centre of the clip. The (w - 0.5) / (h - 0.5) divisors
        # below produce a coordinate just above 1 px, i.e. hug the top/left
        # edge; 1.1 divisors hug the bottom/right edge.
        pos = (self.clip.w / 2, self.clip.h / 2)
        if posString == 'top':
            pos = (self.clip.w / 2, self.clip.h / (self.clip.h - 0.5))
        elif posString == 'left':
            pos = (self.clip.w / (self.clip.w - 0.5), self.clip.h / 2)
        elif posString == 'bottom':
            pos = (self.clip.w / 2, self.clip.h / 1.1)
        elif posString == 'right':
            pos = (self.clip.w / 1.1, self.clip.h / 2)
        elif posString == 'top-left':
            pos = (self.clip.w / (self.clip.w - 0.5),
                   self.clip.h / (self.clip.h - 0.5))
        elif posString == 'top-right':
            pos = (self.clip.w / 1.1, self.clip.h / (self.clip.h - 0.5))
        elif posString == 'bottom-left':
            pos = (self.clip.w / (self.clip.w - 0.5), self.clip.h / 1.1)
        elif posString == 'bottom-right':
            pos = (self.clip.w / 1.1, self.clip.h / 1.1)
        text = TextClip(
            text,
            fontsize=font_size,
            color=color,
            font=font,
            interline=interline).set_pos(pos).set_duration(duration)
        self.clip = CompositeVideoClip([self.clip, text])

    def addAudioFromFile(self, audio, start_time, end_time):
        """
        Uses moviepy.audio.io.AudioFileClip module. from Doc:
        An audio clip read from a sound file, or an array. The whole file
        is not loaded in memory. Instead, only a portion is read and stored
        in memory. This portion includes frames before and after the last
        frames read, so that it is fast to read the sound backward and
        forward.

        :param audio: audio file taken from directory (mp3, wav, etc)
        :param start_time: subclip start (seconds) within the audio file
        :param end_time: subclip end (seconds) within the audio file
        :return: adds audio to the clip being worked on (self.clip)

        This method works with the clip that was made and is stored on
        self.clip, which means it will alter a clip that is already being
        made, not a new external clip. This is to avoid discrepancies when
        making new clips with or without overlay audio.
        """
        thisAudio = AudioFileClip(audio)
        changedAudio = thisAudio.subclip(start_time, end_time)
        # set_audio REPLACES the clip's audio track (it does not mix).
        self.clip = self.clip.set_audio(changedAudio)

    def addAudioFromClip(self, clipToExtract, start_time, end_time):
        """
        Instead of using an audio file like the method before this, it takes
        another video such as an mp4 file and rips the audio out of it,
        converts it into an AudioClip, and overlays it on the clip that is
        currently being worked on.

        ****This DOES NOT work with clips made through the VideoFileClip()
        method, since they have been processed as a different file type and
        already have their own audio attribute. To access such, one just
        needs to call 'clip'.audio, clip being your target clip for audio
        extraction.

        :param clipToExtract: video from directory (mp4, etc)
        :param start_time: subclip start (seconds)
        :param end_time: subclip end (seconds)
        :return: adds audio to the clip being worked on (self.clip)
        """
        # AudioFileClip can open a video container directly and expose
        # only its audio track.
        thisAudio = AudioFileClip(clipToExtract)
        changedAudio = thisAudio.subclip(start_time, end_time)
        self.clip = self.clip.set_audio(changedAudio)

    def writeVideo(self, filename):
        """
        Write the video to a file.

        :param filename: name and format of output file.
        :return:
        """
        self.clip.write_videofile(filename)

    def create_gif(self, filename):
        # Render the current clip as an animated GIF.
        # TODO: gif that loops fluidly
        self.clip.write_gif(filename)
def crop_scale(self, dimensions: Tuple[int, int]) -> 'Segment':
    """
    Fit this segment into the requested dimensions, filling any leftover
    space with blurred copies of the segment itself (letterbox/pillarbox
    with a blurred background).

    Parameters
    ----------
    dimensions : Tuple[int, int]
        Target (width, height) in pixels.

    Returns
    -------
    A new Segment, cropped and/or scaled as necessary to reach specified
    dimensions. The original segment is not modified (works on a copy).
    """
    segment = self.copy()
    dimensions = Dimensions(*dimensions)

    def blur(image):
        # Box blur used for the background filler strips.
        #return cv2.GaussianBlur(image.astype(float),(99,99),0)
        return cv2.blur(image.astype(float), (30, 30), 0)

    #if segment.aspect_ratio != dimensions.aspect_ratio:
    # Crop segment to match aspect ratio
    #segment = segment.crop_to_aspect_ratio(dimensions.aspect_ratio)
    #if segment.dimensions != dimensions:
    # Resize segment to reach final dimensions
    #segment = segment.resize(dimensions)
    replace_width = dimensions.width
    replace_height = dimensions.height
    if segment.aspect_ratio != replace_width / replace_height:
        ##########################################Below 1 AR##################################################
        # Portrait or square input: pillarbox with blurred left/right strips.
        if segment.aspect_ratio <= 1:
            #print("below 1")
            if segment.size[0] != replace_width:
                segment = segment.resize(width=replace_width)
            if segment.size[1] != replace_height:
                segment = segment.resize(height=replace_height)
            segment = segment.set_position("center")
            # Left and right halves of the segment become the background.
            background1 = segment.crop(x1=0, width=(segment.w / 2))
            background2 = segment.crop(x1=(segment.w / 2),
                                       width=(segment.w / 2))
            if segment.aspect_ratio != 1:
                #print("Not 1:1")
                # Stretch each half to cover the empty side strips.
                # NOTE(review): the +1 on background2 looks like a rounding
                # fudge to avoid a 1-px gap -- confirm.
                background1 = background1.resize(
                    width=(replace_width - segment.w) / 2)
                background2 = background2.resize(
                    width=((replace_width - segment.w) / 2) + 1)
            background1 = background1.set_position(
                ("left", 'center')).fl_image(blur)
            background2 = background2.set_position(
                ("right", 'center')).fl_image(blur)
            segment = CompositeVideoClip(
                [background1, background2, segment],
                size=(replace_width, replace_height))
            # Compositing returns a plain clip; carry the effects list over.
            segment.effects = self.effects
        #########################################Above 1080 ratio###############################################
        # Input wider than the target ratio: letterbox with blurred
        # top/bottom strips.
        elif segment.aspect_ratio > round(replace_width / replace_height, 2):
            #print("above 1.7")
            if segment.size[1] != replace_height:
                segment = segment.resize(height=replace_height)
            if segment.size[0] != replace_width:
                segment = segment.resize(width=replace_width)
            # Height of each empty strip above/below the centred segment.
            test = (replace_height - segment.h) / 2
            segment = segment.set_position("center")
            background1 = segment.crop(
                y1=0, height=((replace_height - segment.h) / 2))
            background2 = segment.crop(y1=segment.h - test, height=test)
            background1 = background1.set_position(
                ('center', 'top')).fl_image(blur)
            background2 = background2.set_position(
                ('center', 'bottom')).fl_image(blur)
            segment = CompositeVideoClip(
                [background1, background2, segment],
                size=(replace_width, replace_height))
            segment.effects = self.effects
        ######################################other#######################################################
        # Landscape input narrower than the target ratio: pillarbox again.
        elif segment.aspect_ratio > 1 and segment.aspect_ratio < round(
                replace_width / replace_height, 2):
            #print('midway between')
            if segment.size[0] != replace_width:
                segment = segment.resize(width=replace_width)
            if segment.size[1] != replace_height:
                segment = segment.resize(height=replace_height)
            segment = segment.set_position("center")
            background1 = segment.crop(x1=0, width=(segment.w / 2))
            background2 = segment.crop(x1=(segment.w / 2),
                                       width=(segment.w / 2))
            background1 = background1.set_position(
                ("left", 'center')).fl_image(blur)
            background2 = background2.set_position(
                ("right", 'center')).fl_image(blur)
            segment = CompositeVideoClip(
                [background1, background2, segment],
                size=(replace_width, replace_height))
            segment.effects = self.effects
    #############################################################################################
    # Aspect ratio already matched (or was handled above) but the size is
    # still off: force-resize to the exact target.
    # NOTE(review): the `and` means a clip wrong in only ONE dimension is
    # not resized here -- `or` may have been intended; confirm.
    if segment.w != replace_width and segment.h != replace_height:
        segment = segment.resize((replace_width, replace_height))
        #print("On Aspect, too big or small")
    return segment
def crosscut(videos_path="./video", option="random"):
    """Build a stage-mix video by cross-cutting between the clips in
    *videos_path*.

    Clips are first aligned via ``start_times``; the shortest clip bounds
    the output length and supplies the audio. The mix then advances in
    windows: either switching to a random clip (``option == "random"`` or
    near the end) or to the clip whose frames best match the current one
    according to ``distance()``, with zoom/rotate transition effects.

    Relies on module-level names: WINDOW_TIME, PADDED_TIME, ONE_FRAME_SEC,
    ZOOM_FRAME, CROSS_FRAME, EYE_MIN_DIFF, ROTATE_MAX, distance, Moving,
    ForceZoom. Writes ``random.mp4`` and returns the final clip.
    """
    min_time = 1000.0  # duration of the shortest clip (init: large sentinel)
    min_idx = 0
    audioclip = None
    extracted_clips_array = []
    # VIDEO SONG START TIME ARRAY
    # 0  1  2  3  4  5  6  7  8  9  10
    # start_times = [0, 4, 4, 0, 0, 1, 14, 0, 0, 0, 0]
    # start_times = [0.3, 1, 0]  # number of songs
    start_times = [0 for i in range(10)]  # number of songs
    # NOTE(review): assumes at most 10 videos in videos_path; an 11th would
    # raise IndexError on start_times -- confirm.
    # VIDEO ALIGNMENT -> SLICE START TIME
    for i in range(len(os.listdir(videos_path))):
        video_path = os.path.join(
            videos_path,
            sorted(os.listdir(videos_path))[i])  # sorted so ordering is stable
        clip = VideoFileClip(video_path)
        clip = clip.subclip(start_times[i],
                            clip.duration)  # align start points via start_times
        print(video_path, clip.fps, clip.duration)
        if min_time > clip.duration:  # base the mix length on the shortest video
            audioclip = clip.audio
            min_time = clip.duration
            min_idx = i
        extracted_clips_array.append(clip)
    print(len(extracted_clips_array), ' videos min idx is ', min_idx, ' time',
          min_time)
    con_clips = []  # accumulated output segments, concatenated at the end
    t = 3  # current position (seconds) in the aligned timeline
    current_idx = 0  # index of the clip currently "on screen"
    check_tqdm = 1  # loop-iteration counter for progress prints
    con_clips.append(extracted_clips_array[current_idx].subclip(
        0, min(t, int(min_time))))  # starts are aligned, so begin at 0
    # GENERATE STAGEMIX
    # CONCAT SUBCLIP 0~ MIN DURATION CLIP TIME
    while t < min_time:
        print(
            check_tqdm,
            '------------------------------------------------------------------'
        )
        check_tqdm += 1
        # Generate at most WINDOW_TIME seconds per iteration.
        cur_t = t
        next_t = min(t + WINDOW_TIME,
                     min_time)  # the last window may be shorter
        # RANDOM BASED METHOD
        if option == "random" or min(
                min_time, t + PADDED_TIME
        ) == min_time:  # the final stretch may also fall back to random
            random_video_idx = random.randint(0,
                                              len(extracted_clips_array) - 1)
            clip = extracted_clips_array[random_video_idx].subclip(
                cur_t, next_t)
            t = next_t
            con_clips.append(clip)
        else:
            reference_clip = extracted_clips_array[current_idx].subclip(
                cur_t, next_t)  # the clip currently playing
            d = 5000000  # init  # best (minimum) distance so far
            # Fallback target when every distance stays at the init value.
            min_idx = (current_idx + 1) % len(extracted_clips_array)
            for video_idx in range(len(extracted_clips_array)):
                # Skip the current video so we never cut to itself.
                if video_idx == current_idx:
                    continue
                # Examine the candidate over the same window.
                clip = extracted_clips_array[video_idx].subclip(cur_t, next_t)
                # Padding may skip a great match, but it prevents the mix
                # from switching repeatedly around the same moment.
                # CALCULATE DISTANCE between reference_clip and the candidate
                # (find the best-matching scene within the same window).
                cur_d, plus_frame, refer_length, refer_degree, compare_length, compare_degree, refer_point, compare_point = distance(
                    reference_clip, clip)
                print('from video:', current_idx, ' to video', video_idx,
                      ' in distance ', cur_d, ' in sec ', cur_t + plus_frame,
                      'first deg ', refer_degree, 'second deg ',
                      compare_degree)
                if d > cur_d:  # record the new best match
                    d = cur_d
                    min_idx = video_idx
                    next_t = cur_t + plus_frame  # frame at which to switch
                    cur_clip = reference_clip.subclip(0, plus_frame)
                    next_clip = clip.subclip(
                        0, plus_frame)  # candidate trimmed to the switch point
                    compare_point_max = compare_point
                    refer_point_max = refer_point
                    refer_length_max = refer_length  # used later to scale clips to match
                    compare_length_max = compare_length  # used later to scale clips to match
                    refer_degree_max = refer_degree
                    compare_degree_max = compare_degree
            # NOTE(review): if the loop above never assigns cur_clip (e.g.
            # only one video), `not cur_clip` raises NameError -- confirm.
            if d == 5000000 or (
                    not cur_clip):  # every distance was inf, or cur_clip is empty
                # Advance to the fallback index.
                current_idx = min_idx  # index to play next
                clip = reference_clip  # current clip (distance was inf, so the whole window is used)
                t = next_t
                con_clips.append(clip)
                if t < min_time:  # no padding needed once t reached min_time
                    # Append padding after the cut.
                    pad_clip = extracted_clips_array[current_idx].subclip(
                        t, min(min_time,
                               t + PADDED_TIME))  # must not exceed min_time
                    t = min(min_time, t + PADDED_TIME)  # advance by padded time
                    con_clips.append(pad_clip)
            else:
                # Concat the current clip and remember which video comes next.
                prev_idx = current_idx
                # Advance to the best-matching video.
                current_idx = min_idx  # the index whose segment connects with the smallest distance
                print("next video idx : {}".format(current_idx))
                print(refer_length_max, compare_length_max,
                      '----refer, compare length max')
                print(refer_point_max, compare_point_max,
                      '----left eye point information')
                clip = cur_clip  # current clip, trimmed to the best switch point
                # Editing happens below. -------------------------------------
                t = next_t
                # --------------------------------------------------------------
                # 1. Append the pre-transition part without effects ------------
                # --------------------------------------------------------------
                clip_front = clip.subclip(
                    0, clip.duration -
                    (ONE_FRAME_SEC * ZOOM_FRAME))  # everything before the transition
                con_clips.append(clip_front)
                # --------------------------------------------------------------
                # 2. Append the transition part (ZOOM_FRAME frames) ------------
                # --------------------------------------------------------------
                clip_back = clip.subclip(
                    clip.duration - (ONE_FRAME_SEC * ZOOM_FRAME),
                    clip.duration)
                ## Allow resize/transition only when eye distance and angle
                ## are close enough between the two clips.
                if abs(compare_length_max -
                       refer_length_max) < EYE_MIN_DIFF and abs(
                           compare_degree_max -
                           refer_degree_max) < ROTATE_MAX:
                    # If the outgoing clip's face is smaller, run Moving to
                    # zoom it in toward the incoming clip's framing.
                    if compare_length_max > refer_length_max and compare_length_max - refer_length_max < EYE_MIN_DIFF:
                        clip_back = clip_back.fl(
                            Moving(refer_point_max, compare_point_max,
                                   compare_length_max / refer_length_max,
                                   'small_to_big',
                                   refer_degree_max - compare_degree_max))
                        clip_back = clip_back.resize((1280, 720))
                    else:
                        # Incoming clip's face is smaller: match sizes via
                        # ForceZoom instead.
                        clip_back = clip_back.fl(
                            ForceZoom(compare_point_max, refer_point_max,
                                      refer_length_max / compare_length_max,
                                      'small_to_big'))
                        clip_back = clip_back.resize((1280, 720))
                    con_clips.append(clip_back)
                else:
                    con_clips.append(clip_back)
                # ---------------------------------------------------
                # 3. Append padding data from the next video --------
                # ---------------------------------------------------
                pad_clip = extracted_clips_array[current_idx].subclip(
                    t, min(min_time,
                           t + PADDED_TIME))  # must not exceed min_time
                # Decide whether the padding also gets transition effects.
                if abs(compare_length_max -
                       refer_length_max) < EYE_MIN_DIFF and abs(
                           compare_degree_max -
                           refer_degree_max) < ROTATE_MAX:
                    ### PAD FRONT ---------------
                    pad_front = pad_clip.subclip(
                        0, ONE_FRAME_SEC *
                        ZOOM_FRAME)  # just the transition-length head
                    # Outgoing face larger, incoming (pad_clip) smaller:
                    if refer_length_max > compare_length_max and refer_length_max - compare_length_max < EYE_MIN_DIFF:
                        # pad_clip must be zoomed in to match.
                        pad_front = pad_front.fl(
                            Moving(compare_point_max, refer_point_max,
                                   refer_length_max / compare_length_max,
                                   'big_to_small',
                                   compare_degree_max - refer_degree_max))
                        pad_front = pad_front.resize((1280, 720))
                        # Keep the previous video playing and cross-fade it
                        # out (no rotation/zoom needed on it here).
                        cross_clip = extracted_clips_array[prev_idx].subclip(
                            t, t + ONE_FRAME_SEC *
                            CROSS_FRAME)  # must not exceed min_time
                        cross_clip = cross_clip.fl(
                            ForceZoom(compare_point_max, refer_point_max,
                                      refer_length_max / compare_length_max,
                                      'same'))  # ForceZoom is required here too
                        pad_front = CompositeVideoClip([
                            pad_front,
                            cross_clip.crossfadeout(ONE_FRAME_SEC *
                                                    CROSS_FRAME)
                        ])
                    else:
                        # Outgoing face smaller:
                        pad_front = pad_front.fl(
                            ForceZoom(refer_point_max, compare_point_max,
                                      compare_length_max / refer_length_max,
                                      'big_to_small'))
                        pad_front = pad_front.resize((1280, 720))
                        cross_clip = extracted_clips_array[prev_idx].subclip(
                            t, t + ONE_FRAME_SEC *
                            CROSS_FRAME)  # must not exceed min_time
                        cross_clip = cross_clip.fl(
                            Moving(refer_point_max, compare_point_max,
                                   compare_length_max / refer_length_max,
                                   'same',
                                   refer_degree_max - compare_degree_max))
                        pad_front = CompositeVideoClip([
                            pad_front,
                            cross_clip.crossfadeout(ONE_FRAME_SEC *
                                                    CROSS_FRAME)
                        ])
                    con_clips.append(pad_front)
                    ### PAD BACK ---------------
                    pad_back = pad_clip.subclip(
                        ONE_FRAME_SEC * ZOOM_FRAME,
                        pad_clip.duration)  # remainder of the padding
                    t = min(min_time, t + PADDED_TIME)  # advance by padded time
                    con_clips.append(pad_back)
                else:
                    t = min(min_time, t + PADDED_TIME)  # advance by padded time
                    con_clips.append(pad_clip)
    # Concatenate all accumulated pieces into the final mix.
    final_clip = concatenate_videoclips(con_clips)
    if audioclip != None:
        final_clip.audio = audioclip
    final_clip.write_videofile("random.mp4")
    return final_clip