import numpy as np

from moviepy.video.tools.drawing import color_split


def test_color_split(
    size, x, y, p1, p2, vector, color_1, color_2, gradient_width, expected_result,
):
    # Parametrized check that color_split returns exactly the expected mask array.
    result = color_split(
        size,
        x=x,
        y=y,
        p1=p1,
        p2=p2,
        vector=vector,
        color_1=color_1,
        color_2=color_2,
        gradient_width=gradient_width,
    )
    assert np.array_equal(result, expected_result)
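# A minimal standalone sketch of what the test above exercises. It assumes the
# newer color_1/color_2/gradient_width parameter names (older MoviePy releases
# spell them col1/col2/grad_width); the size and x values are illustrative.
import numpy as np
from moviepy.video.tools.drawing import color_split

# Hard vertical split (no gradient) of a 4x3 mask at x=2: columns 0-1 take
# color_1, columns 2-3 take color_2. The array comes back as height x width.
mask = color_split((4, 3), x=2, color_1=0, color_2=1)
print(mask.shape)  # (3, 4)
print(mask[0])     # [0. 0. 1. 1.]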
def mask_frame(self, t):
    # Clamp t to [start_ts, end_ts] and turn it into a 0..1 progress fraction.
    normalized_t = min(max(self.start_ts, t), self.end_ts) - self.start_ts
    pct = (
        normalized_t
        if normalized_t == 0
        else normalized_t / (self.end_ts - self.start_ts)
    )
    # Everything left of the moving split line is opaque (1), the rest transparent (0).
    return color_split(
        size=self.clip_size,
        x=int(pct * self.clip_width),
        col1=1,
        col2=0,
    )
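# Hedged sketch of how a time-dependent mask like mask_frame above can be used:
# wrap it in a VideoClip(..., ismask=True) so the split line sweeps across the
# frame and reveals the clip left to right. The standalone wipe_mask function,
# the input file name, and the 0-2 second window are illustrative, not taken
# from the original class.
from moviepy.editor import VideoClip, VideoFileClip
from moviepy.video.tools.drawing import color_split

clip = VideoFileClip("input.mp4")   # hypothetical input file
start_ts, end_ts = 0.0, 2.0         # wipe runs over the first two seconds
w, h = clip.size

def wipe_mask(t):
    # Same clamping and progress logic as mask_frame, without instance state.
    t = min(max(start_ts, t), end_ts) - start_ts
    pct = t if t == 0 else t / (end_ts - start_ts)
    # Left of the split line is opaque (1), right of it transparent (0).
    return color_split((w, h), x=int(pct * w), col1=1, col2=0)

mask_clip = VideoClip(wipe_mask, ismask=True, duration=clip.duration)
revealed = clip.set_mask(mask_clip)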
from moviepy.editor import *
from moviepy.video.tools.drawing import color_split

duration = 6  # duration of the final clip

# LOAD THE MAIN SCENE
# this small video contains the two scenes that we will put together.

main_clip = VideoFileClip("../../videos/charadePhone.mp4")
W, H = main_clip.size

# MAKE THE LEFT CLIP : cut, crop, add a mask

mask = color_split(
    (2 * W / 3, H),
    p1=(W / 3, H),
    p2=(2 * W / 3, 0),
    col1=1,
    col2=0,
    grad_width=2,
)
mask_clip = ImageClip(mask, ismask=True)

clip_left = (
    main_clip.coreader()
    .subclip(0, duration)
    .crop(x1=60, x2=60 + 2 * W / 3)
    .set_mask(mask_clip)
)

# MAKE THE RIGHT CLIP : cut, crop, add a mask

mask = color_split(
    (2 * W / 3, H),
    p1=(2, H),
    p2=(W / 3 + 2, 0),
    col1=0,
    col2=1,
duration = 6  # duration of the final clip

# LOAD THE MAIN SCENE
# this small video contains the two scenes that we will put together.

main_clip = VideoFileClip("../../videos/charadePhone.mp4")
W, H = main_clip.size

# MAKE THE LEFT CLIP : cut, crop, add a mask

mask = color_split(
    (2 * W / 3, H),
    p1=(W / 3, H),
    p2=(2 * W / 3, 0),
    color_1=1,
    color_2=0,
    gradient_width=2,
)
mask_clip = ImageClip(mask, is_mask=True)

clip_left = (
    main_clip.coreader()
    .subclip(0, duration)
    .crop(x1=60, x2=60 + 2 * W / 3)
    .with_mask(mask_clip)
)

# MAKE THE RIGHT CLIP : cut, crop, add a mask
def processGif(videoId, start, end, pixelWidth, loop, maskType, stillFrame, mask, mp4, fps):
    print("////////////////")
    print("Processing your gif...")
    videoFile = getVideoPath(videoId)
    gifPath = getGifPath(videoId, start, end, pixelWidth, loop, maskType, stillFrame, mask, fps)
    print(gifPath)
    print("--------------")
    print("Making a gif " + pixelWidth + " pixels wide....")

    clip = (VideoFileClip(videoFile, audio=False)
            .subclip(float(start), float(end))
            .resize(width=int(pixelWidth)))  # .crop(x1=138.7, x2=640, y1=0, y2=512.8))
    composition = clip
    d = clip.duration

    # deal with looping
    if loop == "time_symetrize":
        composition = clip.fx(time_symetrize)
        d = d * 2
    if loop == "progressive_fade":
        clip = clip.crossfadein(d / 2)
        composition = (CompositeVideoClip([clip, clip.set_start(d / 2), clip.set_start(d)])
                       .subclip(d / 2, 3 * d / 2))
    if loop == "still_fade":
        snapshot = (clip.to_ImageClip()
                    .set_duration(d / 6)
                    .crossfadein(d / 6)
                    .set_start(5 * d / 6))
        composition = CompositeVideoClip([clip, snapshot])

    # deal with masking
    if maskType:
        p = mask.split(',')
        if maskType == 'maskLeft' or maskType == 'maskRight':
            # maskLeft means the left side will be masked, maskRight means the right side will be masked
            colLeft = 1  # col1 determines if the left side of the image is still (1) or animated (0)
            colRight = 0
            if maskType == 'maskRight':
                colLeft = 0
                colRight = 1
            clipMask = dw.color_split(clip.size,
                                      p1=(float(p[0]), float(p[1])),
                                      p2=(float(p[2]), float(p[3])),
                                      col1=colLeft, col2=colRight,
                                      grad_width=25)  # blur the mask's edges
            snapshot = (clip.to_ImageClip(t=float(stillFrame))
                        .set_duration(d)
                        .set_mask(ImageClip(clipMask, ismask=True)))
            composition = CompositeVideoClip([composition, snapshot])
        if maskType == 'maskOuter':
            composition = composition.fx(vfx.freeze_region,
                                         outside_region=(p[0], p[1], p[2], p[3]))
        if maskType == 'maskInner':
            freeze = (composition.fx(vfx.crop, x1=p[0], y1=p[1], x2=p[2], y2=p[3])
                      .to_ImageClip(t=float(stillFrame))
                      .set_duration(d)
                      .set_position((p[0], p[1])))
            composition = CompositeVideoClip([composition, freeze])

    print("fps: " + str(fps))
    if fps == "auto":
        composition.write_gif(gifPath)  # auto
    else:
        composition.write_gif(gifPath, fps=float(fps))

    if mp4 == "true":
        print("//////////////////")
        print("Writing your video....")
        mp4Path = getMp4Path(gifPath)
        composition.write_videofile(mp4Path)

    return os.path.join(_STATIC_URL, gifPath)
duration = 6  # duration of the final clip

# LOAD THE MAIN SCENE
# this small video contains the two scenes that we will put together.

main_clip = VideoFileClip("../../videos/charadePhone.mp4")
W, H = main_clip.size

# MAKE THE LEFT CLIP : cut, crop, add a mask

mask = color_split((2*W/3, H),
                   p1=(W/3, H),
                   p2=(2*W/3, 0),
                   col1=1,
                   col2=0,
                   grad_width=2)
mask_clip = ImageClip(mask, ismask=True)

clip_left = (main_clip.coreader()
             .subclip(0, duration)
             .crop(x1=60, x2=60 + 2*W/3)
             .set_mask(mask_clip))

# MAKE THE RIGHT CLIP : cut, crop, add a mask

mask = color_split((2*W/3, H),
                   p1=(2, H),
                   p2=(W/3 + 2, 0),
                   col1=0,
                   col2=1,
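# Hedged sketch of how this truncated excerpt typically continues: finish the
# right-hand mask, build clip_right with it, and overlay the two masked halves.
# grad_width=2 completes the cut-off call by symmetry with the left mask; the
# subclip start (21 s) and crop offset (70 px) are placeholders, not the
# original script's values.
mask = color_split((2*W/3, H), p1=(2, H), p2=(W/3 + 2, 0), col1=0, col2=1,
                   grad_width=2)
mask_clip = ImageClip(mask, ismask=True)

clip_right = (main_clip.coreader()
              .subclip(21, 21 + duration)      # placeholder start time
              .crop(x1=70, x2=70 + 2*W/3)      # placeholder crop window
              .set_mask(mask_clip))

# Put the two masked halves on one canvas; the 2-pixel gradient edges hide the seam.
final = CompositeVideoClip([clip_left.set_position("left"),
                            clip_right.set_position("right")],
                           size=(W, H))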
# temp
# duration = 5

tw, th = target_clip.size
sw, sh = source_clip.size
if (tw != sw) or (th != sh):
    # do resizing. not important now bc vids are same size
    pass
width, height = (tw, th)

# make left clip - target
left_mask = color_split((3 * width / 4, height),
                        p1=(3 * width / 4, height),
                        p2=(3 * width / 4, 0),
                        col1=1, col2=0,
                        grad_width=2)
mask_clip_l = ImageClip(left_mask, ismask=True)
clip_left = (target_clip.copy()
             .subclip(0, duration)
             .set_mask(mask_clip_l))

# make right clip - source
right_mask = color_split((3 * width / 4, height),
                         p1=(3 * width / 4, height),
                         p2=(3 * width / 4, 0),
                         col1=1, col2=0,
                         grad_width=2)
mask_clip_r = ImageClip(right_mask, ismask=True)