def test_rotate():
    clip = get_test_video()

    clip1 = rotate(clip, 90)  # rotate 90 degrees
    assert clip1.size == (clip.size[1], clip.size[0])
    clip1.write_videofile(os.path.join(TMP_DIR, "rotate1.webm"))

    clip2 = rotate(clip, 180)  # rotate 180 degrees
    assert clip2.size == tuple(clip.size)
    clip2.write_videofile(os.path.join(TMP_DIR, "rotate2.webm"))

    clip3 = rotate(clip, 270)  # rotate 270 degrees
    assert clip3.size == (clip.size[1], clip.size[0])
    clip3.write_videofile(os.path.join(TMP_DIR, "rotate3.webm"))

    clip4 = rotate(clip, 360)  # rotate 360 degrees
    assert clip4.size == tuple(clip.size)
    clip4.write_videofile(os.path.join(TMP_DIR, "rotate4.webm"))

    clip5 = rotate(clip, 50)
    clip5.write_videofile(os.path.join(TMP_DIR, "rotate5.webm"))

    # Test rotate with a color clip
    clip = ColorClip([600, 400], [150, 250, 100]).set_duration(1).set_fps(5)
    clip = rotate(clip, 20)
    clip.write_videofile(os.path.join(TMP_DIR, "color_rotate.webm"))

    close_all_clips(locals())
def test_release_of_file_via_close():
    # Create a random video file.
    red = ColorClip((256, 200), color=(255, 0, 0))
    green = ColorClip((256, 200), color=(0, 255, 0))
    blue = ColorClip((256, 200), color=(0, 0, 255))
    red.fps = green.fps = blue.fps = 10

    # Repeat this to check that there are no conflicts.
    for i in range(3):
        # Get the name of a temporary file we can use.
        local_video_filename = os.path.join(
            TMP_DIR, "test_release_of_file_via_close_%s.mp4" % int(time.time())
        )

        clip = clips_array([[red, green, blue]]).with_duration(0.5)
        clip.write_videofile(local_video_filename)

        # Open it up with VideoFileClip.
        video = VideoFileClip(local_video_filename)
        video.close()
        clip.close()

        # Now remove the temporary file.
        # This would fail on Windows if the file were still locked,
        # so it should succeed without exceptions.
        os.remove(local_video_filename)

    red.close()
    green.close()
    blue.close()
def test_issue_407():
    red = ColorClip((800, 600), color=(255, 0, 0)).with_duration(5)
    red.fps = 30

    assert red.fps == 30
    assert red.w == 800
    assert red.h == 600
    assert red.size == (800, 600)

    # These ColorClips have no fps attribute set.
    green = ColorClip((640, 480), color=(0, 255, 0)).with_duration(2)
    blue = ColorClip((640, 480), color=(0, 0, 255)).with_duration(2)

    assert green.w == blue.w == 640
    assert green.h == blue.h == 480
    assert green.size == blue.size == (640, 480)

    with pytest.raises(AttributeError):
        green.fps
    with pytest.raises(AttributeError):
        blue.fps

    video = concatenate_videoclips([red, green, blue])
    assert video.fps == red.fps
def test_PR_424():
    """Ensure deprecation and user warnings are triggered."""
    import warnings

    warnings.simplefilter('always')  # Alert us of deprecation warnings.

    # Recommended use
    ColorClip([1000, 600], color=(60, 60, 60), duration=10).close()

    with pytest.warns(DeprecationWarning):
        # Uses `col` so should work the same as above, but give a warning.
        ColorClip([1000, 600], col=(60, 60, 60), duration=10).close()

    # Catch all warnings as a record.
    with pytest.warns(None) as record:
        # Should give 2 warnings and use `color`, not `col`
        ColorClip([1000, 600], color=(60, 60, 60), duration=10,
                  col=(2, 2, 2)).close()

    message1 = 'The `ColorClip` parameter `col` has been deprecated. ' + \
               'Please use `color` instead.'
    message2 = 'The arguments `color` and `col` have both been passed to ' + \
               '`ColorClip` so `col` has been ignored.'

    # Assert that two warnings popped and validate the message text.
    assert len(record) == 2
    assert str(record[0].message) == message1
    assert str(record[1].message) == message2
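# The snippet below is not part of the test above; it is a minimal, hypothetical
# sketch of the kind of `col` -> `color` deprecation shim that test_PR_424
# exercises. The real logic lives inside `ColorClip.__init__`; the helper name
# `_resolve_color` and its signature are assumptions for illustration only.
import warnings


def _resolve_color(color=None, col=None):
    """Return the effective color, warning when the deprecated `col` is used."""
    if col is not None:
        warnings.warn(
            'The `ColorClip` parameter `col` has been deprecated. '
            'Please use `color` instead.',
            DeprecationWarning,
        )
        if color is not None:
            # Both given: keep `color`, ignore `col`, and warn about it.
            warnings.warn(
                'The arguments `color` and `col` have both been passed to '
                '`ColorClip` so `col` has been ignored.',
                UserWarning,
            )
        else:
            color = col
    return color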
def test_write_gif(util, clip_class, opt, loop, with_mask, pixel_format):
    filename = os.path.join(util.TMP_DIR, "moviepy_write_gif.gif")
    if os.path.isfile(filename):
        os.remove(filename)

    fps = 10

    if clip_class == "BitmapClip":
        original_clip = BitmapClip([["R"], ["G"], ["B"]], fps=fps).with_duration(0.3)
    else:
        original_clip = concatenate_videoclips(
            [
                ColorClip((1, 1), color=color).with_duration(0.1).with_fps(fps)
                for color in [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
            ]
        )
    if with_mask:
        original_clip = original_clip.with_mask(
            ColorClip((1, 1), color=1, is_mask=True).with_fps(fps).with_duration(0.3)
        )

    kwargs = {}
    if pixel_format is not None:
        kwargs["pixel_format"] = pixel_format

    write_gif(
        original_clip,
        filename,
        fps=fps,
        with_mask=with_mask,
        program="ffmpeg",
        logger=None,
        opt=opt,
        loop=loop,
        **kwargs,
    )

    if pixel_format != "invalid":
        final_clip = VideoFileClip(filename)

        r, g, b = final_clip.get_frame(0)[0][0]
        assert r == 252
        assert g == 0
        assert b == 0

        r, g, b = final_clip.get_frame(0.1)[0][0]
        assert r == 0
        assert g == 252
        assert b == 0

        r, g, b = final_clip.get_frame(0.2)[0][0]
        assert r == 0
        assert g == 0
        assert b == 255

        assert final_clip.duration == (loop or 1) * round(original_clip.duration, 6)
def test_subtitles():
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    generator = lambda txt: TextClip(txt, font=FONT, size=(800, 600),
                                     fontsize=24, method='caption',
                                     align='South', color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [([0.0, 4.0], 'Red!'),
            ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'),
            ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'),
            ([25.0, 29.0], 'More Blue!')]
    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data

    close_all_clips(locals())
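# For reference only: an assumed excerpt of what "media/subtitles1.srt" would
# have to contain to produce the `data` list asserted above (first two cues,
# inferred from the [start, end]/text pairs, not copied from the actual file).
SAMPLE_SRT_EXCERPT = """\
1
00:00:00,000 --> 00:00:04,000
Red!

2
00:00:05,000 --> 00:00:09,000
More Red!
"""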
def test_subtitles():
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    # Travis does not like TextClip, so return for now,
    # but allow regular users to still run the test below.
    if TRAVIS:
        return

    generator = lambda txt: TextClip(txt, font='Liberation-Mono', size=(800, 600),
                                     fontsize=24, method='caption',
                                     align='South', color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.to_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [([0.0, 4.0], 'Red!'),
            ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'),
            ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'),
            ([25.0, 29.0], 'More Blue!')]
    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
def test_subtitles(util):
    red = ColorClip((800, 600), color=(255, 0, 0)).with_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).with_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).with_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    generator = lambda txt: TextClip(
        txt,
        font=util.FONT,
        size=(800, 600),
        font_size=24,
        method="caption",
        align="South",
        color="white",
    )

    subtitles = SubtitlesClip("media/subtitles.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.subclip(0, 0.5).write_videofile(
        os.path.join(util.TMP_DIR, "subtitles.mp4"),
        fps=5,
        logger=None,
    )

    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA

    subtitles = SubtitlesClip(MEDIA_SUBTITLES_DATA, generator)
    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA
def test_release_of_file_via_close():
    # Create a random video file.
    red = ColorClip((1024, 800), color=(255, 0, 0))
    green = ColorClip((1024, 800), color=(0, 255, 0))
    blue = ColorClip((1024, 800), color=(0, 0, 255))
    red.fps = green.fps = blue.fps = 30

    # Repeat this to check that there are no conflicts.
    for i in range(5):
        # Get the name of a temporary file we can use.
        local_video_filename = join(
            TMP_DIR, "test_release_of_file_via_close_%s.mp4" % int(time.time())
        )

        with clips_array([[red, green, blue]]) as ca:
            video = ca.set_duration(1)
            video.write_videofile(local_video_filename)

            # Open it up with VideoFileClip.
            with VideoFileClip(local_video_filename) as clip:
                # Normally a client would do processing here.
                pass

        # Now remove the temporary file.
        # This would fail on Windows if the file were still locked,
        # so it should succeed without exceptions.
        remove(local_video_filename)

    red.close()
    green.close()
    blue.close()
def test_failure_to_release_file():
    """This isn't really a test, because it is expected to fail.

    It demonstrates that there *is* a problem with not releasing resources
    when running on Windows.

    The real issue was that, as of moviepy 0.2.3.2, there was no way around
    it. See test_resourcerelease.py to see how the close() methods provide
    a solution.
    """
    # Get the name of a temporary file we can use.
    local_video_filename = join(
        TMP_DIR, "test_release_of_file_%s.mp4" % int(time.time())
    )

    # Repeat this so we can see that the problems escalate:
    for i in range(5):
        # Create a random video file.
        red = ColorClip((256, 200), color=(255, 0, 0))
        green = ColorClip((256, 200), color=(0, 255, 0))
        blue = ColorClip((256, 200), color=(0, 0, 255))

        red.fps = green.fps = blue.fps = 30
        video = clips_array([[red, green, blue]]).set_duration(1)

        try:
            video.write_videofile(local_video_filename)

            # Open it up with VideoFileClip.
            clip = VideoFileClip(local_video_filename)

            # Normally a client would do processing here.

            # All finished, so delete the clips.
            clip.close()
            video.close()
            del clip
            del video

        except IOError:
            print(
                "On Windows, this succeeds the first few times around the loop"
                " but eventually fails."
            )
            print("Need to shut down the process now. No more tests in this file.")
            return

        try:
            # Now remove the temporary file.
            # This will fail on Windows if the file is still locked.
            # In particular, this raises an exception with PermissionError.
            # As of moviepy 0.2.3.2 there was no way to avoid it.
            remove(local_video_filename)
            print("You are not running Windows, because that worked.")
        except OSError:  # More specifically, PermissionError in Python 3.
            print("Yes, on Windows this fails.")
def test_PR_610():
    """Test that the max fps of video clips is used for the composite video clip."""
    clip1 = ColorClip((640, 480), color=(255, 0, 0)).with_duration(1)
    clip2 = ColorClip((640, 480), color=(0, 255, 0)).with_duration(1)
    clip1.fps = 24
    clip2.fps = 25

    composite = CompositeVideoClip([clip1, clip2])
    assert composite.fps == 25
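# Not moviepy's actual implementation; a minimal sketch of the rule asserted in
# test_PR_610 above: a composite clip takes the highest fps among the clips
# that define one. The helper name `_composite_fps` is an assumption.
def _composite_fps(clips):
    """Return the maximum fps of the given clips, or None if none define one."""
    fps_values = [c.fps for c in clips if getattr(c, "fps", None) is not None]
    return max(fps_values) if fps_values else None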
def test_detect_scenes():
    """Test that a cut is detected between concatenated red and green clips."""
    red = ColorClip((640, 480), color=(255, 0, 0)).with_duration(1)
    green = ColorClip((640, 480), color=(0, 200, 0)).with_duration(1)
    video = concatenate_videoclips([red, green])

    cuts, luminosities = detect_scenes(video, fps=10, logger=None)

    assert len(cuts) == 2
def test_slide_in():
    duration = 0.1
    size = (10, 1)
    fps = 10
    color = (255, 0, 0)

    # left and right sides
    clip = ColorClip(
        color=color,
        duration=duration,
        size=size,
    ).with_fps(fps)

    for side in ["left", "right"]:
        new_clip = CompositeVideoClip([slide_in(clip, duration, side)])

        for t in np.arange(0, duration, duration / fps):
            # The 10 px wide clip slides in over 0.1 s, so at time t roughly
            # 10 * t / 0.1 == 100 * t columns should already be visible.
            n_reds, n_reds_expected = (0, int(t * 100))

            if t:
                assert n_reds_expected

            if n_reds_expected == 7:  # skip 7 due to inaccurate frame
                continue

            for r, g, b in new_clip.get_frame(t)[0]:
                if r == color[0] and g == color[1] and b == color[2]:
                    n_reds += 1

            assert n_reds == n_reds_expected

    # top and bottom sides
    clip = ColorClip(
        color=color,
        duration=duration,
        size=(size[1], size[0]),
    ).with_fps(fps)

    for side in ["top", "bottom"]:
        new_clip = CompositeVideoClip([slide_in(clip, duration, side)])

        for t in np.arange(0, duration, duration / fps):
            n_reds, n_reds_expected = (0, int(t * 100))

            if t:
                assert n_reds_expected

            if n_reds_expected == 7:  # skip 7 due to inaccurate frame
                continue

            for row in new_clip.get_frame(t):
                r, g, b = row[0]
                if r == color[0] and g == color[1] and b == color[2]:
                    n_reds += 1

            assert n_reds == n_reds_expected
def test_clips_array(util):
    red = ColorClip((1024, 800), color=(255, 0, 0))
    green = ColorClip((1024, 800), color=(0, 255, 0))
    blue = ColorClip((1024, 800), color=(0, 0, 255))

    video = clips_array([[red, green, blue]])

    with pytest.raises(ValueError):  # duration not set
        video.fx(resize, width=480).write_videofile(
            os.path.join(util.TMP_DIR, "test_clips_array.mp4")
        )
def test_issue_547():
    red = ColorClip((640, 480), color=(255, 0, 0)).with_duration(1)
    green = ColorClip((640, 480), color=(0, 255, 0)).with_duration(2)
    blue = ColorClip((640, 480), color=(0, 0, 255)).with_duration(3)

    video = concatenate_videoclips([red, green, blue], method="compose")
    assert video.duration == 6
    assert video.mask.duration == 6

    video = concatenate_videoclips([red, green, blue])
    assert video.duration == 6
def test_oncolor():
    # It doesn't need to be a ColorClip
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=4)
    on_color_clip = clip.on_color(size=(200, 160), color=(0, 0, 255))

    location = os.path.join(TMP_DIR, "oncolor.mp4")
    on_color_clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
def test_clip_with_duration(
    duration,
    start,
    end,
    new_duration,
    change_end,
    expected_duration,
    expected_start,
    expected_end,
):
    clip = ColorClip(color=(255, 0, 0), size=(2, 2)).with_fps(1).with_duration(duration)
    if start is not None:
        clip = clip.with_start(start)
    if end is not None:
        clip = clip.with_end(end)

    if hasattr(expected_duration, "__traceback__"):
        with pytest.raises(expected_duration):
            clip.with_duration(new_duration, change_end=change_end)
    else:
        clip = clip.with_duration(new_duration, change_end=change_end)
        assert clip.duration == expected_duration
        assert clip.start == expected_start
        assert clip.end == expected_end
def test_setaudio_with_audiofile(util):
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    audio = AudioFileClip("media/crunching.mp3").subclip(0, 0.5)
    clip = clip.with_audio(audio)

    location = os.path.join(util.TMP_DIR, "setaudiofile.mp4")
    clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
def test_setup():
    """Test VideoFileClip setup."""
    red = ColorClip((1024, 800), color=(255, 0, 0))
    green = ColorClip((1024, 800), color=(0, 255, 0))
    blue = ColorClip((1024, 800), color=(0, 0, 255))

    red.fps = green.fps = blue.fps = 30
    video = clips_array([[red, green, blue]]).set_duration(5)
    video.write_videofile("/tmp/test.mp4")

    assert os.path.exists("/tmp/test.mp4")

    clip = VideoFileClip("/tmp/test.mp4")
    assert clip.duration == 5
    assert clip.fps == 30
    assert clip.size == [1024 * 3, 800]
def create_video(dependencies, targets):
    backing_track_path = output_dir_path / 'accompaniment.wav'

    with open(sync_map_path(output_dir_path), encoding='utf-8') as sync_json_file, \
            open(silences_path(output_dir_path), encoding='utf-8') as silence_json_file:
        lyric_clips = list(
            _generate_lyric_clips(
                json.load(sync_json_file),
                json.load(silence_json_file)
            )
        )

    backing_track_clip = AudioFileClip(str(backing_track_path))
    background_clip = ColorClip(
        size=(1024, 768), color=[0, 0, 0], duration=backing_track_clip.duration
    )
    karaoke = (
        CompositeVideoClip([background_clip] + lyric_clips).
        set_duration(backing_track_clip.duration).
        set_audio(backing_track_clip)
    )
    karaoke.write_videofile(
        str(targets[0]),
        fps=10,
        # Workaround for missing audio
        # https://github.com/Zulko/moviepy/issues/820
        codec='libx264',
        audio_codec='aac',
        temp_audiofile='temp-audio.m4a',
        remove_temp=True
    )
def test_afterimage():
    ai = ImageClip("media/afterimage.png")
    masked_clip = mask_color(ai, color=[0, 255, 1])  # for green

    some_background_clip = ColorClip((800, 600), color=(255, 255, 255))

    final_clip = CompositeVideoClip(
        [some_background_clip, masked_clip], use_bgclip=True
    ).with_duration(0.2)

    final_clip.write_videofile(os.path.join(TMP_DIR, "afterimage.mp4"), fps=30)
def test_clips_array_duration():
    # NOTE: does anyone know what behaviour this tests? If so, please replace
    # this comment.
    red = ColorClip((256, 200), color=(255, 0, 0))
    green = ColorClip((256, 200), color=(0, 255, 0))
    blue = ColorClip((256, 200), color=(0, 0, 255))

    video = clips_array([[red, green, blue]]).with_duration(5)
    with pytest.raises(AttributeError):  # fps not set
        video.write_videofile(os.path.join(TMP_DIR, "test_clips_array.mp4"))

    # this one should work correctly
    red.fps = green.fps = blue.fps = 30
    video = clips_array([[red, green, blue]]).with_duration(5)
    video.write_videofile(os.path.join(TMP_DIR, "test_clips_array.mp4"))

    close_all_clips(locals())
def test_setup():
    """Test VideoFileClip setup."""
    red = ColorClip((256, 200), color=(255, 0, 0))
    green = ColorClip((256, 200), color=(0, 255, 0))
    blue = ColorClip((256, 200), color=(0, 0, 255))

    red.fps = green.fps = blue.fps = 10
    with clips_array([[red, green, blue]]).set_duration(5) as video:
        video.write_videofile(os.path.join(TMP_DIR, "test.mp4"))

    assert os.path.exists(os.path.join(TMP_DIR, "test.mp4"))

    clip = VideoFileClip(os.path.join(TMP_DIR, "test.mp4"))
    assert clip.duration == 5
    assert clip.fps == 10
    assert clip.size == [256 * 3, 200]

    close_all_clips(locals())
def test_setaudio(util):
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    make_frame_440 = lambda t: [np.sin(440 * 2 * np.pi * t)]
    audio = AudioClip(make_frame_440, duration=0.5)
    audio.fps = 44100
    clip = clip.with_audio(audio)

    location = os.path.join(util.TMP_DIR, "setaudio.mp4")
    clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
def generate_intro():
    logger.info('Generating intro...')

    color = (255, 255, 255)
    size = (1280, 720)
    clip = ColorClip(size, color, duration=3)

    logo = ImageClip(config.LOGO_PATH).set_duration(clip.duration) \
        .resize(width=400, height=200) \
        .set_pos(('center', 'center'))

    return CompositeVideoClip([clip, logo])
def test_oncolor(util):
    # It doesn't need to be a ColorClip
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    on_color_clip = clip.on_color(size=(200, 160), color=(0, 0, 255))

    location = os.path.join(util.TMP_DIR, "oncolor.mp4")
    on_color_clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)

    # test constructor with default arguments
    clip = ColorClip(size=(100, 60), is_mask=True)
    clip = ColorClip(size=(100, 60), is_mask=False)

    # negative test
    with pytest.raises(Exception):
        clip = ColorClip(size=(100, 60), color=(255, 0, 0), is_mask=True)

    with pytest.raises(Exception):
        clip = ColorClip(size=(100, 60), color=0.4, is_mask=False)
def test_clip_with_end(duration, start, end, expected_start, expected_duration):
    clip = ColorClip(color=(255, 0, 0), size=(2, 2), duration=duration).with_fps(1)
    if start is not None:
        clip = clip.with_start(start)
    else:
        clip.start = None
    clip = clip.with_end(end)

    assert clip.start == expected_start
    assert clip.duration == expected_duration
def test_setup(util):
    """Test VideoFileClip setup."""
    filename = os.path.join(util.TMP_DIR, "test.mp4")

    red = ColorClip((256, 200), color=(255, 0, 0))
    green = ColorClip((256, 200), color=(0, 255, 0))
    blue = ColorClip((256, 200), color=(0, 0, 255))

    red.fps = green.fps = blue.fps = 10
    with clips_array([[red, green, blue]]).with_duration(5) as video:
        video.write_videofile(filename, logger=None)

    assert os.path.exists(filename)

    clip = VideoFileClip(filename)
    assert clip.duration == 5
    assert clip.fps == 10
    assert clip.size == [256 * 3, 200]
    assert clip.reader.bitrate == 2
def test_setaudio():
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    make_frame_440 = lambda t: [sin(440 * 2 * pi * t)]
    audio = AudioClip(make_frame_440, duration=0.5)
    audio.fps = 44100
    clip = clip.set_audio(audio)

    location = os.path.join(TMP_DIR, "setaudio.mp4")
    clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)

    close_all_clips(locals())
def triple_effect(self, clip, mask_clip, width, height):
    # Solid-colour copies of the clip, used for the tinted side layers.
    red_clip = ColorClip(clip.size, (255, 0, 0), duration=clip.duration)
    blue_clip = ColorClip(clip.size, (0, 0, 255), duration=clip.duration)

    # The untinted person stays centered; the tinted copies are
    # semi-transparent duplicates placed to either side.
    center_person_clip = clip.set_mask(mask_clip).set_position(
        ("center", "center"))
    left_person_clip = red_clip.set_mask(mask_clip).set_opacity(0.5)
    right_person_clip = blue_clip.set_mask(mask_clip).set_opacity(0.5)

    # Offset the side copies by 30% of the clip width from the center.
    left_person_clip_x = (width / 2 - left_person_clip.w / 2) - int(
        left_person_clip.w * 0.3)
    right_person_clip_x = (width / 2 - left_person_clip.w / 2) + int(
        left_person_clip.w * 0.3)
    person_clip_y = height / 2 - left_person_clip.h / 2

    left_person_clip = left_person_clip.set_position(
        (left_person_clip_x, person_clip_y))
    right_person_clip = right_person_clip.set_position(
        (right_person_clip_x, person_clip_y))

    return [left_person_clip, right_person_clip, center_person_clip]