Example #1
1
def test_rotate():
    clip = get_test_video()

    clip1 = rotate(clip, 90)  # rotate 90 degrees
    assert clip1.size == (clip.size[1], clip.size[0])
    clip1.write_videofile(os.path.join(TMP_DIR, "rotate1.webm"))

    clip2 = rotate(clip, 180)  # rotate 180 degrees
    assert clip2.size == tuple(clip.size)
    clip2.write_videofile(os.path.join(TMP_DIR, "rotate2.webm"))

    clip3 = rotate(clip, 270)  # rotate 270 degrees
    assert clip3.size == (clip.size[1], clip.size[0])
    clip3.write_videofile(os.path.join(TMP_DIR, "rotate3.webm"))

    clip4 = rotate(clip, 360)  # rotate 360 degrees
    assert clip4.size == tuple(clip.size)
    clip4.write_videofile(os.path.join(TMP_DIR, "rotate4.webm"))

    clip5 = rotate(clip, 50)
    clip5.write_videofile(os.path.join(TMP_DIR, "rotate5.webm"))

    # Test rotate with color clip
    clip = ColorClip([600, 400], [150, 250, 100]).set_duration(1).set_fps(5)
    clip = rotate(clip, 20)
    clip.write_videofile(os.path.join(TMP_DIR, "color_rotate.webm"))

    close_all_clips(locals())
Example #2
0
def test_issue_407():
    red = ColorClip((800, 600), color=(255, 0, 0)).with_duration(5)
    red.fps = 30

    assert red.fps == 30
    assert red.w == 800
    assert red.h == 600
    assert red.size == (800, 600)

    # A ColorClip has no fps attribute until one is set explicitly.
    green = ColorClip((640, 480), color=(0, 255, 0)).with_duration(2)
    blue = ColorClip((640, 480), color=(0, 0, 255)).with_duration(2)

    assert green.w == blue.w == 640
    assert green.h == blue.h == 480
    assert green.size == blue.size == (640, 480)

    with pytest.raises(AttributeError):
        green.fps

    with pytest.raises(AttributeError):
        blue.fps

    video = concatenate_videoclips([red, green, blue])
    assert video.fps == red.fps
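This test leans on two behaviours: a bare ColorClip has no fps attribute, and concatenate_videoclips adopts an fps from the clips that define one. A minimal counterpart sketch, assuming moviepy's v2-style with_fps helper (the import path differs between releases, e.g. moviepy.editor in 1.x):

from moviepy import ColorClip, concatenate_videoclips  # assumed v2-style import

# Assigning fps up front avoids the AttributeError the test provokes on purpose.
green = ColorClip((640, 480), color=(0, 255, 0)).with_duration(2).with_fps(30)
blue = ColorClip((640, 480), color=(0, 0, 255)).with_duration(2).with_fps(30)

video = concatenate_videoclips([green, blue])
assert video.fps == 30  # both inputs agree on 30 fps, so the result inherits it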
Example #3
0
def test_subtitles():
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    generator = lambda txt: TextClip(txt, font=FONT,
                                     size=(800, 600), fontsize=24,
                                     method='caption', align='South',
                                     color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
    close_all_clips(locals())
Example #4
0
def test_setaudio_with_audiofile(util):
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    audio = AudioFileClip("media/crunching.mp3").subclip(0, 0.5)
    clip = clip.with_audio(audio)
    location = os.path.join(util.TMP_DIR, "setaudiofile.mp4")
    clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
Example #5
0
def test_write_gif(util, clip_class, opt, loop, with_mask, pixel_format):
    filename = os.path.join(util.TMP_DIR, "moviepy_write_gif.gif")
    if os.path.isfile(filename):
        os.remove(filename)

    fps = 10

    if clip_class == "BitmapClip":
        original_clip = BitmapClip([["R"], ["G"], ["B"]],
                                   fps=fps).with_duration(0.3)
    else:
        original_clip = concatenate_videoclips([
            ColorClip(
                (1, 1),
                color=color,
            ).with_duration(0.1).with_fps(fps)
            for color in [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
        ])
    if with_mask:
        original_clip = original_clip.with_mask(
            ColorClip((1, 1), color=1,
                      is_mask=True).with_fps(fps).with_duration(0.3))

    kwargs = {}
    if pixel_format is not None:
        kwargs["pixel_format"] = pixel_format

    write_gif(
        original_clip,
        filename,
        fps=fps,
        with_mask=with_mask,
        program="ffmpeg",
        logger=None,
        opt=opt,
        loop=loop,
        **kwargs,
    )

    if pixel_format != "invalid":

        final_clip = VideoFileClip(filename)

        r, g, b = final_clip.get_frame(0)[0][0]
        assert r == 252
        assert g == 0
        assert b == 0

        r, g, b = final_clip.get_frame(0.1)[0][0]
        assert r == 0
        assert g == 252
        assert b == 0

        r, g, b = final_clip.get_frame(0.2)[0][0]
        assert r == 0
        assert g == 0
        assert b == 255

        assert final_clip.duration == (loop or 1) * round(
            original_clip.duration, 6)
Example #6
0
def test_PR_424():
    """Ensure deprecation and user warnings are triggered."""
    import warnings
    warnings.simplefilter('always')  # Alert us of deprecation warnings.

    # Recommended use
    ColorClip([1000, 600], color=(60, 60, 60), duration=10).close()

    with pytest.warns(DeprecationWarning):
        # Uses `col` so should work the same as above, but give warning.
        ColorClip([1000, 600], col=(60, 60, 60), duration=10).close()

    # Catch all warnings as record.
    with pytest.warns(None) as record:
        # Should give 2 warnings and use `color`, not `col`
        ColorClip([1000, 600], color=(60, 60, 60), duration=10,
                  col=(2, 2, 2)).close()

    message1 = 'The `ColorClip` parameter `col` has been deprecated. ' + \
               'Please use `color` instead.'
    message2 = 'The arguments `color` and `col` have both been passed to ' + \
               '`ColorClip` so `col` has been ignored.'

    # Assert that two warnings popped and validate the message text.
    assert len(record) == 2
    assert str(record[0].message) == message1
    assert str(record[1].message) == message2
Example #7
0
def test_subtitles():
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    # Travis does not like TextClip, so return for now,
    # but allow regular users to still run the test below.
    if TRAVIS:
        return

    generator = lambda txt: TextClip(txt,
                                     font='Liberation-Mono',
                                     size=(800, 600),
                                     fontsize=24,
                                     method='caption',
                                     align='South',
                                     color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.to_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
Example #8
0
def test_oncolor():
    # It doesn't need to be a ColorClip
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=4)
    on_color_clip = clip.on_color(size=(200, 160), color=(0, 0, 255))
    location = os.path.join(TMP_DIR, "oncolor.mp4")
    on_color_clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
Example #9
def test_subtitles(util):
    red = ColorClip((800, 600), color=(255, 0, 0)).with_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).with_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).with_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    generator = lambda txt: TextClip(
        txt,
        font=util.FONT,
        size=(800, 600),
        font_size=24,
        method="caption",
        align="South",
        color="white",
    )

    subtitles = SubtitlesClip("media/subtitles.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.subclip(0, 0.5).write_videofile(
        os.path.join(util.TMP_DIR, "subtitles.mp4"),
        fps=5,
        logger=None,
    )

    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA

    subtitles = SubtitlesClip(MEDIA_SUBTITLES_DATA, generator)
    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA
Example #10
def test_failure_to_release_file():
    """ This isn't really a test, because it is expected to fail.
        It demonstrates that there *is* a problem with not releasing resources when running on 
        Windows.

        The real issue was that, as of moviepy 0.2.3.2, there was no way around it.

        See test_resourcerelease.py to see how the close() methods provide a solution.
    """

    # Get the name of a temporary file we can use.
    local_video_filename = join(
        TMP_DIR, "test_release_of_file_%s.mp4" % int(time.time())
    )

    # Repeat this so we can see that the problems escalate:
    for i in range(5):

        # Create a random video file.
        red = ColorClip((256, 200), color=(255, 0, 0))
        green = ColorClip((256, 200), color=(0, 255, 0))
        blue = ColorClip((256, 200), color=(0, 0, 255))

        red.fps = green.fps = blue.fps = 30
        video = clips_array([[red, green, blue]]).set_duration(1)

        try:
            video.write_videofile(local_video_filename)

            # Open it up with VideoFileClip.
            clip = VideoFileClip(local_video_filename)

            # Normally a client would do processing here.

            # All finished, so delete the clips.
            clip.close()
            video.close()
            del clip
            del video

        except IOError:
            print(
                "On Windows, this succeeds the first few times around the loop"
                " but eventually fails."
            )
            print("Need to shut down the process now. No more tests in" "this file.")
            return

        try:
            # Now remove the temporary file.
            # This will fail on Windows if the file is still locked.

            # In particular, this raises an exception with PermissionError.
            # In moviepy 0.2.3.2, there was no way to avoid it.

            remove(local_video_filename)
            print("You are not running Windows, because that worked.")
        except OSError:  # More specifically, PermissionError in Python 3.
            print("Yes, on Windows this fails.")
Example #11
0
def test_oncolor():
    # It doesn't need to be a ColorClip
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    on_color_clip = clip.on_color(size=(200, 160), color=(0, 0, 255))
    location = os.path.join(TMP_DIR, "oncolor.mp4")
    on_color_clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
    close_all_clips(locals())
Example #12
0
def test_setaudio_with_audiofile():
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    audio = AudioFileClip("media/crunching.mp3").subclip(0, 0.5)
    clip = clip.set_audio(audio)
    location = os.path.join(TMP_DIR, "setaudiofile.mp4")
    clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
    close_all_clips(locals())
Example #13
0
def test_setaudio(util):
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    make_frame_440 = lambda t: [np.sin(440 * 2 * np.pi * t)]
    audio = AudioClip(make_frame_440, duration=0.5)
    audio.fps = 44100
    clip = clip.with_audio(audio)
    location = os.path.join(util.TMP_DIR, "setaudio.mp4")
    clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
Example #14
0
def test_detect_scenes():
    """Test that a cut is detected between concatenated red and green clips."""
    red = ColorClip((640, 480), color=(255, 0, 0)).with_duration(1)
    green = ColorClip((640, 480), color=(0, 200, 0)).with_duration(1)
    video = concatenate_videoclips([red, green])

    cuts, luminosities = detect_scenes(video, fps=10, logger=None)

    assert len(cuts) == 2
Example #15
0
def test_slide_in():
    duration = 0.1
    size = (10, 1)
    fps = 10
    color = (255, 0, 0)

    # left and right sides
    clip = ColorClip(
        color=color,
        duration=duration,
        size=size,
    ).with_fps(fps)

    for side in ["left", "right"]:
        new_clip = CompositeVideoClip([slide_in(clip, duration, side)])

        for t in np.arange(0, duration, duration / fps):
            n_reds, n_reds_expected = (0, int(t * 100))

            if t:
                assert n_reds_expected

            if n_reds_expected == 7:  # skip 7 due to inaccurate frame
                continue

            for r, g, b in new_clip.get_frame(t)[0]:
                if r == color[0] and g == color[1] and b == color[2]:
                    n_reds += 1

            assert n_reds == n_reds_expected

    # top and bottom sides
    clip = ColorClip(
        color=color,
        duration=duration,
        size=(size[1], size[0]),
    ).with_fps(fps)

    for side in ["top", "bottom"]:
        new_clip = CompositeVideoClip([slide_in(clip, duration, side)])
        for t in np.arange(0, duration, duration / fps):
            n_reds, n_reds_expected = (0, int(t * 100))

            if t:
                assert n_reds_expected

            if n_reds_expected == 7:  # skip 7 due to inaccurate frame
                continue

            for row in new_clip.get_frame(t):
                r, g, b = row[0]

                if r == color[0] and g == color[1] and b == color[2]:
                    n_reds += 1

            assert n_reds == n_reds_expected
Example #16
0
def test_setaudio():
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    make_frame_440 = lambda t: [sin(440 * 2 * pi * t)]
    audio = AudioClip(make_frame_440, duration=0.5)
    audio.fps = 44100
    clip = clip.set_audio(audio)
    location = os.path.join(TMP_DIR, "setaudio.mp4")
    clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
    close_all_clips(locals())
Example #17
0
def test_setaudio():
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    make_frame_440 = lambda t: [sin(440 * 2 * pi * t)]
    audio = AudioClip(make_frame_440, duration=0.5)
    audio.fps = 44100
    clip = clip.set_audio(audio)
    location = os.path.join(TMP_DIR, "setaudio.mp4")
    clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
    close_all_clips(locals())
Example #18
0
def test_clip_with_end(duration, start, end, expected_start, expected_duration):
    clip = ColorClip(color=(255, 0, 0), size=(2, 2), duration=duration).with_fps(1)
    if start is not None:
        clip = clip.with_start(start)
    else:
        clip.start = None
    clip = clip.with_end(end)

    assert clip.start == expected_start
    assert clip.duration == expected_duration
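A concrete sketch of the with_end behaviour this parametrized test exercises, assuming moviepy's v2-style API: when start is unset, pinning the end slides the start to end - duration (clamped at zero) while the duration is preserved.

from moviepy import ColorClip  # assumed v2-style import

clip = ColorClip(color=(255, 0, 0), size=(2, 2), duration=2).with_fps(1)
clip.start = None         # mirror the `start is None` branch of the test
clip = clip.with_end(5)   # duration stays 2, start becomes max(0, 5 - 2)

assert (clip.start, clip.end, clip.duration) == (3, 5, 2)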
Example #19
0
def test_clips_array(util):
    red = ColorClip((1024, 800), color=(255, 0, 0))
    green = ColorClip((1024, 800), color=(0, 255, 0))
    blue = ColorClip((1024, 800), color=(0, 0, 255))

    video = clips_array([[red, green, blue]])

    with pytest.raises(ValueError):  # duration not set
        video.fx(resize, width=480).write_videofile(
            os.path.join(util.TMP_DIR, "test_clips_array.mp4"))
Example #20
0
def test_release_of_file_via_close():
    # Create a random video file.
    red = ColorClip((1024, 800), color=(255, 0, 0))
    green = ColorClip((1024, 800), color=(0, 255, 0))
    blue = ColorClip((1024, 800), color=(0, 0, 255))

    red.fps = green.fps = blue.fps = 30

    # Repeat this so we can see no conflicts.
    for i in range(5):
        # Get the name of a temporary file we can use.
        local_video_filename = join(TMP_DIR, "test_release_of_file_via_close_%s.mp4" % int(time.time()))

        with clips_array([[red, green, blue]]) as ca:
            video = ca.set_duration(1)

            video.write_videofile(local_video_filename)

        # Open it up with VideoFileClip.
        with VideoFileClip(local_video_filename) as clip:
            # Normally a client would do processing here.
            pass

        # Now remove the temporary file.
        # This would fail on Windows if the file is still locked.

        # This should succeed without exceptions.
        remove(local_video_filename)

    red.close()
    green.close()
    blue.close()
Example #21
0
def test_PR_610():
    """
    Test that the max fps of the video clips is used for the composite video clip
    """
    clip1 = ColorClip((640, 480), color=(255, 0, 0)).set_duration(1)
    clip2 = ColorClip((640, 480), color=(0, 255, 0)).set_duration(1)
    clip1.fps = 24
    clip2.fps = 25
    composite = CompositeVideoClip([clip1, clip2])

    assert composite.fps == 25
Example #22
0
def test_issue_547():
    red = ColorClip((640, 480), color=(255, 0, 0)).with_duration(1)
    green = ColorClip((640, 480), color=(0, 255, 0)).with_duration(2)
    blue = ColorClip((640, 480), color=(0, 0, 255)).with_duration(3)

    video = concatenate_videoclips([red, green, blue], method="compose")
    assert video.duration == 6
    assert video.mask.duration == 6

    video = concatenate_videoclips([red, green, blue])
    assert video.duration == 6
Example #23
0
def test_setup():
    """Test VideoFileClip setup."""
    red = ColorClip((1024, 800), color=(255, 0, 0))
    green = ColorClip((1024, 800), color=(0, 255, 0))
    blue = ColorClip((1024, 800), color=(0, 0, 255))

    red.fps = green.fps = blue.fps = 30
    video = clips_array([[red, green, blue]]).set_duration(5)
    video.write_videofile("/tmp/test.mp4")

    assert os.path.exists("/tmp/test.mp4")

    clip = VideoFileClip("/tmp/test.mp4")
    assert clip.duration == 5
    assert clip.fps == 30
    assert clip.size == [1024 * 3, 800]
Example #24
0
 def create_video(dependencies, targets):
     backing_track_path = output_dir_path / 'accompaniment.wav'
     with open(sync_map_path(output_dir_path), encoding='utf-8') as sync_json_file, \
         open(silences_path(output_dir_path), encoding='utf-8') as silence_json_file:
         lyric_clips = list(
             _generate_lyric_clips(
                 json.load(sync_json_file),
                 json.load(silence_json_file)
             )
         )
     backing_track_clip = AudioFileClip(str(backing_track_path))
     background_clip = ColorClip(
         size=(1024, 768), color=[0, 0, 0],
         duration=backing_track_clip.duration
     )
     karaoke = (
         CompositeVideoClip([background_clip] + lyric_clips).
         set_duration(backing_track_clip.duration).
         set_audio(backing_track_clip)
     )
     karaoke.write_videofile(
         str(targets[0]),
         fps=10,
         # Workaround for missing audio
         # https://github.com/Zulko/moviepy/issues/820
         codec='libx264',
         audio_codec='aac',
         temp_audiofile='temp-audio.m4a',
         remove_temp=True
     )
Example #25
0
def test_afterimage():
    ai = ImageClip("media/afterimage.png")
    masked_clip = mask_color(ai, color=[0, 255, 1])  # for green
    some_background_clip = ColorClip((800, 600), color=(255, 255, 255))
    final_clip = CompositeVideoClip([some_background_clip, masked_clip],
                                    use_bgclip=True).with_duration(0.2)
    final_clip.write_videofile(os.path.join(TMP_DIR, "afterimage.mp4"), fps=30)
Example #26
0
def test_clips_array_duration():
    # NOTE: does anyone know what behaviour this sets? If yes, please replace
    # this comment.
    red = ColorClip((256, 200), color=(255, 0, 0))
    green = ColorClip((256, 200), color=(0, 255, 0))
    blue = ColorClip((256, 200), color=(0, 0, 255))

    video = clips_array([[red, green, blue]]).with_duration(5)
    with pytest.raises(AttributeError):  # fps not set
        video.write_videofile(os.path.join(TMP_DIR, "test_clips_array.mp4"))

    # this one should work correctly
    red.fps = green.fps = blue.fps = 30
    video = clips_array([[red, green, blue]]).with_duration(5)
    video.write_videofile(os.path.join(TMP_DIR, "test_clips_array.mp4"))
    close_all_clips(locals())
Example #27
0
def test_setup():
    """Test VideoFileClip setup."""
    red = ColorClip((256, 200), color=(255, 0, 0))
    green = ColorClip((256, 200), color=(0, 255, 0))
    blue = ColorClip((256, 200), color=(0, 0, 255))

    red.fps = green.fps = blue.fps = 10
    with clips_array([[red, green, blue]]).set_duration(5) as video:
        video.write_videofile(os.path.join(TMP_DIR, "test.mp4"))

    assert os.path.exists(os.path.join(TMP_DIR, "test.mp4"))

    clip = VideoFileClip(os.path.join(TMP_DIR, "test.mp4"))
    assert clip.duration == 5
    assert clip.fps == 10
    assert clip.size == [256 * 3, 200]
    close_all_clips(locals())
Example #28
0
def test_clip_with_duration(
    duration,
    start,
    end,
    new_duration,
    change_end,
    expected_duration,
    expected_start,
    expected_end,
):
    clip = ColorClip(color=(255, 0, 0), size=(2, 2)).with_fps(1).with_duration(duration)
    if start is not None:
        clip = clip.with_start(start)
    if end is not None:
        clip = clip.with_end(end)

    if hasattr(expected_duration, "__traceback__"):
        with pytest.raises(expected_duration):
            clip.with_duration(new_duration, change_end=change_end)
    else:
        clip = clip.with_duration(new_duration, change_end=change_end)

        assert clip.duration == expected_duration
        assert clip.start == expected_start
        assert clip.end == expected_end
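A concrete sketch of the change_end switch this parametrized test drives, assuming moviepy's v2-style API: with_duration either keeps the start and moves the end (the default) or keeps the end and moves the start.

from moviepy import ColorClip  # assumed v2-style import

clip = ColorClip(color=(255, 0, 0), size=(2, 2)).with_fps(1)
clip = clip.with_start(1).with_end(4)              # start=1, end=4, duration=3

shorter = clip.with_duration(2)                    # keep start: end moves to 3
shifted = clip.with_duration(2, change_end=False)  # keep end: start moves to 2

assert (shorter.start, shorter.end) == (1, 3)
assert (shifted.start, shifted.end) == (2, 4)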
Example #29
0
 def generate_intro():
     logger.info('Generating intro...')
     color = (255, 255, 255)
     size = (1280, 720)
     clip = ColorClip(size, color, duration=3)
     logo = ImageClip(config.LOGO_PATH).set_duration(clip.duration) \
         .resize(width=400, height=200) \
         .set_pos(('center', 'center'))
     return CompositeVideoClip([clip, logo])
Example #30
0
def test_setup(util):
    """Test VideoFileClip setup."""
    filename = os.path.join(util.TMP_DIR, "test.mp4")

    red = ColorClip((256, 200), color=(255, 0, 0))
    green = ColorClip((256, 200), color=(0, 255, 0))
    blue = ColorClip((256, 200), color=(0, 0, 255))

    red.fps = green.fps = blue.fps = 10
    with clips_array([[red, green, blue]]).with_duration(5) as video:
        video.write_videofile(filename, logger=None)

    assert os.path.exists(filename)

    clip = VideoFileClip(filename)
    assert clip.duration == 5
    assert clip.fps == 10
    assert clip.size == [256 * 3, 200]
    assert clip.reader.bitrate == 2
Example #31
0
def test_afterimage():
    ai = ImageClip("media/afterimage.png")
    masked_clip = mask_color(ai, color=[0, 255, 1])  # for green

    some_background_clip = ColorClip((800, 600), color=(255, 255, 255))

    final_clip = CompositeVideoClip([some_background_clip, masked_clip],
                                    use_bgclip=True)
    final_clip.duration = 5
    final_clip.write_videofile("/tmp/afterimage.mp4", fps=30)
Example #32
0
    def __init__(self,
                 clips,
                 size=None,
                 bg_color=None,
                 transparent=False,
                 ismask=False):

        if size is None:
            size = clips[0].size

        if bg_color is None:
            bg_color = 0.0 if ismask else (0, 0, 0)

        VideoClip.__init__(self)

        self.size = size
        self.ismask = ismask
        self.clips = clips
        self.transparent = transparent
        self.bg_color = bg_color
        self.bg = ColorClip(size, col=self.bg_color).get_frame(0)

        # compute duration
        ends = [c.end for c in self.clips]
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)
            self.end = max(ends)

        # compute audio
        audioclips = [v.audio for v in self.clips if v.audio is not None]
        if len(audioclips) > 0:
            self.audio = CompositeAudioClip(audioclips)

        # compute mask
        if transparent:
            maskclips = [
                c.mask.set_pos(c.pos) for c in self.clips if c.mask is not None
            ]
            if maskclips != []:
                self.mask = CompositeVideoClip(maskclips,
                                               self.size,
                                               transparent=False,
                                               ismask=True)

        def make_frame(t):
            """ The clips playing at time `t` are blitted over one
                another. """

            f = self.bg
            for c in self.playing_clips(t):
                f = c.blit_on(f, t)
            return f

        self.make_frame = make_frame
Example #33
0
def test_concatenate_floating_point(util):
    """
    >>> print("{0:.20f}".format(1.12))
    1.12000000000000010658

    This test uses duration=1.12 to check that it still works when the clip
    duration is represented as being bigger than it actually is. Fixed in #1195.
    """
    clip = ColorClip([100, 50], color=[255, 128, 64],
                     duration=1.12).with_fps(25.0)
    concat = concatenate_videoclips([clip])
    concat.write_videofile(os.path.join(util.TMP_DIR, "concat.mp4"),
                           preset="ultrafast")
Example #34
 def create_thumbnail(self):
     logger.info('Creating thumbnail...')
     color = (255, 255, 255)
     size = (1280, 720)
     background = ColorClip(size, color)
     logo = ImageClip(config.LOGO_PATH).set_duration(1) \
         .resize(width=400, height=200) \
         .set_pos(('center', 'center'))
     text = TextClip(txt=str(self.id), size=(500, 500)).set_position(
         ('center', 'bottom'))
     CompositeVideoClip([background, logo,
                         text]).save_frame(config.THUMB_PATH)
     logger.info('Thumbnail saved...')
Example #35
0
def on_color(clip, size=None, color=(0, 0, 0), pos=None, col_opacity=None):
    """
    Returns a clip made of the current clip overlaid on a color
    clip of a possibly bigger size. Can serve to flatten transparent
    clips (ideal for previewing clips with masks).

    :param size: size of the final clip. By default it will be the
       size of the current clip.
    :param color: the background color of the final clip
    :param pos: the position of the clip in the final clip.
    :param col_opacity: opacity of the added color zones; leave as None for a fully opaque background
    """
    if size is None:
        size = clip.size
    if pos is None:
        pos = "center"
    colorclip = ColorClip(size, color=color)
    if col_opacity:
        colorclip = colorclip.with_mask().with_opacity(col_opacity)

    return CompositeVideoClip([colorclip, clip.with_position(pos)],
                              transparent=(col_opacity is not None))
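A brief usage sketch for the on_color helper above (hypothetical values; it assumes a moviepy-style ColorClip import and the CompositeVideoClip signature this snippet was written against):

from moviepy import ColorClip  # assumed import; path differs by release

small = ColorClip((100, 60), color=(255, 0, 0), duration=1)

# Centre the small red clip on a 200x160, half-transparent blue background.
framed = on_color(small, size=(200, 160), color=(0, 0, 255),
                  pos="center", col_opacity=0.5)

assert framed.size == (200, 160)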
Example #36
0
def on_color(clip, size=None, color=(0, 0, 0), pos=None, col_opacity=None):
    """ 
    Returns a clip made of the current clip overlaid on a color
    clip of a possibly bigger size. Can serve to flatten transparent
    clips (ideal for previewing clips with masks).
    
    :param size: size of the final clip. By default it will be the
       size of the current clip.
    :param color: the background color of the final clip
    :param pos: the position of the clip in the final clip.
    :param col_opacity: opacity of the added color zones; leave as None for a fully opaque background
    """
    
    if size is None:
        size = clip.size
    if pos is None:
        pos = 'center'
    colorclip = ColorClip(size, color)
    if col_opacity:
        colorclip = colorclip.with_mask().set_opacity(col_opacity)

    return CompositeVideoClip([colorclip, clip.set_pos(pos)],
                              transparent=(col_opacity is not None))
Example #37
0
def test_setup():
    """Test VideoFileClip setup."""
    red = ColorClip((1024,800), color=(255,0,0))
    green = ColorClip((1024,800), color=(0,255,0))
    blue = ColorClip((1024,800), color=(0,0,255))

    red.fps = green.fps = blue.fps = 30
    with clips_array([[red, green, blue]]).set_duration(5) as video:
        video.write_videofile(os.path.join(TMP_DIR, "test.mp4"))

    assert os.path.exists(os.path.join(TMP_DIR, "test.mp4"))

    with VideoFileClip(os.path.join(TMP_DIR, "test.mp4")) as clip:
        assert clip.duration == 5
        assert clip.fps == 30
        assert clip.size == [1024*3, 800]

    red.close()
    green.close()
    blue.close()
Example #38
0
def test_PR_458():
    clip = ColorClip([1000, 600], color=(60, 60, 60), duration=10)
    clip.write_videofile(os.path.join(TMP_DIR, "test.mp4"),
                         progress_bar=False, fps=30)
    clip.close()
Example #39
0
    def __init__(self, clips, size=None, bg_color=None, use_bgclip=False,
                 ismask=False):

        if size is None:
            size = clips[0].size

        
        if use_bgclip and (clips[0].mask is None):
            transparent = False
        else:
            transparent = (bg_color is None)
        
        if bg_color is None:
            bg_color = 0.0 if ismask else (0, 0, 0)

        
        fps_list = list(set([c.fps for c in clips if hasattr(c,'fps')]))
        if len(fps_list)==1:
            self.fps= fps_list[0]

        VideoClip.__init__(self)
        
        self.size = size
        self.ismask = ismask
        self.clips = clips
        self.bg_color = bg_color

        if use_bgclip:
            self.bg = clips[0]
            self.clips = clips[1:]
        else:
            self.clips = clips
            self.bg = ColorClip(size, col=self.bg_color)

        
        
        # compute duration
        ends = [c.end for c in self.clips]
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)
            self.end = max(ends)

        # compute audio
        audioclips = [v.audio for v in self.clips if v.audio is not None]
        if len(audioclips) > 0:
            self.audio = CompositeAudioClip(audioclips)

        # compute mask if necessary
        if transparent:
            maskclips = [(c.mask if (c.mask is not None) else
                          c.add_mask().mask).set_pos(c.pos).set_end(c.end).set_start(c.start, change_end=False)
                          for c in self.clips]

            self.mask = CompositeVideoClip(maskclips,self.size, ismask=True,
                                               bg_color=0.0)

        def make_frame(t):
            """ The clips playing at time `t` are blitted over one
                another. """

            f = self.bg.get_frame(t)
            for c in self.playing_clips(t):
                    f = c.blit_on(f, t)
            return f

        self.make_frame = make_frame
Example #40
0
class CompositeVideoClip(VideoClip):

    """ 
    
    A VideoClip made of other videoclips displayed together. This is the
    base class for most compositions.
    
    Parameters
    ----------

    size
      The size (width, height) of the final clip.

    clips
      A list of videoclips. Each clip of the list will
      be displayed below the clips appearing after it in the list.
      For each clip:
       
      - The attribute ``pos`` determines where the clip is placed.
          See ``VideoClip.set_pos``
      - The mask of the clip determines which parts are visible.
        
      Finally, if all the clips in the list have their ``duration``
      attribute set, then the duration of the composite video clip
      is computed automatically

    bg_color
      Color for the unmasked and unfilled regions. Set to None for these
      regions to be transparent (will be slower).

    use_bgclip
      Set to True if the first clip in the list should be used as the
      'background' on which all other clips are blitted. That first clip must
      have the same size as the final clip. If it has no transparency, the final
      clip will have no mask. 
    
    If all clips with a fps attribute have the same fps, it becomes the fps of
    the result.

    """

    def __init__(self, clips, size=None, bg_color=None, use_bgclip=False,
                 ismask=False):

        if size is None:
            size = clips[0].size

        
        if use_bgclip and (clips[0].mask is None):
            transparent = False
        else:
            transparent = (bg_color is None)
        
        if bg_color is None:
            bg_color = 0.0 if ismask else (0, 0, 0)

        
        fps_list = list(set([c.fps for c in clips if hasattr(c,'fps')]))
        if len(fps_list)==1:
            self.fps= fps_list[0]

        VideoClip.__init__(self)
        
        self.size = size
        self.ismask = ismask
        self.clips = clips
        self.bg_color = bg_color

        if use_bgclip:
            self.bg = clips[0]
            self.clips = clips[1:]
        else:
            self.clips = clips
            self.bg = ColorClip(size, col=self.bg_color)

        
        
        # compute duration
        ends = [c.end for c in self.clips]
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)
            self.end = max(ends)

        # compute audio
        audioclips = [v.audio for v in self.clips if v.audio is not None]
        if len(audioclips) > 0:
            self.audio = CompositeAudioClip(audioclips)

        # compute mask if necessary
        if transparent:
            maskclips = [(c.mask if (c.mask is not None) else
                          c.add_mask().mask).set_pos(c.pos).set_end(c.end).set_start(c.start, change_end=False)
                          for c in self.clips]

            self.mask = CompositeVideoClip(maskclips,self.size, ismask=True,
                                               bg_color=0.0)

        def make_frame(t):
            """ The clips playing at time `t` are blitted over one
                another. """

            f = self.bg.get_frame(t)
            for c in self.playing_clips(t):
                    f = c.blit_on(f, t)
            return f

        self.make_frame = make_frame

    def playing_clips(self, t=0):
        """ Returns a list of the clips in the composite clips that are
            actually playing at the given time `t`. """
        return [c for c in self.clips if c.is_playing(t)]
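A brief usage sketch for the class above (hypothetical clips; it follows the set_*-era API of this snapshot and assumes a moviepy.editor-style import):

from moviepy.editor import ColorClip, CompositeVideoClip  # assumed import path

# Colour is passed positionally so the sketch works whether this snapshot's
# ColorClip keyword is the old `col` or the newer `color`.
bg = ColorClip((640, 480), (0, 0, 0)).set_duration(2)
fg = ColorClip((100, 100), (255, 0, 0)).set_duration(1).set_pos("center")

# Later clips are drawn on top of earlier ones; because every clip has an end
# set, the composite duration is computed automatically as max(ends) == 2.
video = CompositeVideoClip([bg, fg])
assert video.duration == 2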