Example #1
def test_duration():
    clip = TextClip("hello world", size=(1280, 720), color="white", font=FONT)
    clip = clip.set_duration(5)
    assert clip.duration == 5
    clip.close()

    clip2 = clip.fx(blink, d_on=1, d_off=1)
    clip2 = clip2.set_duration(5)
    assert clip2.duration == 5
    close_all_clips(locals())
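The snippets in this section omit their imports. A minimal sketch of what Example #1 needs under the MoviePy 1.x module layout (the FONT value below is a placeholder; the original test imports it from MoviePy's own test helpers):

from moviepy.editor import TextClip
from moviepy.utils import close_all_clips
from moviepy.video.fx.blink import blink

FONT = "DejaVu-Sans"  # placeholder ImageMagick font name; the real test imports FONT from its helpers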
Example #2
def test_duration():

    clip = TextClip('hello world', size=(1280,720), color='white')
    clip = clip.set_duration(5) #  Changed due to #598.
    assert clip.duration == 5
    clip.close()

    clip2 = clip.fx(blink, d_on=1, d_off=1)
    clip2 = clip2.set_duration(5)
    assert clip2.duration == 5
    close_all_clips(locals())
Example #3
def test_duration(util):
    clip = TextClip("hello world",
                    size=(1280, 720),
                    color="white",
                    font=util.FONT)
    clip = clip.with_duration(5)
    assert clip.duration == 5
    clip.close()

    clip2 = clip.fx(blink, duration_on=1, duration_off=1)
    clip2 = clip2.with_duration(5)
    assert clip2.duration == 5
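This variant targets the renamed MoviePy API (set_duration becomes with_duration, and blink's d_on/d_off become duration_on/duration_off). The util argument is a pytest fixture that supplies a font path in the source project; a hedged sketch of the imports and a stand-in for that fixture value, assuming the 2.0-dev module layout:

from moviepy.video.VideoClip import TextClip
from moviepy.video.fx.blink import blink

FONT = "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"  # placeholder; util.FONT resolves to a real font file in the source project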
Example #4
def test_duration():
    #TextClip returns the following error under Travis (issue with Imagemagick)
    #convert.im6: not authorized `@/tmp/tmpWL7I3M.txt' @ error/property.c/InterpretImageProperties/3057.
    #convert.im6: no images defined `PNG32:/tmp/tmpRZVqGQ.png' @ error/convert.c/ConvertImageCommand/3044.
    if TRAVIS:
        return

    clip = TextClip('hello world', size=(1280, 720), color='white')
    clip = clip.set_duration(5)  #  Changed due to #598.
    assert clip.duration == 5
    clip.close()

    clip2 = clip.fx(blink, d_on=1, d_off=1)
    clip2 = clip2.set_duration(5)
    assert clip2.duration == 5
    clip2.close()
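TRAVIS is not defined inside the snippet; it is presumably a flag the test module derives from the CI environment so the test can be skipped where ImageMagick is restricted. A minimal stand-in, assuming Travis CI's standard environment variable:

import os

TRAVIS = os.getenv("TRAVIS") == "true"  # assumption: detect Travis CI via the TRAVIS env var it sets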
Example #5
def orchestrate_video_creation(image_list, text):
    # Pop one image to probe the frame size; note that this removes it from
    # the list, so it is not written into the video itself.
    first_image_reference = image_list.pop()

    generator = create_opencv_image_from_stringio([first_image_reference])
    first_image = next(generator)
    height, width, layers = first_image.shape

    size = (width, height)
    print("resolution of images", size)
    video_path = 'project.mp4'
    # Spread the remaining frames over roughly 12 seconds of video.
    frames_per_second = len(image_list) / 12
    print("frames per second", frames_per_second)
    out = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'),
                          frames_per_second, size)

    # Decode the remaining images and write each one as a video frame.
    for img in create_opencv_image_from_stringio(image_list):
        out.write(img)

    out.release()

    # Brief pause before post-processing the freshly written file.
    sleep(1)

    # Post-process the raw video against the given aspect-ratio, duration,
    # and size constraints, saving the result to 'second.mp4'.
    prepare_video(video_path,
                  aspect_ratios=(9 / 16),
                  max_duration=14.9,
                  min_size=(612, 612),
                  max_size=(1080, 1920),
                  save_path='second.mp4')

    # TODO: set text a bit up from bottom
    text_clip = (TextClip(text, fontsize=54, color='blue')
                 .set_position("center")
                 .set_duration(4))
    clip = VideoFileClip('second.mp4', audio=False)
    final_clip = CompositeVideoClip([clip, text_clip])
    final_clip.write_videofile(video_path, fps=frames_per_second)
    text_clip.close()
    clip.close()
    final_clip.close()
    return {'file': video_path}
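create_opencv_image_from_stringio and prepare_video are project-local helpers that the snippet does not show. A hypothetical sketch of the decoder generator, together with the library imports the function itself relies on (assuming the helper receives in-memory image buffers):

import cv2
import numpy as np
from time import sleep

from moviepy.editor import TextClip, VideoFileClip, CompositeVideoClip


def create_opencv_image_from_stringio(image_buffers, color_mode=cv2.IMREAD_COLOR):
    # Hypothetical implementation: decode each in-memory buffer (BytesIO or
    # raw bytes) into an OpenCV BGR array and yield the frames lazily.
    for buf in image_buffers:
        data = buf.getvalue() if hasattr(buf, "getvalue") else buf
        array = np.frombuffer(data, dtype=np.uint8)
        yield cv2.imdecode(array, color_mode)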