Example #1
0
def test_issue_547():
    """Regression test for issue #547: concatenation must sum clip durations."""
    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
    durations = [1, 2, 3]
    clips = [
        ColorClip((640, 480), color=rgb).with_duration(seconds)
        for rgb, seconds in zip(colors, durations)
    ]

    composed = concatenate_videoclips(clips, method="compose")
    assert composed.duration == 6
    assert composed.mask.duration == 6

    chained = concatenate_videoclips(clips)
    assert chained.duration == 6
Example #2
0
def composite_by_order(scenes):
    """Concatenate the scenes' subclips in list order, trimming 0.05 s off each end."""
    subclips = [
        scene[0].subclip(scene[1][0].get_seconds() + 0.05,
                         scene[1][1].get_seconds() - 0.05)
        for scene in scenes
    ]
    return concatenate_videoclips(subclips)
Example #3
0
def render():
    """Validate the UI state, build the bit-depth slideshow and write ``result.mp4``.

    Reads module-level state: ``img1`` (the chosen 64-bit image path),
    ``pixelized`` (flag set by the pixelation step) and ``img2`` (the chosen
    128-bit image path). On any missing prerequisite a modal QMessageBox is
    shown and the function aborts.
    """
    # Guard: a 64-bit image must be selected first.
    if img1 is None:
        alert = QMessageBox()
        alert.setText('64비트 이미지를 선택해주세요.')  # "Please select a 64-bit image."
        alert.exec_()
        return

    # Guard: the pixelation step must have been run.
    if not pixelized:
        alert = QMessageBox()
        alert.setText('픽셀화를 해주세요.')  # "Please pixelate first."
        alert.exec_()
        return

    # Guard: a 128-bit image must be selected.
    if img2 is None:
        alert = QMessageBox()
        alert.setText('128비트 이미지를 선택해주세요.')  # "Please select a 128-bit image."
        alert.exec_()
        return

    # Prepare the final 128-bit frame from the selected image.
    image = Image.open(img2)
    resize(image).save('./resource/128bit.png')
    # One clip per bit-depth frame; later (higher-depth) frames get longer screen time.
    clips = [ImageClip(f'./resource/{m}').set_duration(1) for m in ['1bit.png', '2bit.png', '4bit.png', '8bit.png']]
    clips.append(ImageClip('./resource/16bit.png').set_duration(1.6))
    clips.append(ImageClip('./resource/32bit.png').set_duration(1.8))
    clips.append(ImageClip('./resource/64bit.png').set_duration(2))
    clips.append(ImageClip('./resource/128bit.png').set_duration(1))
    concat_clip = concatenate_videoclips(clips, method="compose")
    concat_clip.audio = AudioFileClip(r"./resource/audio.mp3")
    concat_clip.write_videofile("result.mp4", fps=24)
    
    # Notify the user that rendering finished.
    alert = QMessageBox()
    alert.setText('렌더링 완료 result.mp4가 생성되었습니다.')  # "Rendering done; result.mp4 was created."
    alert.exec_()
def test_subtitles(util):
    """SubtitlesClip should parse the .srt file and composite over the video."""
    red = ColorClip((800, 600), color=(255, 0, 0)).with_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).with_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).with_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    # Render each subtitle line as a white caption anchored at the bottom.
    def generator(txt):
        return TextClip(
            txt,
            font=util.FONT,
            size=(800, 600),
            font_size=24,
            method="caption",
            align="South",
            color="white",
        )

    subtitles = SubtitlesClip("media/subtitles.srt", generator)
    composite = CompositeVideoClip([myvideo, subtitles])
    output_path = os.path.join(util.TMP_DIR, "subtitles.mp4")
    composite.subclip(0, 0.5).write_videofile(output_path, fps=5, logger=None)

    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA

    # Building from the data list directly should round-trip too.
    subtitles = SubtitlesClip(MEDIA_SUBTITLES_DATA, generator)
    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA
Example #5
0
def test_subtitles():
    """SubtitlesClip should parse ``media/subtitles1.srt`` into the expected data."""
    # 30 s of solid-colour background video to put the subtitles on.
    red = ColorClip((800, 600), color=(255,0,0)).set_duration(10)
    green = ColorClip((800, 600), color=(0,255,0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0,0,255)).set_duration(10)
    myvideo = concatenate_videoclips([red,green,blue])
    assert myvideo.duration == 30

    # Travis CI does not like TextClip, so return for now,
    # but allow regular users to still run the test below.
    if TRAVIS:
       return

    # Renders each subtitle line as a white caption anchored at the bottom.
    generator = lambda txt: TextClip(txt, font='Liberation-Mono',
                                     size=(800,600), fontsize=24,
                                     method='caption', align='South',
                                     color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.to_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    # Expected ([start, end], text) pairs parsed from the .srt file.
    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    assert subtitles.subtitles == data

    # Building from the data list directly should round-trip too.
    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
Example #6
0
    def subfx(self, fx, start_time=0, end_time=None, **kwargs):
        """Apply a transformation to a part of the clip.

        Returns a new clip in which the function ``fx`` (clip -> clip) has
        been applied only to the subclip between times ``start_time`` and
        ``end_time`` (in seconds); the rest of the clip is left untouched.

        Examples
        --------

        >>> # The scene between times t=3s and t=6s in ``clip`` will be
        >>> # be played twice slower in ``new_clip``
        >>> new_clip = clip.subfx(lambda c:c.multiply_speed(0.5) , 3,6)

        """
        # beurk, have to find other solution
        from moviepy.video.compositing.concatenate import concatenate_videoclips

        pieces = []
        if start_time != 0:
            pieces.append(self.subclip(0, start_time))
        pieces.append(self.subclip(start_time, end_time).fx(fx, **kwargs))
        if end_time is not None:
            pieces.append(self.subclip(start_time=end_time))

        return concatenate_videoclips(pieces).with_start(self.start)
Example #7
0
def test_subtitles():
    """SubtitlesClip should parse ``media/subtitles1.srt`` into the expected data."""
    # 30 s of solid-colour background video to put the subtitles on.
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    # Renders each subtitle line as a white caption anchored at the bottom.
    generator = lambda txt: TextClip(txt, font=FONT,
                                     size=(800, 600), fontsize=24,
                                     method='caption', align='South',
                                     color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    # Expected ([start, end], text) pairs parsed from the .srt file.
    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    assert subtitles.subtitles == data

    # Building from the data list directly should round-trip too.
    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
    close_all_clips(locals())
Example #8
0
def test_subtitles():
    """Subtitles parsed from an .srt file must match the expected data."""
    rgb_values = ((255, 0, 0), (0, 255, 0), (0, 0, 255))
    red, green, blue = (ColorClip((800, 600), color=rgb).set_duration(10)
                        for rgb in rgb_values)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    # White caption at the bottom of the frame for each subtitle line.
    def generator(txt):
        return TextClip(txt, font=FONT, size=(800, 600), fontsize=24,
                        method='caption', align='South', color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
    close_all_clips(locals())
Example #9
0
def cut_video_silence(video_clip, chunk_duration, threshold):
    """Drop the quiet parts of ``video_clip``, keeping only the loud sections."""
    timings = get_loud_timings(video_clip.audio, chunk_duration, threshold)
    kept_sections = []
    for start, end in timings:
        kept_sections.append(video_clip.subclip(start, end))
    return concatenate_videoclips(kept_sections)
Example #10
0
    def subfx(self, fx, ta=0, tb=None, **kwargs):
        """Apply a transformation to a part of the clip.

        Returns a new clip in which the function ``fx`` (clip -> clip)
        has been applied to the subclip between times `ta` and `tb`
        (in seconds); the portions before `ta` and after `tb` are kept
        unchanged.

        Examples
        ---------

        >>> # The scene between times t=3s and t=6s in ``clip`` will be
        >>> # be played twice slower in ``newclip``
        >>> newclip = clip.subfx(lambda c:c.speedx(0.5) , 3,6)

        """
        # Untouched head/tail pieces; None when the split would be empty.
        left = None if (ta == 0) else self.subclip(0, ta)
        center = self.subclip(ta, tb).fx(fx, **kwargs)
        right = None if (tb is None) else self.subclip(t_start=tb)

        clips = [c for c in [left, center, right] if c is not None]

        # beurk, have to find other solution
        from moviepy.video.compositing.concatenate import concatenate_videoclips

        return concatenate_videoclips(clips).set_start(self.start)
Example #11
0
def create_clip(clip_title, audio_dir, image_dir, level, include_images, fmt):
    """
    Creates section title text clip, a slideshow, and adds narrator audio.

    Args:
        clip_title (str) -- clip title
        audio_dir (Path) -- e.g. 'audio/<clip_title>/*.wav'
        image_dir (Path) -- e.g. 'images/python/resized/<clip_title>/*.jpg'
        level (int) -- Indentation level. Higher means more indented.
        include_images (bool) -- Should be true if section contains text.
        fmt (str) -- audio format which narrator used.
    Returns:
        clip (VideoClip) -- Combined TextClip and (optional) ImageSequence
    """
    audio_path = str(audio_dir / clip_title)
    clip = narrated_header(clip_title, audio_path, level, fmt)

    # Top-level sections always get a slideshow, others only when requested.
    if include_images or level == 0:
        slideshow = narrated_image_seq(audio_path, image_dir / clip_title, fmt)
        clip = concatenate_videoclips([clip, slideshow], method='compose')

    print(clip_title, 'clip created!')

    return clip.set_fps(1)
Example #12
0
def test_write_gif(util, clip_class, opt, loop, with_mask, pixel_format):
    """Write a 3-frame (red/green/blue) GIF and check colours, looping and duration.

    Parametrized over the clip class, ffmpeg optimisation flag, loop count,
    mask presence and pixel format.
    """
    filename = os.path.join(util.TMP_DIR, "moviepy_write_gif.gif")
    if os.path.isfile(filename):
        os.remove(filename)

    fps = 10

    # Build a 0.3 s clip with one red, one green and one blue frame.
    if clip_class == "BitmapClip":
        original_clip = BitmapClip([["R"], ["G"], ["B"]],
                                   fps=fps).with_duration(0.3)
    else:
        original_clip = concatenate_videoclips([
            ColorClip(
                (1, 1),
                color=color,
            ).with_duration(0.1).with_fps(fps)
            for color in [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
        ])
    if with_mask:
        # Fully-opaque 1x1 mask covering the whole clip duration.
        original_clip = original_clip.with_mask(
            ColorClip((1, 1), color=1,
                      is_mask=True).with_fps(fps).with_duration(0.3))

    kwargs = {}
    if pixel_format is not None:
        kwargs["pixel_format"] = pixel_format

    write_gif(
        original_clip,
        filename,
        fps=fps,
        with_mask=with_mask,
        program="ffmpeg",
        logger=None,
        opt=opt,
        loop=loop,
        **kwargs,
    )

    # An invalid pixel format produces no readable file, so only verify
    # the output for valid formats.
    if pixel_format != "invalid":

        final_clip = VideoFileClip(filename)

        # NOTE(review): 252 rather than 255 — presumably GIF palette
        # quantisation; confirm against the encoder settings.
        r, g, b = final_clip.get_frame(0)[0][0]
        assert r == 252
        assert g == 0
        assert b == 0

        r, g, b = final_clip.get_frame(0.1)[0][0]
        assert r == 0
        assert g == 252
        assert b == 0

        r, g, b = final_clip.get_frame(0.2)[0][0]
        assert r == 0
        assert g == 0
        assert b == 255

        # A loop count of N multiplies the played duration N times.
        assert final_clip.duration == (loop or 1) * round(
            original_clip.duration, 6)
Example #13
0
def test_subtitles():
    """SubtitlesClip should parse ``media/subtitles1.srt`` into the expected data."""
    # 30 s of solid-colour background video to put the subtitles on.
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    # Travis CI does not like TextClip, so return for now,
    # but allow regular users to still run the test below.
    if TRAVIS:
        return

    # Renders each subtitle line as a white caption anchored at the bottom.
    generator = lambda txt: TextClip(txt,
                                     font='Liberation-Mono',
                                     size=(800, 600),
                                     fontsize=24,
                                     method='caption',
                                     align='South',
                                     color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.to_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    # Expected ([start, end], text) pairs parsed from the .srt file.
    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    assert subtitles.subtitles == data

    # Building from the data list directly should round-trip too.
    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
Example #14
0
def test_issue_407():
    """Regression test: concatenation should pick up fps from clips that define it."""
    red = ColorClip((800, 600), color=(255, 0, 0)).with_duration(5)
    red.fps = 30

    assert red.fps == 30
    assert red.w == 800
    assert red.h == 600
    assert red.size == (800, 600)

    # ColorClip instances have no fps attribute by default.
    green = ColorClip((640, 480), color=(0, 255, 0)).with_duration(2)
    blue = ColorClip((640, 480), color=(0, 0, 255)).with_duration(2)

    for clip in (green, blue):
        assert clip.w == 640
        assert clip.h == 480
        assert clip.size == (640, 480)
        with pytest.raises(AttributeError):
            clip.fps

    video = concatenate_videoclips([red, green, blue])
    assert video.fps == red.fps
Example #15
0
    def subfx(self, fx, ta=0, tb=None, **kwargs):
        """Apply a transformation to a part of the clip.

        Returns a new clip in which the function ``fx`` (clip -> clip)
        has been applied to the subclip between times `ta` and `tb`
        (in seconds); the portions before `ta` and after `tb` are kept
        unchanged.

        Examples
        ---------

        >>> # The scene between times t=3s and t=6s in ``clip`` will be
        >>> # be played twice slower in ``newclip``
        >>> newclip = clip.subfx(lambda c:c.speedx(0.5) , 3,6)

        """
        # Untouched head/tail pieces; None when the split would be empty.
        left = None if (ta == 0) else self.subclip(0, ta)
        center = self.subclip(ta, tb).fx(fx, **kwargs)
        right = None if (tb is None) else self.subclip(t_start=tb)

        clips = [c for c in [left, center, right] if c is not None]

        # beurk, have to find other solution
        from moviepy.video.compositing.concatenate import concatenate_videoclips

        return concatenate_videoclips(clips).set_start(self.start)
Example #16
0
def freeze(clip,
           t=0,
           freeze_duration=None,
           total_duration=None,
           padding_end=0):
    """Momentarily freeze the clip at time ``t``.

    Set ``t='end'`` to freeze the clip at the end (actually it will freeze on
    the frame at time ``clip.duration - padding_end`` seconds).
    With ``freeze_duration`` you can specify the duration of the freeze.
    With ``total_duration`` you can instead specify the total duration of the
    clip and the freeze (the duration of the freeze is then calculated
    automatically). One of them must be provided.

    Raises
    ------
    ValueError
        If neither ``freeze_duration`` nor ``total_duration`` is given.
    """
    if t == "end":
        t = clip.duration - padding_end

    if freeze_duration is None:
        # Bug fix: previously, passing neither argument crashed with an
        # unhelpful TypeError (None - duration); fail loudly instead.
        if total_duration is None:
            raise ValueError(
                "You must provide either 'freeze_duration' or 'total_duration'"
            )
        freeze_duration = total_duration - clip.duration

    before = [clip.subclip(0, t)] if (t != 0) else []
    freeze = [clip.to_ImageClip(t).set_duration(freeze_duration)]
    after = [clip.subclip(t)] if (t != clip.duration) else []
    return concatenate_videoclips(before + freeze + after)
Example #17
0
def do_concatenate(clips):
    """Join an ordered list of clips into a single clip.

        clips - ordered list of clips to concatenate
    """
    return concatenate_videoclips(clips)
Example #18
0
def test_issue_285():
    """Concatenating three 10 s image clips must yield a 30 s clip."""
    logo_clips = [
        ImageClip("media/python_logo.png", duration=10) for _ in range(3)
    ]
    merged_clip = concatenate_videoclips(logo_clips)
    assert merged_clip.duration == 30
Example #19
0
def do_concatenate(clips):
    """Concatenate the given clips, in order, into one combined clip."""
    combined = concatenate_videoclips(clips)
    return combined
Example #20
0
def time_symmetrize(clip):
    """Play ``clip`` once forwards and then once backwards.

    This is very practical for making videos that loop well, e.g. for
    animated GIFs. The effect is automatically applied to the clip's
    mask and audio if they exist.
    """
    mirrored = clip.fx(time_mirror)
    return concatenate_videoclips([clip, mirrored])
Example #21
0
def time_symmetrize(clip):
    """Return the clip followed by its time-reversed copy.

    Useful for footage that must loop cleanly (e.g. animated GIFs);
    the clip's mask and audio, when present, are mirrored as well.
    """
    forward_then_backward = [clip, clip.fx(time_mirror)]
    return concatenate_videoclips(forward_then_backward)
def test_detect_scenes():
    """Test that a cut is detected between concatenated red and green clips."""
    segments = [
        ColorClip((640, 480), color=rgb).with_duration(1)
        for rgb in ((255, 0, 0), (0, 200, 0))
    ]
    video = concatenate_videoclips(segments)

    cuts, luminosities = detect_scenes(video, fps=10, logger=None)

    assert len(cuts) == 2
Example #23
0
def test_concatenate_self(util):
    """Concatenating a single clip must reproduce that clip exactly."""
    frames = [["AAA", "BBB"], ["CCC", "DDD"]]
    clip = BitmapClip(frames, fps=1)
    target = BitmapClip(frames, fps=1)

    concatenated = concatenate_videoclips([clip])

    output_path = os.path.join(util.TMP_DIR, "test_concatenate_self.mp4")
    concatenated.write_videofile(output_path)
    assert concatenated == target
Example #24
0
    def make_movie(self, video_out, source=None):
        """
        Make a movie of either all the clips that have been added, or just a single clip if source is not None
        :param video_out: Filename of output file.
        :param source: set to index of a clip to use just that clip, or None to join all clips
        :return:
        """
        if source is not None:
            video = create_videoclip(self.frame_sources[source],
                                     self.duration[source], self.frame_rate,
                                     self.audio_files[source])
        else:
            clips = [
                create_videoclip(s, d, self.frame_rate, a) for s, d, a in zip(
                    self.frame_sources, self.duration, self.audio_files)
            ]
            video = concatenate_videoclips(clips)

        # Due to a bug in moviepy 1.0.1, when we write a video out in this mode the audio is not included.
        # So we write the video and audio out to separate temporary files.
        # We then use ffmpeg directly to combine the video and audio.
        temp_video_filename = temp_file(
            pathlib.Path(video_out).stem + "TEMP.mp4")
        temp_audio_filename = temp_file(
            pathlib.Path(video_out).stem + "TEMP.m4a")

        if not self.audio_files[0]:
            video.write_videofile(video_out,
                                  codec="libx264",
                                  fps=self.frame_rate)
        else:
            video.write_videofile(temp_video_filename,
                                  temp_audiofile=temp_audio_filename,
                                  codec="libx264",
                                  remove_temp=False,
                                  audio_codec="aac",
                                  fps=self.frame_rate)

            command = [
                "ffmpeg",
                "-y",  #approve output file overwrite
                "-i",
                temp_video_filename,
                "-i",
                temp_audio_filename,
                "-c:v",
                "copy",
                "-c:a",
                "copy",
                "-shortest",
                "-r",
                str(self.frame_rate),
                video_out
            ]
            process = sp.Popen(command)
            # Bug fix: wait for ffmpeg to finish. Previously the method
            # returned while the mux was still running, so callers could
            # read an incomplete output file.
            process.wait()
def generate(clips, segments, song, name):
    """Cut ``segments`` out of ``clips`` (round-robin), join them, attach the
    song and write ``<name>mix.mp4``."""
    print(f'Generating {name}...')
    pool_size = len(clips)
    clips = [
        clips[index % pool_size].subclip(*segment)
        for index, segment in tqdm(enumerate(segments))
    ]
    print(f'Generated. Generating {name} video...')
    mix = concatenate_videoclips(clips).set_audio(song)
    print(f'Generated. Writing {name} video...')
    mix.write_videofile(f'{name}mix.mp4')
    print('Generated.')
Example #26
0
def output_mp4(name, data, durations, max_workers=None, method='chain'):
    """Write the image frames in ``data`` to ``<name>.mp4``, one clip per
    frame, each shown for the matching entry of ``durations``."""
    # Sanity check: all frames must share a size unless we compose.
    frame_sizes = {frame.shape for frame in data}
    assert method == 'compose' or len(frame_sizes) == 1, frame_sizes
    # Turn each image into a timed clip (renamed loop vars: the original
    # comprehension shadowed ``data``).
    clips = [ImageClip(frame, duration=seconds)
             for frame, seconds in zip(data, durations)]
    # Save the mp4.
    movie = concatenate_videoclips(clips, method=method)
    movie.write_videofile(str(output_path / (name + '.mp4')),
                          fps=24,
                          threads=max_workers or cpu_count(),
                          bitrate='10M')
Example #27
0
def join_sections(video_clip: Union[VideoClip, AudioClip],
                  sections: Iterable[Tuple[float, float]]
) -> VideoClip:  # {{{
    '''join sections from video_clip encoded as a list of (start, end) timestamp pairs.

    :param video_clip: clip to extract sections from
    :param sections: list of (start_time, end_time) pairs that encode section timestamp values.
    :return: edited video_clip
    '''
    subclips = []
    for start, end in sections:
        subclips.append(video_clip.subclip(start, end))
    return concatenate_videoclips(subclips)
Example #28
0
def addNum2():
    """Concatenate every .mp4 found under the source directory into one video."""
    source = "E:/视频处理"  # source directory holding the input videos
    target = "E:/视频处理/1.mp4"  # output path for the merged video
    video_list = []  # loaded clips, in directory-walk order
    for root, dirs, files in os.walk(source):
        #files = natsorted(files)  # optional: natural sort (1, 2, 10, ...)
        for file in files:
            if os.path.splitext(file)[1] == ".mp4":  # keep only .mp4 files
                file_path = os.path.join(source, file)  # build the full path
                video = VideoFileClip(file_path)  # load the clip
                video_list.append(video)  # collect it for concatenation
    final_clip = concatenate_videoclips(video_list)  # merge the clips
    final_clip.to_videofile(target, fps=24, remove_temp=True)  # write the merged video
Example #29
0
def test_concatenate_floating_point(util):
    """
    >>> print("{0:.20f}".format(1.12))
    1.12000000000000010658

    This test uses duration=1.12 to check that it still works when the clip
    duration is represented as being bigger than it actually is. Fixed in #1195.
    """
    # A single solid-colour clip whose float duration is not exactly representable.
    clip = ColorClip([100, 50], color=[255, 128, 64],
                     duration=1.12).with_fps(25.0)
    concat = concatenate_videoclips([clip])
    # Writing the file is the actual assertion: it must not raise.
    concat.write_videofile(os.path.join(util.TMP_DIR, "concat.mp4"),
                           preset="ultrafast")
    def _render_clip(self, frames):
        """Turn ``frames`` into a video clip at ``self.frame_rate`` with the
        audio track from ``self.audio.path`` attached."""
        logger = logging.getLogger('logger')
        logger.info("Rendering video...")

        frame_duration = 1 / self.frame_rate
        image_clips = [
            ImageClip(frame.img).set_duration(frame_duration)
            for frame in frames
        ]
        rendered = concatenate_videoclips(image_clips, method="chain")
        rendered = rendered.set_audio(AudioFileClip(self.audio.path))
        return rendered.set_fps(self.frame_rate)
Example #31
0
    def process(self, data, inputs, utils):
        """Build a compilation of the found caption clips, capped at
        ``inputs['limit']`` entries, and write it to the output path."""
        limit = inputs['limit']
        clips = []
        for found in data:
            start, end = self.parse_caption_time(found.time)
            clips.append(
                VideoFileClip(found.yt.video_filepath).subclip(start, end))
            if len(clips) >= limit:
                break

        final_clip = concatenate_videoclips(clips)
        output_filepath = utils.get_output_filepath(inputs['channel_id'],
                                                    inputs['search_word'])
        final_clip.write_videofile(output_filepath)
        return data
Example #32
0
def add_outro(clips):
    """Append "thanks" and "subscribe" title cards and flatten onto black."""
    def title_card(text):
        # 2-second white caption filling the video frame.
        return TextClip(text,
                        color='white',
                        fontsize=72,
                        size=VIDEO_SIZE,
                        method='caption').set_duration(2)

    outro = [title_card(THANKS), title_card(SUBSCRIBE)]
    combined = concatenate_videoclips(clips + outro, method='compose')
    return combined.on_color(color=BLACK, col_opacity=1)
Example #33
0
def run():
    """Build per-chunk videos from the project images, join them and write the result."""
    project = Project(args=parse_args())

    images = build_images(project.args, image_paths=project.image_paths)
    image_chunks: List[List[ProjectImage]] = list(chunked(images, N_IMAGES_PER_CHUNK))

    videos: List[ImageSequenceClip] = []
    for image_chunk in tqdm(image_chunks, desc="Processing chunks"):
        videos.append(create_video(args=project.args, images=image_chunk))

    final_video = concatenate_videoclips(videos)
    if project.args.audio:
        print(f"🔊 Using audio from {project.args.audio}")
        # Bug fix: set_audio returns a NEW clip; the original discarded the
        # result, so the audio was never actually attached.
        final_video = final_video.set_audio(project.args.audio)
    final_video.write_videofile(filename=project.args.output, audio=project.args.audio)
Example #34
0
def composite_along_beats(scenes, audio_path):
    """Cut one scene per detected beat of the audio and concatenate them.

    NOTE: this consumes ``scenes`` — matched entries are removed from the
    caller's list in place.
    """
    beats = get_beats(audio_path)
    prev = 0  # timestamp of the previous beat
    clips = []
    for beat in beats:
        if len(scenes) == 0:
            break
        interval = beat - prev
        # Pick the scene whose length best matches this beat interval.
        clip = find_nearest(scenes, interval)
        scenes.remove(clip)
        start_sec = clip[1][0].get_seconds()
        end_sec = start_sec + interval
        clips.append(clip[0].subclip(start_sec, end_sec))
        prev = beat
    return concatenate_videoclips(clips)
Example #35
0
def addmp4(head="", mp4="", end=""):
    """Concatenate head/main/end videos (skipping empty paths) and write the
    result under ``work_path`` using the main clip's basename."""
    parts = [path for path in [head, mp4, end] if path != ""]
    print (parts)
    loaded = [VideoFileClip(path) for path in parts]
    filen = os.path.basename(mp4)
    final_clip = concatenate_videoclips(loaded, method='compose')
    final_clip.write_videofile(work_path + "\\" + filen)
    final_clip.close()
Example #36
0
def freeze_at_start(clip, freeze_duration=None, total_duration=None):
    """ Momentarily freeze the clip on its first frame.

    With ``freeze_duration`` you can specify the duration of the freeze.
    With ``total_duration`` you can specify the total duration of
    the clip and the freeze (i.e. the duration of the freeze is
    automatically calculated). If neither is provided, the freeze
    will have an infinite length.
    """

    # Capture the first frame (and its mask, if any) as a still image.
    freezed_clip = ImageClip(clip.get_frame(0))
    if clip.mask:
        freezed_clip.mask = ImageClip(clip.mask.get_frame(0))

    # ``total_duration`` takes precedence over an explicit ``freeze_duration``.
    if total_duration:
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        freezed_clip = freezed_clip.set_duration(freeze_duration)

    return concatenate_videoclips([freezed_clip, clip])
Example #37
0
def freeze(clip, t=0, freeze_duration=None, total_duration=None,
           padding_end=0):
    """Momentarily freeze the clip at time ``t``.

    Set ``t='end'`` to freeze the clip at its end (more precisely, on the
    frame at ``clip.duration - padding_end`` seconds).
    With ``freeze_duration`` you can specify the duration of the freeze.
    With ``total_duration`` you can instead specify the total duration of the
    clip and the freeze (the freeze duration is then computed automatically).
    One of the two must be provided.

    Raises
    ------
    ValueError
        If neither ``freeze_duration`` nor ``total_duration`` is given.
    """
    if t == 'end':
        # Bug fix: honour padding_end, which was accepted but ignored.
        t = clip.duration - padding_end

    if freeze_duration is None:
        # Bug fix: fail loudly instead of crashing with an unhelpful
        # TypeError (None - duration) when neither argument is supplied.
        if total_duration is None:
            raise ValueError(
                "You must provide either 'freeze_duration' or 'total_duration'")
        freeze_duration = total_duration - clip.duration

    before = [clip.subclip(0, t)] if (t != 0) else []
    freeze = [clip.to_ImageClip(t).set_duration(freeze_duration)]
    after = [clip.subclip(t)] if (t != clip.duration) else []
    return concatenate_videoclips(before + freeze + after)