Example #1
def prepare_all_comment_clips(comments, list_or_replies, start_time):
    section_time = start_time
    audio_clips = []
    video_clips = []
    ups_arrow_img = []
    bottom_bar_img = []
    downs_arrow_img = []
    author_name = []
    corner_logo_img = []
    transition_clips = []
    ups_text = []

    for comment, replies in zip(comments, list_or_replies):
        (video_clips_temp, audio_clips_temp, ups_arrow_img_temp,
         bottom_bar_img_temp, downs_arrow_img_temp, author_name_temp,
         corner_logo_img_temp, ups_text_temp,
         section_time) = read_comment_and_replies(comment, replies, section_time)
        video_clips += video_clips_temp
        audio_clips += audio_clips_temp
        ups_arrow_img += ups_arrow_img_temp
        downs_arrow_img += downs_arrow_img_temp
        bottom_bar_img += bottom_bar_img_temp
        author_name += author_name_temp
        corner_logo_img += corner_logo_img_temp
        ups_text += ups_text_temp
        # Play a transition clip between comment sections, at half volume.
        transition_clip = mpy.VideoFileClip(transition_clip_path).set_start(
            section_time)
        transition_clip_audio = volumex(transition_clip.audio, 0.5)
        transition_clips.append(
            transition_clip.set_audio(transition_clip_audio))
        section_time += transition_clip.duration
        audio_clips.append(transition_clip_audio)  # append the attenuated audio, matching the clip
        # print(section_time)

    return video_clips, audio_clips, ups_arrow_img, downs_arrow_img, bottom_bar_img, author_name, corner_logo_img, transition_clips, ups_text, section_time
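
The function above leans on names defined elsewhere in its project (`mpy`, `volumex`, `transition_clip_path`, `read_comment_and_replies`). A minimal sketch of the imports and globals it appears to assume, with the transition path as a placeholder:

import moviepy.editor as mpy
from moviepy.audio.fx.volumex import volumex

# Hypothetical module-level path to the stinger played between comment sections.
transition_clip_path = "assets/transition.mp4"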
Example #2
def convert(videos, sounds, dest, tduration=30, mduration=30):
    acc = 0
    clips = []

    for v, s in zip(videos, sounds):
        c = read_clip(v)
        if c.duration >= mduration:  # skip clips longer than the per-clip cap
            continue
        c = process_clip(c, s)
        acc += c.duration
        clips.append(c)
        if acc > tduration:  # stop once the target total length is reached
            break

    end_clip = mp.TextClip("FIN",
                           fontsize=100,
                           color="white",
                           font="garamond",
                           method="caption").set_duration(3)
    clips.append(end_clip)

    output = mp.concatenate_videoclips(clips, method="compose")
    # Loop quiet background music (20% volume) under the whole compilation.
    music = audio_loop.audio_loop(volumex.volumex(
        mp.AudioFileClip("bgm.mp3").set_start(0), 0.2),
                                  duration=output.duration)

    new_audio = mp.CompositeAudioClip([music, output.audio])
    output = output.set_audio(new_audio)
    output.write_videofile(dest + "/" + "output.mp4")
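
A hedged invocation sketch, assuming the project's `read_clip` and `process_clip` helpers return MoviePy clips and that `bgm.mp3` sits in the working directory; all paths below are placeholders:

import moviepy.editor as mp
from moviepy.audio.fx import audio_loop, volumex

# Hypothetical inputs: one replacement soundtrack per source video.
videos = ["clips/a.mp4", "clips/b.mp4", "clips/c.mp4"]
sounds = ["audio/a.mp3", "audio/b.mp3", "audio/c.mp3"]
convert(videos, sounds, dest="out", tduration=30, mduration=30)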
Example #3
def add(self, strId, path, size=None, volume=None):
    clip = VideoFileClip(path)
    if size:
        clip = clip.resize(size)  # resize returns a new clip; reassign it
    if volume:
        clip = volumex(clip, volume)
    self.__clipDict[strId] = clip
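
The method clearly comes from a class that stores clips in a private dict; a minimal sketch of that context and a call (the class name and file names are hypothetical):

from moviepy.editor import VideoFileClip
from moviepy.audio.fx.volumex import volumex

class ClipRegistry:  # hypothetical name for the snippet's enclosing class
    def __init__(self):
        self.__clipDict = {}

    def add(self, strId, path, size=None, volume=None):
        clip = VideoFileClip(path)
        if size:
            clip = clip.resize(size)
        if volume:
            clip = volumex(clip, volume)  # on a video clip, volumex scales its audio track
        self.__clipDict[strId] = clip

registry = ClipRegistry()
registry.add("intro", "intro.mp4", size=0.5, volume=0.8)  # placeholder file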
Example #4
def prepare_background_music(video_length):
    background_songs = os.listdir(music_dir)
    song_rng = np.random.randint(0, len(background_songs))
    background_song = mpy.AudioFileClip(music_dir + '/' +
                                        background_songs[song_rng])
    background_audio = background_song

    extend_audio_factor = int(
        video_length / background_audio.duration
    ) + 1  # number of times to repeat audio in order to have the correct length

    if video_length > background_audio.duration:
        print('extending audio track to match video.')
        for _ in range(extend_audio_factor):
            song_rng = np.random.randint(0, len(background_songs))
            background_song = mpy.AudioFileClip(music_dir + '/' +
                                                background_songs[song_rng])
            background_audio = mpy.concatenate_audioclips(
                [background_audio, background_song])

    background_audio = background_audio.set_duration(video_length)
    background_audio = volumex(background_audio, 0.1)

    return background_audio
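
The function depends on a module-level `music_dir` and the usual imports; a sketch of that context plus one way to attach the result to a finished video (directory and file names are placeholders):

import os

import numpy as np
import moviepy.editor as mpy
from moviepy.audio.fx.volumex import volumex

music_dir = "assets/music"  # hypothetical folder of background tracks

video = mpy.VideoFileClip("final.mp4")  # placeholder video
bgm = prepare_background_music(video.duration)
video = video.set_audio(mpy.CompositeAudioClip([video.audio, bgm]))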
Example #5
def make_round(stack: ExitStack, output_config: OutputConfig, r_i: int,
               cutter_lock: Lock, max_threads_semaphore: Semaphore):
    max_threads_semaphore.acquire()
    round_config = output_config.rounds[r_i]
    ext, codec = _get_ext_codec(output_config.raw)

    # Skip rounds that have been saved
    if round_config._is_on_disk:
        name = get_round_name(output_config.name, round_config.name, ext)
        max_threads_semaphore.release()
        return name

    # Assemble beatmeter video from beat images
    bmcfg = (round_config.beatmeter_config if round_config.bmcfg else None)
    if round_config.beatmeter is not None:
        print("\r\nAssembling beatmeter #{}...".format(r_i + 1))
        beatmeter_thread = ThreadWithReturnValue(
            target=lambda: make_beatmeter(
                stack,
                round_config.beatmeter,
                bmcfg.fps if bmcfg else output_config.fps,
                round_config.duration,
                (output_config.xdim, output_config.ydim),
            ),
            daemon=True,
        )
        beatmeter_thread.start()

    # Get list of clips cut from sources using chosen cutter
    # TODO: get duration, bpm from music track; generate beatmeter
    print("\r\nLoading sources for round #%i..." % (r_i + 1))
    cutter = get_cutter(stack, output_config, round_config)
    print("\r\nShuffling input videos for round #%i..." % (r_i + 1))
    if output_config.versions > 1:
        # Await previous cutter, if still previewing
        cutter_lock.acquire()
        # TODO: pass in cutter lock to release on preview exit
        clips = cutter.get_compilation()
        cutter_lock.release()
    else:
        clips = cutter.get_compilation()

    # Assemble audio from music and beats
    audio = None
    if round_config.music is not None or round_config.beats is not None:
        audio = [
            stack.enter_context(AudioFileClip(clip)) for clip in [
                round_config.beats,
                round_config.music,
            ] if clip is not None
        ]

    # Concatenate this round's video clips together and add audio
    round_video = concatenate_videoclips(clips)
    if audio is not None:
        beat_audio = CompositeAudioClip(audio)
        level = round_config.audio_level
        if level > 1:
            beat_audio = volumex(beat_audio, 1 / level)
        else:
            round_video = volumex(round_video, level)
        audio = CompositeAudioClip([beat_audio, round_video.audio])
        round_video = round_video.set_audio(audio)

    # Add beatmeter, if supplied
    if round_config.beatmeter is not None:
        # Wait for beatmeter, if it exists
        print("\r\nWaiting for beatmeter #%i..." % (r_i + 1))
        beatmeter = beatmeter_thread.join()
        round_video = CompositeVideoClip([round_video, beatmeter])
    round_video = round_video.set_duration(round_config.duration)

    # Fade in and out
    round_video = crossfade([
        get_black_clip((output_config.xdim, output_config.ydim)),
        round_video,
        get_black_clip((output_config.xdim, output_config.ydim)),
    ])

    if output_config.cache == "round":
        # Save each round video to disk
        filename = get_round_name(output_config.name, round_config.name, ext)
        filename = _write_video(stack, round_video, filename, codec,
                                output_config.fps, ext)
        round_config._is_on_disk = filename is not None
        max_threads_semaphore.release()
        return filename
    else:  # output_config.cache == "all":
        max_threads_semaphore.release()
        return round_video  # Store round in memory instead
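
The `volumex` calls above implement a simple ducking rule: an `audio_level` above 1 attenuates the beats/music mix, while a level at or below 1 attenuates the compilation's own soundtrack instead. A standalone sketch of just that step, assuming both clips are already loaded (file names and the level are placeholders):

from moviepy.editor import AudioFileClip, CompositeAudioClip, VideoFileClip
from moviepy.audio.fx.volumex import volumex

round_video = VideoFileClip("round.mp4")  # placeholder compilation video
beat_audio = AudioFileClip("beats.mp3")   # placeholder beat/music track
audio_level = 2.0                         # assumed round_config.audio_level

if audio_level > 1:
    beat_audio = volumex(beat_audio, 1 / audio_level)  # duck the beats
else:
    round_video = volumex(round_video, audio_level)  # scales the video's audio track

mixed = CompositeAudioClip([beat_audio, round_video.audio])
round_video = round_video.set_audio(mixed)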