Exemple #1
0
def test_afterimage():
    """Composite a green-keyed image over a white background and render 0.2 s."""
    foreground = ImageClip("media/afterimage.png")
    # Key out the (0, 255, 1) green pixels so the background shows through.
    keyed = mask_color(foreground, color=[0, 255, 1])
    background = ColorClip((800, 600), color=(255, 255, 255))
    composite = CompositeVideoClip(
        [background, keyed], use_bgclip=True
    ).with_duration(0.2)
    composite.write_videofile(os.path.join(TMP_DIR, "afterimage.mp4"), fps=30)
Exemple #2
0
def test_subtitles():
    """Burn an SRT subtitle track onto a red/green/blue test video."""
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    def generator(txt):
        # Render each subtitle line as a full-frame caption anchored south.
        return TextClip(txt, font=FONT,
                        size=(800, 600), fontsize=24,
                        method='caption', align='South',
                        color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    expected = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
                ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
                ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    # Parsing the SRT must yield exactly this timing/text data.
    assert subtitles.subtitles == expected

    # Building the clip directly from the data round-trips unchanged.
    subtitles = SubtitlesClip(expected, generator)
    assert subtitles.subtitles == expected
    close_all_clips(locals())
Exemple #3
0
def make_weekly_movie(cam: Cam, executor):
    """Build last week's time-lapse movie for *cam* from its daily image folders.

    Collects every daytime (06:00-18:00) frame captured during the past week,
    drops gray/broken frames, overlays a text clip (top-right) and writes the
    result to <root>/regular/weekly/ww<N>.mp4.

    Returns a Movie record (height, width, path, middle-of-sequence frame).
    """
    root = Path(conf.root_dir) / 'data' / cam.name
    path = root / 'regular' / 'imgs'
    start = pendulum.yesterday()
    logger.info(f'Running make weekly movie for ww{start.week_of_year}')
    week_ago = start.subtract(weeks=1).date()
    sequence = []
    # Only keep frames taken during daylight hours.
    morning = pendulum.Time(6)
    evening = pendulum.Time(18)
    # Day folders are named DD_MM_YYYY; sort chronologically, not lexically.
    for day in sorted(list(path.iterdir()), key=lambda x: pendulum.from_format(x.name, 'DD_MM_YYYY')):
        if pendulum.from_format(day.name, 'DD_MM_YYYY').date() > week_ago:
            for img in sorted(day.iterdir()):
                # Image stem encodes the capture time as DD_MM_YYYY_HH-mm-ss
                # — assumed from the parse format below; TODO confirm naming.
                t_img = img.name.split('.')[0]
                t_img = pendulum.from_format(t_img, 'DD_MM_YYYY_HH-mm-ss').time()
                if morning < t_img < evening:
                    sequence.append(str(img))
    sequence = check_sequence_for_gray_images(sequence, executor)
    txt_clip = make_txt_movie(sequence, 100, executor)
    logger.info(f'Composing clip for weekly movie ww{start.week_of_year}')
    image_clip = ImageSequenceClip(sequence, fps=100)
    # Text overlay pinned to the top-right corner over the image sequence.
    clip = CompositeVideoClip([image_clip, txt_clip.set_position(('right', 'top'))], use_bgclip=True)
    movie_path = root / 'regular' / 'weekly' / f'ww{start.week_of_year}.mp4'
    movie_path.parent.mkdir(parents=True, exist_ok=True)
    clip.write_videofile(str(movie_path), audio=False)
    logger.info(f'Finished with clip for weekly movie ww{start.week_of_year}')
    # The middle frame of the sequence serves as the movie's thumbnail.
    return Movie(clip.h, clip.w, movie_path, sequence[seq_middle(sequence)])
Exemple #4
0
def test_subtitles():
    """Overlay the reference SRT onto an R/G/B test video (moviepy v2 API)."""
    red = ColorClip((800, 600), color=(255, 0, 0)).with_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).with_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).with_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    def generator(txt):
        # Full-frame caption, anchored at the bottom ("South") of the frame.
        return TextClip(
            txt,
            font=FONT,
            size=(800, 600),
            font_size=24,
            method="caption",
            align="South",
            color="white",
        )

    subtitles = SubtitlesClip("media/subtitles.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles.mp4"), fps=30)

    # Parsed SRT data must match the shared fixture, and building a clip
    # directly from that data must round-trip unchanged.
    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA

    subtitles = SubtitlesClip(MEDIA_SUBTITLES_DATA, generator)
    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA
    close_all_clips(locals())
Exemple #5
0
def test_subtitles():
    """Burn subtitles from an SRT file onto a 30 s red/green/blue video.

    Also verifies that SubtitlesClip parses the SRT into the expected
    ([start, end], text) pairs and that building a clip straight from that
    data round-trips unchanged.
    """
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    # Each subtitle line becomes a full-frame caption anchored at the bottom.
    generator = lambda txt: TextClip(txt, font=FONT,
                                     size=(800, 600), fontsize=24,
                                     method='caption', align='South',
                                     color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    # Expected ([start, end], text) pairs parsed from media/subtitles1.srt.
    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
    close_all_clips(locals())
Exemple #6
0
def merge_video_audio(video_path, audio_path, outpath):
    """Merge a video file with an audio track and write the combined video.

    (Translated from the original Chinese docstring: "merge video and audio".)
    Writes *outpath* with the mpeg4 codec at the module-level `_fps`
    frame rate — assumed to be defined elsewhere in this module; confirm.
    """
    audioclip = AudioFileClip(str(audio_path))
    videoclip = VideoFileClip(str(video_path))
    # set_audio returns a new clip; the original videoclip is untouched.
    videoclip2 = videoclip.set_audio(audioclip)
    video = CompositeVideoClip([videoclip2])
    video.write_videofile(str(outpath), codec='mpeg4', fps=_fps)
Exemple #7
0
def test_afterimage():
    """Render a green-keyed image over a white background for 5 seconds."""
    image = ImageClip("media/afterimage.png")
    keyed = mask_color(image, color=[0, 255, 1])  # for green
    white_bg = ColorClip((800, 600), color=(255, 255, 255))
    composite = CompositeVideoClip([white_bg, keyed], use_bgclip=True)
    # Duration is set via the attribute rather than a fluent call here.
    composite.duration = 5
    composite.write_videofile(os.path.join(TMP_DIR, "afterimage.mp4"), fps=30)
Exemple #8
0
def test_afterimage():
    """Composite a color-keyed image over white and render 5 s to /tmp."""
    ai = ImageClip("media/afterimage.png")
    # Mask out pixels matching (0, 255, 1) so the background shows through.
    masked_clip = mask_color(ai, color=[0, 255, 1])  # for green

    some_background_clip = ColorClip((800, 600), color=(255, 255, 255))

    # use_bgclip=True makes the first clip the opaque background layer.
    final_clip = CompositeVideoClip([some_background_clip, masked_clip],
                                    use_bgclip=True)
    final_clip.duration = 5
    final_clip.write_videofile("/tmp/afterimage.mp4", fps=30)
Exemple #9
0
def main():
    """Render an audio-reactive video described by a JSON spec file.

    Usage: <script> <filename.json>. The JSON must contain a "song" key
    (audio file path, relative to the JSON's directory) and an "fft-clips"
    list; each entry needs 'x', 'y', 'frequency' and 'image-pattern' keys,
    with optional 'resize-x'/'resize-y'. Writes "<filename.json>.avi".
    """
    if len(sys.argv) != 2:
        error("Usage {} <filename.json>", sys.argv[0])

    viz_filename = sys.argv[1]
    if not path.exists(viz_filename):
        error("File not found")

    root_path = path.dirname(viz_filename)

    with open(viz_filename) as f:
        song_data = json.loads(f.read())

        if 'song' not in song_data:
            error("\"song\" key not found in json")

        song_filename = path.join(root_path, song_data['song'])
        if not path.exists(song_filename):
            # BUG FIX: the "{}" placeholder previously had no argument,
            # so the message never showed which file was missing.
            error("\"{}\" not found", song_filename)

        song_clip = FFTClip(song_filename)

        # Cache GlobStores by pattern so entries sharing a pattern reuse one.
        glob_cache = {}

        if 'fft-clips' not in song_data:
            error("\"fft-clips\" key not found in json")
        amplitude_clips = []
        for clip_data in song_data['fft-clips']:
            assert 'x' in clip_data
            assert 'y' in clip_data
            assert 'frequency' in clip_data
            assert 'image-pattern' in clip_data
            relative_pattern = path.join(root_path, clip_data['image-pattern'])
            if 'resize-x' in clip_data:
                # BUG FIX: this previously re-asserted 'resize-x' (already
                # known true); 'resize-y' is the key actually read below.
                assert 'resize-y' in clip_data
                resize = (clip_data['resize-y'], clip_data['resize-x'])
            else:
                resize = None

            if relative_pattern in glob_cache:
                glob_store = glob_cache[relative_pattern]
            else:
                glob_store = GlobStore(relative_pattern, resize=resize)
                glob_cache[relative_pattern] = glob_store

            # One amplitude-driven clip per spec entry, positioned at (x, y).
            amplitude_clip = AmplitudeClip(
                fft_clip=song_clip,
                glob_store=glob_store,
                freq=clip_data['frequency']).set_position((clip_data['x'], clip_data['y']))

            amplitude_clips.append(amplitude_clip)

    composite = CompositeVideoClip(amplitude_clips, size=(1120, 367)).set_audio(song_clip)
    composite.write_videofile(viz_filename + '.avi', codec='h264', fps=12)
Exemple #10
0
def add_subtitle(video_path, subtitle_path, filename=None):
    """Overlay a subtitle file onto a video and write the result as mp4.

    When *filename* is falsy, the output name is derived from *video_path*
    with a '[WITH-SUBTITLE] ' prefix and a .mp4 extension.
    """
    def generator(txt):
        # White text on a semi-transparent black box.
        return TextClip(txt,
                        font='assets/font/GothamMedium.ttf',
                        fontsize=45, color='white',
                        bg_color='#00000066')

    positioned = SubtitlesClip(subtitle_path, generator).set_position(('center', 'bottom'))
    # Transparent bottom margin lifts the subtitles off the frame edge.
    subtitle = margin(clip=positioned, bottom=35, opacity=0)
    video = VideoFileClip(video_path, audio=True)
    composed_video = CompositeVideoClip([video, subtitle])
    output_filename = filename or replace_extension(
        add_prefix_to_filename(video_path, '[WITH-SUBTITLE] '), '.mp4')
    composed_video.write_videofile(output_filename,
                                   threads=2,
                                   fps=video.fps)
Exemple #11
0
def make_movie(cam: Cam, day: str, regular: bool = True):
    """Build the daily time-lapse clip for *cam* from the images of *day*.

    *day* names an image folder under <root>/regular/imgs/. The clip (with a
    text overlay in the top-right corner) is written to
    <root>/<regular-or-empty>/clips/<day>.mp4 at the camera's fps.
    """
    # NOTE(review): the bool parameter is rebound to a path fragment
    # ('regular' or '') — it only affects where the output clip is written;
    # the input images are always read from the 'regular' tree below.
    regular = 'regular' if regular else ''
    root = Path(conf.root_dir) / 'data' / cam.name
    path = root / 'regular' / 'imgs' / day
    logger.info(f'Running make movie for {path}:{day}')
    # Gray-image filtering is intentionally disabled here (kept for reference).
    # sequence = check_sequence_for_gray_images(sorted(str(p) for p in path.iterdir()))
    sequence = sorted(str(p) for p in path.iterdir())
    txt_clip = make_txt_movie(sequence, cam.fps)
    logger.info(f'Composing clip for {path}:{day}')
    image_clip = ImageSequenceClip(sequence, fps=cam.fps)
    logger.info(f'ImageSequenceClip ready')
    # Text overlay pinned to the top-right corner over the image sequence.
    clip = CompositeVideoClip(
        [image_clip, txt_clip.set_position(('right', 'top'))], use_bgclip=True)
    logger.info(f'CompositeVideoClip ready')
    movie_path = root / regular / 'clips' / f'{day}.mp4'
    movie_path.parent.mkdir(parents=True, exist_ok=True)
    clip.write_videofile(str(movie_path), audio=False)
Exemple #12
0
def add_subtitle(video_path, default_subtitle_path, translated_subtitle_path):
    """Overlay two subtitle tracks (original + translation) onto a video.

    The default subtitles sit 80 px above the bottom edge, the translated
    ones 40 px, so the two tracks stack without overlapping. Output is
    written next to the input with a '[WITH-SUBTITLE] ' prefix and .mp4
    extension. Does nothing when *default_subtitle_path* is None.
    """
    if default_subtitle_path is None:
        return
    # Transparent margins (opacity=0) are used purely for positioning.
    default_subtitle = margin(clip=SubtitlesClip(
        default_subtitle_path, default_subtitle_generator()).set_position(
            ('center', 'bottom')),
                              bottom=80,
                              opacity=0)
    translated_subtitle = margin(clip=SubtitlesClip(
        translated_subtitle_path,
        translation_subtitle_generator()).set_position(('center', 'bottom')),
                                 bottom=40,
                                 opacity=0)
    video = VideoFileClip(video_path, audio=True)
    composed_video = CompositeVideoClip(
        [video, default_subtitle, translated_subtitle])
    output_filename = replace_extension(
        add_prefix_to_filename(video_path, '[WITH-SUBTITLE] '), '.mp4')
    composed_video.write_videofile(output_filename, threads=2, fps=video.fps)
Exemple #13
0
class Compositor:
    """Prepends an intro video to an archive clip and renders the result."""

    def __init__(self,
                 clip,
                 intro='../../media/archiwum_intro.mp4',
                 alg='bicubic',
                 size=None,
                 oberver=None):  # NOTE(review): 'oberver' looks like a typo for 'observer'; it is unused here but renaming would break keyword callers.
        # NOTE(review): one VideoFileClip is unqualified, the other comes from
        # the 'mp' module alias — presumably both moviepy; confirm imports.
        intro_clip = VideoFileClip(intro).set_position(('center', 'center'))
        archive_clip = mp.VideoFileClip(clip, resize_algorithm=alg)
        if size is None:
            # Default to the larger of the two clips in each dimension.
            width = max([intro_clip.size[0], archive_clip.size[0]])
            height = max([intro_clip.size[1], archive_clip.size[1]])
            size = (width, height)
        archive_clip = archive_clip.resize(size)

        self.final_clip = concatenate_videoclips([intro_clip, archive_clip])
        self.final_clip = CompositeVideoClip([self.final_clip])

    def write(self, clip_name, logger='bar'):
        """Render the composed clip to *clip_name* (progress logger: 'bar')."""
        self.final_clip.write_videofile(clip_name, logger=logger)
Exemple #14
0
def generate_text_clip(text, number):
    """Create (or reuse) a clip showing *text* over TTS narration.

    Renders tmp/<name>/clips/<name><number>.mp4 and returns that path.
    If the file already exists it is returned without re-rendering.

    Relies on module-level `name`, `background_image` and `make_tts` —
    assumed defined elsewhere in this module; confirm.
    """
    filename = "tmp/" + name + "/clips/" + name + number + ".mp4"

    if not os.path.exists(filename):
        audio_filename = make_tts(text, number)
        audio = AudioFileClip(audio_filename)
        image = ImageClip(background_image).set_fps(30)
        # Still image held for exactly the narration's duration.
        video = image.set_duration(audio.duration)
        withaudio = video.set_audio(audio)

        # NOTE(review): this divides by the clip width, producing a tiny
        # fontsize for any realistic text length — looks inverted; verify.
        fontsize = (len(text) + 10) / withaudio.w
        text_clip = TextClip(text,
                             fontsize=fontsize,
                             size=(withaudio.w, withaudio.h)).set_pos("center")

        final_clip = CompositeVideoClip(
            [withaudio, text_clip.set_duration(video.duration)])

        final_clip.write_videofile(filename)
    return filename
Exemple #15
0
def test_issue_334():
    """Regression test: animate a clip's position and size from keyframes.

    Two destructive keyframe interpolators (one for position, one for size)
    each consume their own copy of the keyframe list while the composite is
    rendered frame by frame.
    """
    # NOTE: this is horrible. Any simpler version ?
    last_move = None
    last_move1 = None

    # Keyframes: (time, x, y, width).
    lis = [
        (0.0, 113, 167, 47),
        (0.32, 138, 159, 47),
        (0.44, 152, 144, 47),
        (0.48, 193, 148, 47),
        (0.6, 193, 148, 47),
        (0.76, 205, 138, 55),
        (0.88, 204, 121, 63),
        (0.92, 190, 31, 127),
        (1.2, 183, 59, 127),
        (1.4, 137, 22, 127),
        (1.52, 137, 22, 127),
        (1.72, 129, 67, 127),
        (1.88, 123, 69, 127),
        (2.04, 131, 123, 63),
        (2.24, 130, 148, 63),
        (2.48, 130, 148, 63),
        (2.8, 138, 180, 63),
        (3.0, 138, 180, 63),
        (3.2, 146, 192, 63),
        (3.28, 105, 91, 151),
        (3.44, 105, 91, 151),
        (3.72, 11, 48, 151),
        (3.96, 5, 78, 151),
        (4.32, 4, 134, 1),
        (4.6, 149, 184, 48),
        (4.8, 145, 188, 48),
        (5.0, 154, 217, 48),
        (5.08, 163, 199, 48),
        (5.2, 163, 199, 48),
        (5.32, 164, 187, 48),
        (5.48, 163, 200, 48),
        (5.76, 163, 200, 48),
        (5.96, 173, 199, 48),
        (6.0, 133, 172, 48),
        (6.04, 128, 165, 48),
        (6.28, 128, 165, 48),
        (6.4, 129, 180, 48),
        (6.52, 133, 166, 48),
        (6.64, 133, 166, 48),
        (6.88, 144, 183, 48),
        (7.0, 153, 174, 48),
        (7.16, 153, 174, 48),
        (7.24, 153, 174, 48),
        (7.28, 253, 65, 104),
        (7.64, 253, 65, 104),
        (7.8, 279, 116, 80),
        (8.0, 290, 105, 80),
        (8.24, 288, 124, 80),
        (8.44, 243, 102, 80),
        (8.56, 243, 102, 80),
        (8.8, 202, 107, 80),
        (8.84, 164, 27, 104),
        (9.0, 164, 27, 104),
        (9.12, 121, 9, 104),
        (9.28, 77, 33, 104),
        (9.32, 52, 23, 104),
        (9.48, 52, 23, 104),
        (9.64, 33, 46, 104),
        (9.8, 93, 49, 104),
        (9.92, 93, 49, 104),
        (10.16, 173, 19, 104),
        (10.2, 226, 173, 48),
        (10.36, 226, 173, 48),
        (10.48, 211, 172, 48),
        (10.64, 208, 162, 48),
        (10.92, 220, 171, 48),
    ]

    # lis1 was a verbatim duplicate of lis; the two interpolators below pop
    # from their list destructively, so each needs its own independent copy.
    lis1 = list(lis)

    def posi(t):
        """Position (x, y) at time t, linearly interpolated between keyframes."""
        # BUG FIX: was `global`; last_move is a variable of the enclosing
        # function, so `nonlocal` is the correct (and intended) binding.
        nonlocal last_move
        if len(lis) == 0:
            return (last_move[1], last_move[2])
        if t >= lis[0][0]:
            # Reached the next keyframe: consume it and snap to it.
            last_move = item = lis.pop(0)
            return (item[1], item[2])
        else:
            if len(lis) > 0:
                dura = lis[0][0] - last_move[0]
                now = t - last_move[0]
                w = (lis[0][1] - last_move[1]) * (now / dura)
                h = (lis[0][2] - last_move[2]) * (now / dura)
                # print t, last_move[1] + w, last_move[2] + h
                return (last_move[1] + w, last_move[2] + h)
            return (last_move[1], last_move[2])

    def size(t):
        """Size (w, h) at time t; height is always width * 1.33."""
        # BUG FIX: was `global`; see posi() above.
        nonlocal last_move1
        if len(lis1) == 0:
            return (last_move1[3], last_move1[3] * 1.33)
        if t >= lis1[0][0]:
            last_move1 = item = lis1.pop(0)
            return (item[3], item[3] * 1.33)
        else:
            # BUG FIX: this previously tested `lis` (the position list)
            # instead of `lis1`, coupling the two interpolators.
            if len(lis1) > 0:
                dura = lis1[0][0] - last_move1[0]
                now = t - last_move1[0]
                s = (lis1[0][3] - last_move1[3]) * (now / dura)
                nsw = last_move1[3] + s
                nsh = nsw * 1.33
                # print t, nsw, nsh
                return (nsw, nsh)
            return (last_move1[3], last_move1[3] * 1.33)

    avatar = VideoFileClip("media/big_buck_bunny_432_433.webm", has_mask=True)
    avatar.audio = None
    maskclip = ImageClip("media/afterimage.png",
                         is_mask=True,
                         transparent=True)
    # BUG FIX: with_mask() returns a new clip; the result was previously
    # discarded, making the call a no-op despite the comment's intent.
    avatar = avatar.with_mask(maskclip)  # must set maskclip here..
    concatenated = concatenate_videoclips([avatar] * 3)

    tt = VideoFileClip("media/big_buck_bunny_0_30.webm").subclip(0, 3)
    # TODO: Setting mask here does not work:
    # .with_mask(maskclip).resize(size)])
    final = CompositeVideoClip(
        [tt, concatenated.with_position(posi).fx(resize, size)])
    final.duration = tt.duration
    final.write_videofile(os.path.join(TMP_DIR, "issue_334.mp4"), fps=10)
Exemple #16
0
                              fontsize=fontsize,
                              color='white',
                              method='caption',
                              align=direction,
                              size=size)
            else:
                tc = TextClip(txt,
                              font='Nunito',
                              fontsize=fontsize,
                              color='white',
                              method='caption',
                              align='center',
                              size=(512, 25))
            return tc

        return generator

    sub_1 = SubtitlesClip(s_up, make_textclip=create_generator('North', 8))
    sub_2 = SubtitlesClip(s_down, make_textclip=create_generator('South', 24))

    sub_1.end = sub_2.end

    final = CompositeVideoClip([clip, sub_1, sub_2], size=size)
else:
    final = clip

final.write_videofile(outname,
                      codec='libx264',
                      audio_codec='aac',
                      fps=clip.fps)
Exemple #17
0
#title_txt = TextClip(TITLE_TEXT,color=FONT_COLOR, align='West',fontsize=26,
                    #font=FONT_FAMILY, method='label')

# NOTE(review): this chunk relies on names defined earlier in the script
# (text, audio, duration, VIDEO_SIZE, BACKGROUND_*, OUTPUT_FILE, txt_height)
# that are not visible here — confirm against the full file.
if len(BACKGROUND_IMAGE) > 0:
    bg = ImageClip(BACKGROUND_IMAGE)

# Scroll the text at the right speed
line_height = 30
txt_speed = float(line_height) * float(txt_height) / float(duration)

# Crop a viewport-high window out of the text frame, sliding down over time.
fl = lambda gf,t : gf(t)[int(txt_speed*t):int(txt_speed*t)+VIDEO_SIZE[1],:]
moving_txt= text.fl(fl, apply_to=['mask'])

# Create the video clip
# With a background image: layer it (resized/positioned) under the text;
# otherwise render the scrolling text alone on a solid color.
clip = CompositeVideoClip(
    [
        vfx.resize(bg.set_position(BACKGROUND_IMAGE_POSITION), BACKGROUND_IMAGE_RESIZE),
        moving_txt.set_position(TEXT_POSITION)
    ]
    if len(BACKGROUND_IMAGE) > 0 else
    [
        moving_txt.set_position(TEXT_POSITION)
    ],
    size=VIDEO_SIZE).\
    on_color(
        color=BACKGROUND_COLOR,
        col_opacity=1).set_duration(duration).set_audio(audio)

clip.write_videofile(OUTPUT_FILE, fps=10)
    #use_bgclip=True if len(BACKGROUND_IMAGE) > 0 else False,
    #use_bgclip=True if len(BACKGROUND_IMAGE) > 0 else False,
Exemple #18
0
def exw(nameclip, nametxttoload):
    """Watermark a clip, cut a 10-piece preview reel from it, and upload.

    Overlays wm.png (bottom-center) onto *nameclip*, writes the watermarked
    video to a temp name, then cuts ten 2-second samples at duration-dependent
    offsets, concatenates them back over *nameclip*, deletes the temp cuts,
    and hands everything to the uploader. Exits with status 1 for clips
    shorter than ~10 minutes.
    """
    tempname = f"content\Temp\{str(uuid.uuid4())}"
    namecliptoload = f"{tempname}done.mp4"

    video = VideoFileClip(nameclip)
    dur = video.duration
    print(round(dur))
    logo = (ImageClip("wm.png").set_duration(dur).set_pos(
        ("center", "bottom")))

    final = CompositeVideoClip([video, logo])
    final.write_videofile(f"{tempname}done.mp4", audio=True)

    # Choose the ten 2-second sample start times by total duration.
    # (Previously ~100 lines of copy-pasted subclip/write pairs.)
    if round(dur) < 600:
        sys.exit(1)  # too short to sample
    elif round(dur) < 1200:
        starts = [15, 70, 100, 160, 200, 300, 400, 500, 600, 700]
    elif round(dur) < 2460:
        starts = [15, 70, 200, 360, 400, 500, 600, 700, 800, 900]
    elif round(dur) < 3600:
        starts = [35, 100, 300, 560, 700, 900, 1200, 1400, 1600, 1800]
    else:
        starts = [55, 100, 300, 560, 700, 900, 1200, 1400, 1600, 1700]

    # File naming matches the original: first cut is "cut.mp4", then
    # "cut1.mp4" .. "cut9.mp4".
    suffixes = [''] + [str(i) for i in range(1, 10)]
    for start, suffix in zip(starts, suffixes):
        piece = VideoFileClip(f"{tempname}done.mp4").subclip(start, start + 2)
        piece.write_videofile(f'{tempname}cut{suffix}.mp4', audio=False)

    cut_clips = [VideoFileClip(f"{tempname}cut{suffix}.mp4")
                 for suffix in suffixes]
    final_clip = concatenate_videoclips(cut_clips)
    # The preview reel overwrites the original input file name.
    final_clip.write_videofile(nameclip)
    print(nameclip)
    print(namecliptoload)
    print(nametxttoload)
    for suffix in suffixes:
        os.remove(f"{tempname}cut{suffix}.mp4")
    uploader(nameclip, namecliptoload, nametxttoload)
    subprocess.Popen(
        ['loader.exe', nameclip, "content/Temp/1.mp4", nametxttoload])
Exemple #19
0
# NOTE(review): `size` is defined earlier in the script (not visible here);
# all clips are normalized to it before compositing — confirm.
video = VideoFileClip("D:\\upload\\video.mp4").subclip(0, 6).add_mask()
video = video.resize(size)

# Intro stills (3 s each) and a closing still (2 s).
foto1 = (ImageClip("D:\\upload\\foto1.png").set_duration(3))
foto1 = foto1.resize(size)

foto2 = (ImageClip("D:\\upload\\foto2.png").set_duration(3))
foto2 = foto2.resize(size)

foto3 = (ImageClip("D:\\upload\\fotoend.jpg").set_duration(2))
foto3 = foto3.resize(size)

# Timeline offsets: foto2 starts when the 3 s intro + video end...
num = video.duration + 3

# ...and foto3 starts after foto2's 3 s slot.
numfin = num + 3

# creating the video
videofinal = CompositeVideoClip([
    foto1,  # starts at t=0
    video.set_start(3),
    foto2.set_start(num),
    foto3.set_start(numfin)
])

# 3 s of background music for the intro, then the video's own audio.
background_audio_clip = AudioFileClip("D:\\upload\\audio.mp3").subclip(0, 3)
bg_music = concatenate_audioclips([background_audio_clip, video.audio])

videofinal = videofinal.set_audio(bg_music)

videofinal.write_videofile("D:\\upload\\final.mp4")
Exemple #20
0
    return argv[1], argv[2]


# Read CLI args: compilation video path and logo image path.
compilation_path, logo_path = getArgs(sys.argv)
printAndFlush('Starting adding Logo to Video ')
# Random temp prefix so concurrent runs don't collide on file names.
randomFileNumber = "ip_" + str(random.randint(1, 100000))
printAndFlush('Compilation :' + compilation_path + '| Video #:' +
              randomFileNumber)

video = VideoFileClip(compilation_path)

# Logo: 60 px tall, padded 10 px from the top-right corner, shown for the
# whole video duration.
logo = (
    ImageClip(logo_path).set_duration(video.duration).resize(
        height=60)  # if you need to resize...
    .margin(right=10, top=10, opacity=0)  # (optional) logo-border padding
    .set_pos(("right", "top")))

final = CompositeVideoClip([video, logo])
final.write_videofile(randomFileNumber + '.mp4',
                      temp_audiofile=randomFileNumber + "-audio.m4a",
                      remove_temp=True,
                      codec="libx264",
                      audio_codec="aac",
                      logger=None)
printAndFlush('Finish compilation logo composite')
# Replace the original compilation with the watermarked render.
os.remove(compilation_path)
printAndFlush('removing old file compilation_path')
os.rename(str(randomFileNumber) + '.mp4', compilation_path)
printAndFlush('Renaming file to compilation name')
Exemple #21
0
def createVideo(text: str) -> str:
    """Render (or reuse from cache) a sign-language-style video for *text*.

    Each word is looked up as a pre-rendered clip under settings.KURZ_WORDS;
    missing words are spelled out from phoneme clips (via epitran X-SAMPA
    transcription) under settings.KURZ_PHONEMES. The result is concatenated,
    captioned, and written to settings.KURZ_CLIPS/<clip_id>.mp4.

    Returns the output file name ('<clip_id>.mp4'), relative to
    settings.KURZ_CLIPS.
    """
    # German (Latin script) grapheme-to-phoneme transcriber.
    epi = epitran.Epitran('deu-Latn')
    text = text.lower()
    words = text.split()  # split into words and remove empty
    text_clean = ' '.join(words)

    # Cache lookup keyed on the normalized text; bump the access counter
    # for cache hits only (new rows keep their model default).
    clip_db, created_clip_db = Clip.objects.get_or_create(text=text_clean)
    if not created_clip_db:  # read existing file, renew life cycle
        clip_db.accessed = clip_db.accessed + 1
        clip_db.save()

    # delete old files
    # NOTE(review): only the .mp4 files are removed here; the expired Clip
    # rows themselves are kept — confirm whether they are purged elsewhere.
    cutoffTime = datetime.datetime.now() - datetime.timedelta(
        seconds=settings.KURZ_MAX_CACHE)
    clips_delete = Clip.objects.filter(created__lt=cutoffTime)

    for clip_del in clips_delete.all():
        try:
            os.remove(
                os.path.join(settings.KURZ_CLIPS,
                             str(clip_del.id) + '.mp4'))
        except FileNotFoundError:
            # File already gone (e.g. removed by a previous run) — ignore.
            pass

    # create file only if it doesn't already exist or caching is disabled
    if not os.path.exists(os.path.join(settings.KURZ_CLIPS, str(clip_db.id) + '.mp4'))\
            or not settings.KURZ_ENABLE_CACHE:
        videoFileClips = []
        for word in words:
            vidPath = os.path.join(settings.KURZ_WORDS, word + '.mp4')
            if os.path.exists(vidPath):
                # Whole-word clip available — use it directly.
                videoFileClips.append(VideoFileClip(vidPath))
            else:
                # save entry for stats
                word_db, created_word = MissingWord.objects.get_or_create(
                    word=word)
                if not created_word:
                    word_db.accessed = word_db.accessed + 1
                    word_db.save()

                # build word from phonemes
                phonemes = epi.xsampa_list(word)

                for phoneme in phonemes:
                    # Normalize X-SAMPA: drop length marks; uppercase
                    # symbols are stored as '^' + lowercase on disk.
                    phoneme_simple = phoneme.replace(':', '')
                    if phoneme_simple.isupper():
                        phoneme_simple = '^' + phoneme_simple.lower()

                    phonemePath = os.path.join(settings.KURZ_PHONEMES,
                                               phoneme_simple + '.mp4')

                    if os.path.exists(phonemePath):
                        videoFileClips.append(VideoFileClip(phonemePath))
                    else:
                        # Missing phoneme clip: skip it but log to stdout.
                        print('Phoneme not found: %s' % phoneme_simple)

                # '_' clip acts as a pause/separator after a spelled word.
                videoFileClips.append(
                    VideoFileClip(os.path.join(settings.KURZ_PHONEMES,
                                               '_.mp4')))

        # Empty input (or nothing found): fall back to the pause clip alone.
        if len(videoFileClips) == 0:
            emptyVidPath = os.path.join(settings.KURZ_PHONEMES, '_.mp4')
            if os.path.exists(emptyVidPath):
                videoFileClips.append(VideoFileClip(emptyVidPath))

        # resize clips
        videoFileClips = [clip.resize(height=360) for clip in videoFileClips]

        # concatenate
        clip = concatenate_videoclips(videoFileClips, method='compose')

        # add caption
        # NOTE(review): rebinds the 'text' parameter; harmless here since
        # the original string is no longer used past this point.
        text = TextClip(
            'kurzspricht.at',
            fontsize=15,
            size=(100, 20),
            align='center',
            method='caption',
            color='white',
        )\
            .set_duration(clip.duration)\
            .set_position((530, 15))
        # Semi-transparent solid-color band behind the caption text.
        grad = TextClip(
            '',
            fontsize=15,
            size=(100, 25),
            align='South',
            method='caption',
            color='#2f63c9',
            bg_color='#2f63c9'
        )\
            .set_duration(clip.duration)\
            .set_position((530, 8))\
            .set_opacity(0.8)
        clip = CompositeVideoClip([clip, grad, text])
        # NOTE(review): the VideoFileClip objects are never close()d —
        # potential file-handle/ffmpeg-process leak; consider closing them
        # after the write.
        clip.write_videofile(os.path.join(settings.KURZ_CLIPS,
                                          str(clip_db.id) + '.mp4'),
                             logger=None)

    return str(clip_db.id) + '.mp4'
Exemple #22
0
    # fps_source to fix 2x speed bug
    # https://github.com/Zulko/moviepy/issues/1263#issuecomment-659470362
    bgvid = VideoFileClip(args.bgvid_path, fps_source='fps')

    # Transparency in moviepy and gizeh
    # https://github.com/Zulko/moviepy/issues/898#issuecomment-452361230
    alpha_mask = VideoClip(alpha_at,
                           duration=bgvid.duration - args.keys_appear_at,
                           ismask=True)

    if keyvid is None:
        keyvid = VideoClip(rgb_at)
    keyvid.duration = alpha_mask.duration
    keyvid = keyvid.set_mask(alpha_mask)

    composite = CompositeVideoClip([
        bgvid,
        keyvid.set_position(
            ('center', 0.6),
            relative=True).fx(resize, 0.5).set_start(args.keys_appear_at)
    ])

    out_path = args.composite_out
    if out_path is None:
        out_path = Path(args.bgvid_path)
        # Awaiting https://github.com/python/cpython/pull/19295#issue-397309516
        out_path = out_path.with_name(out_path.stem + '-with-keys' +
                                      out_path.suffix)
        out_path = str(out_path)
    composite.write_videofile(out_path, fps=24)
Exemple #23
0
    image = Image.new(mode='RGBA', size=logosize)
    draw_table = ImageDraw.Draw(im=image)
    draw_table.text(xy=(0, 0), text=series_txt, fill='#FFFFFF', font=ImageFont.truetype('./font/Alibaba-PuHuiTi-Regular.ttf', 20))
    image.save('./logo/text_logo.png', 'PNG')
    image.close()

    video_name = filename
    video_path = './video/'+video_name
    video = VideoFileClip(video_path)
    if video.audio is None:
        video = VideoFileClip(video_path)
    else:
        newaudio = video.audio.fx(volumex,0)
        video = video.set_audio(newaudio)
    logo = (
        ImageClip(logo_path)
        .set_duration(video.duration)
        .resize(height=50)
        .set_pos(("left", "top"))
    )
    text = (
        ImageClip("./logo/text_logo.png")
        .set_duration(video.duration)
        .resize(height=20)
        .set_pos(("right", "bottom"))
    )
    #txt = TextClip(series_txt, color='white', font = './font/Alibaba-PuHuiTi-Regular.ttf', fontsize=20).set_pos(('right', 'bottom')).set_duration(video.duration)
    #final = CompositeVideoClip([video, logo, txt])
    final = CompositeVideoClip([video, logo, text])
    final.write_videofile("./video_result/{}.mp4".format(series_txt), codec="libx264", bitrate="10000000")
Exemple #24
0
    def process(self):
        """Segment people out of the target clips with Detectron2/PointRend
        and composite them over the background clips, then export the result.

        Reads self.targets, self.backgrounds, self.audios and the effect
        flags (gauss_target, gauss_background, tiktok, triple); writes a
        timestamped mp4 into ./output/ and emits self.finish_process.
        """
        my_logger = MyBarLogger(self.message, self.progress)
        my_logger(message="Detectron2 - Initializing the predictor")

        # Detectron2 default configuration
        cfg = get_cfg()
        # PointRend-specific configuration keys
        add_pointrend_config(cfg)
        # Load PointRend parameters from this config file
        cfg.merge_from_file(
            "projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml"
        )
        # Score threshold; a very low threshold makes inference very slow
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        # Load the pretrained model weights
        cfg.MODEL.WEIGHTS = "datasets/model_final_3c3198.pkl"
        if not torch.cuda.is_available():
            cfg.MODEL.DEVICE = 'cpu'

        predictor = DefaultPredictor(cfg)

        # Per-frame: keep only the wanted class detections, preparing the
        # data for the later to_mask() conversion.
        def custom_frame(frame):
            _frame = frame.copy()
            output = predictor(_frame)
            instances = output['instances'].to('cpu')
            data = {
                'classes': instances.pred_classes.numpy(),
                'boxes': instances.pred_boxes.tensor.numpy(),
                'masks': instances.pred_masks.numpy(),
                'scores': instances.scores.numpy()
            }
            # Keep only 'person' detections. NOTE(review): this 'process'
            # is a module-level filtering helper, not this method.
            data = process(data, target_class=[class_names.index('person')])
            result = custom_show(_frame, data['masks'])
            return result

        # Resize every source to the final frame height so compose-mode
        # concatenation does not stretch or distort the frames.
        for i in range(len(self.targets)):
            self.targets[i] = self.targets[i].fx(vfx.resize,
                                                 height=self.height)
        for i in range(len(self.backgrounds)):
            self.backgrounds[i] = self.backgrounds[i].fx(
                vfx.resize, (self.width, self.height))

        # Simple concatenation of targets (compose mode) and backgrounds.
        target = concatenate_videoclips(self.targets,
                                        method="compose").without_audio()

        background = concatenate_videoclips(self.backgrounds).without_audio()

        # Total duration = shortest stream; concatenate audio if present.
        audio = None
        duration = min(target.duration, background.duration)
        if self.audios:
            audio = concatenate_audioclips(self.audios)
            duration = min(target.duration, background.duration,
                           audio.duration)

        # Convert the per-frame detection output — an (n, w, h) ndarray —
        # into a mask telling which parts of the target clip are visible
        # over the background.
        mask_clip = target.fl_image(custom_frame).to_mask()

        # Optional Gaussian blur on target and/or background.
        if self.gauss_target:
            target = target.fl_image(blur)
        if self.gauss_background:
            background = background.fl_image(blur)
        # Optional TikTok-style effect on the target.
        if self.tiktok:
            target = target.fl_image(tiktok_effect)
        # Optional clone ("triple") effect.
        if self.triple:
            temp = self.triple_effect(target,
                                      mask_clip,
                                      width=self.width,
                                      height=self.height)
            temp.insert(0, background)
        else:
            # set_mask makes the regions detected as True visible over
            # the background.
            target = target.set_mask(mask_clip).set_position(
                "center", "center")
            temp = [background, target]

        # Composite all layers, attaching audio only when we have some.
        final_clip = CompositeVideoClip(temp).set_audio(audio). \
            set_duration(duration) if audio else CompositeVideoClip(temp).set_duration(duration)

        # Export to a timestamped file.
        final_clip.write_videofile(
            f'./output/{time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())}.mp4',
            fps=30,
            codec='mpeg4',
            bitrate="8000k",
            audio_codec="libmp3lame",
            threads=4,
            logger=my_logger)

        self.finish_process.emit()